langtrace-python-sdk 1.0.9__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
instrumentation/chroma/instrumentation.py CHANGED
@@ -1,3 +1,6 @@
+ """
+ Instrumentation for ChromaDB
+ """
  import importlib.metadata
  from typing import Collection

@@ -5,11 +8,14 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from opentelemetry.trace import get_tracer
  from wrapt import wrap_function_wrapper

- from instrumentation.chroma.lib.apis import APIS
+ from instrumentation.chroma.apis import APIS
  from instrumentation.chroma.patch import collection_patch


  class ChromaInstrumentation(BaseInstrumentor):
+     """
+     The ChromaInstrumentation class represents the ChromaDB instrumentation
+     """

      def instrumentation_dependencies(self) -> Collection[str]:
          return ["chromadb >= 0.4.23"]
@@ -19,7 +25,7 @@ class ChromaInstrumentation(BaseInstrumentor):
          tracer = get_tracer(__name__, "", tracer_provider)
          version = importlib.metadata.version('chromadb')

-         for operation, details in APIS.items():
+         for operation, _ in APIS.items():
              wrap_function_wrapper(
                  'chromadb.api.models.Collection',
                  f'Collection.{operation.lower()}',
@@ -27,7 +33,7 @@ class ChromaInstrumentation(BaseInstrumentor):
              )

      def _instrument_module(self, module_name):
-         print(module_name)
+         pass

      def _uninstrument(self, **kwargs):
-         print(kwargs)
+         pass
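
The hunks above lean entirely on wrapt's wrap_function_wrapper. As a minimal, self-contained sketch of that pattern (assuming only wrapt and opentelemetry-sdk are installed; the Greeter class and span name below are illustrative stand-ins for chromadb's Collection, not part of this SDK):

import sys

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from wrapt import wrap_function_wrapper


class Greeter:  # stand-in for chromadb.api.models.Collection.Collection
    def add(self, item):
        return f"added {item}"


def collection_patch_sketch(operation, tracer):
    # Same shape as collection_patch above: a factory returning a wrapper
    # with the (wrapped, instance, args, kwargs) signature wrapt expects.
    def traced_method(wrapped, instance, args, kwargs):
        with tracer.start_as_current_span(f"collection.{operation}"):
            return wrapped(*args, **kwargs)  # delegate to the original method
    return traced_method


trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)
# Mirrors the wrap_function_wrapper call in _instrument, pointed at this module.
wrap_function_wrapper(sys.modules[__name__], "Greeter.add",
                      collection_patch_sketch("add", tracer))
print(Greeter().add("vector"))  # the original method now runs inside a span
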
instrumentation/chroma/patch.py CHANGED
@@ -1,12 +1,18 @@
+ """
+ This module contains the patching logic for the Chroma client.
+ """
  from langtrace.trace_attributes import DatabaseSpanAttributes
- from opentelemetry.trace import SpanKind, StatusCode
+ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

+ from instrumentation.chroma.apis import APIS
  from instrumentation.constants import SERVICE_PROVIDERS
- from instrumentation.chroma.lib.apis import APIS


  def collection_patch(method, version, tracer):
+     """
+     A generic patch method that wraps a function with a span
+     """
      def traced_method(wrapped, instance, args, kwargs):
          api = APIS[method]
          service_provider = SERVICE_PROVIDERS['CHROMA']
instrumentation/constants.py CHANGED
@@ -1,16 +1,11 @@
-
- TRACE_NAMESPACES = {
-     "OPENAI": "Langtrace OpenAI SDK",
-     "LANGCHAIN": "Langtrace Langchain SDK",
-     "PINECONE": "Langtrace Pinecone SDK",
-     "LLAMAINDEX": "Langtrace LlamaIndex SDK",
-     "CHROMA": "Langtrace Chroma SDK",
- }
-
+ """
+ This file contains the constants used in the project.
+ """
  SERVICE_PROVIDERS = {
      "OPENAI": "OpenAI",
      "AZURE": "Azure",
      "LANGCHAIN": "Langchain",
+     "LANGCHAIN_CORE": "Langchain Core",
      "LANGCHAIN_COMMUNITY": "Langchain Community",
      "PINECONE": "Pinecone",
      "LLAMAINDEX": "LlamaIndex",
instrumentation/langchain_core/instrumentation.py CHANGED
@@ -71,15 +71,21 @@ class LangchainCoreInstrumentation(BaseInstrumentor):
          version = importlib.metadata.version('langchain-core')

          exclude_methods = ['get_name', 'get_output_schema',
-                            'get_input_schema', 'get_graph', 'to_json']
-         exclude_classes = ['BaseChatPromptTemplate']
+                            'get_input_schema', 'get_graph', 'to_json',
+                            'to_json_not_implemented', 'bind', 'dict',
+                            'format', 'format_messages', 'format_prompt']
+         exclude_classes = ['BaseChatPromptTemplate', 'Runnable', 'RunnableBinding',
+                            'RunnableBindingBase', 'RunnableEach', 'RunnableEachBase',
+                            'RunnableGenerator', 'RunnablePick', 'RunnableMap',
+                            'RunnableSerializable']
+
          modules_to_patch = [
              ('langchain_core.retrievers', 'retriever',
               generic_patch, True, True),
-             ('langchain_core.prompts.chat', 'chatprompt',
-              generic_patch, True, False),
+             ('langchain_core.prompts.chat', 'prompt',
+              generic_patch, True, True),
              ('langchain_core.runnables.base',
-              'runnableparallel', runnable_patch, True, True),
+              'runnable', runnable_patch, True, True),
              ('langchain_core.runnables.passthrough',
               'runnablepassthrough', runnable_patch, True, True),
              ('langchain_core.output_parsers.string',
instrumentation/langchain_core/patch.py CHANGED
@@ -22,7 +22,7 @@ def generic_patch(method_name, task, tracer, version, trace_output=True, trace_input=True):
      """

      def traced_method(wrapped, instance, args, kwargs):
-         service_provider = SERVICE_PROVIDERS['LANGCHAIN']
+         service_provider = SERVICE_PROVIDERS['LANGCHAIN_CORE']
          span_attributes = {
              'langtrace.service.name': service_provider,
              'langtrace.service.type': 'framework',
@@ -32,7 +32,18 @@ def generic_patch(method_name, task, tracer, version, trace_output=True, trace_input=True):
          }

          if len(args) > 0 and trace_input:
-             span_attributes['langchain.inputs'] = to_json_string(args)
+             inputs = {}
+             for arg in args:
+                 if isinstance(arg, dict):
+                     for key, value in arg.items():
+                         if isinstance(value, list):
+                             for item in value:
+                                 inputs[key] = item.__class__.__name__
+                         elif isinstance(value, str):
+                             inputs[key] = value
+                 elif isinstance(arg, str):
+                     inputs['input'] = arg
+             span_attributes['langchain.inputs'] = to_json_string(inputs)

          attributes = FrameworkSpanAttributes(**span_attributes)

@@ -73,7 +84,7 @@ def runnable_patch(method_name, task, tracer, version, trace_output=True, trace_input=True):
      trace_input: Whether to trace the input of the patched methods.
      """
      def traced_method(wrapped, instance, args, kwargs):
-         service_provider = SERVICE_PROVIDERS['LANGCHAIN']
+         service_provider = SERVICE_PROVIDERS['LANGCHAIN_CORE']
          span_attributes = {
              'langtrace.service.name': service_provider,
              'langtrace.service.type': 'framework',
@@ -84,12 +95,17 @@ def runnable_patch(method_name, task, tracer, version, trace_output=True, trace_input=True):

          if trace_input:
              inputs = {}
-             args_list = []
              if len(args) > 0:
-                 for value in args:
-                     if isinstance(value, str):
-                         args_list.append(value)
-                 inputs['args'] = args_list
+                 for arg in args:
+                     if isinstance(arg, dict):
+                         for key, value in arg.items():
+                             if isinstance(value, list):
+                                 for item in value:
+                                     inputs[key] = item.__class__.__name__
+                             elif isinstance(value, str):
+                                 inputs[key] = value
+                     elif isinstance(arg, str):
+                         inputs['input'] = arg

              for field, value in instance.steps.items() if hasattr(instance, "steps") and \
                  isinstance(instance.steps, dict) else {}:
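
The replacement input-handling loop that appears twice above is easier to follow in isolation. A sketch of the same logic as a standalone function (json.dumps stands in for the SDK's to_json_string helper):

import json


def flatten_inputs(args):
    # Mirrors the loop added above: dict values that are lists are recorded
    # by class name only, string values verbatim, and bare string arguments
    # land under the 'input' key.
    inputs = {}
    for arg in args:
        if isinstance(arg, dict):
            for key, value in arg.items():
                if isinstance(value, list):
                    for item in value:
                        inputs[key] = item.__class__.__name__
                elif isinstance(value, str):
                    inputs[key] = value
        elif isinstance(arg, str):
            inputs['input'] = arg
    return json.dumps(inputs)


print(flatten_inputs(({"question": "what is langtrace?", "docs": [object()]},)))
# -> {"question": "what is langtrace?", "docs": "object"}

Note one quirk of the loop: when a value is a list, each item overwrites the previous entry under the same key, so only the last item's class name survives.
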
instrumentation/llamaindex/instrumentation.py CHANGED
@@ -1,3 +1,6 @@
+ """
+ The LlamaindexInstrumentation class represents the LlamaIndex instrumentation
+ """
  import importlib.metadata
  from typing import Collection

@@ -8,12 +11,11 @@ from wrapt import wrap_function_wrapper

  from instrumentation.llamaindex.patch import generic_patch

- MODULES = [
-     "llama_index.core.query_pipeline.query",
- ]
-

  class LlamaindexInstrumentation(BaseInstrumentor):
+     """
+     The LlamaindexInstrumentation class represents the LlamaIndex instrumentation
+     """

      def instrumentation_dependencies(self) -> Collection[str]:
          return ["llama-index >= 0.10.0"]
instrumentation/llamaindex/patch.py CHANGED
@@ -1,11 +1,16 @@
+ """
+ This module contains a generic patch method that wraps a function with a span.
+ """
  from langtrace.trace_attributes import FrameworkSpanAttributes
- from opentelemetry.trace import SpanKind, StatusCode
+ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from instrumentation.constants import SERVICE_PROVIDERS


  def generic_patch(method, task, tracer, version):
+     """
+     A generic patch method that wraps a function with a span"""
      def traced_method(wrapped, instance, args, kwargs):
          service_provider = SERVICE_PROVIDERS['LLAMAINDEX']
          span_attributes = {
instrumentation/openai/{lib/apis.py → apis.py} RENAMED
@@ -1,3 +1,6 @@
+ """
+ APIs to instrument OpenAI.
+ """
  from langtrace.trace_attributes import OpenAIMethods

  APIS = {
instrumentation/openai/{lib/constants.py → constants.py} RENAMED
@@ -1,3 +1,6 @@
+ """
+ Constants for OpenAI API"""
+
  OPENAI_COST_TABLE = {
      "gpt-4-0125-preview": {
          "input": 0.01,
@@ -28,3 +31,13 @@ OPENAI_COST_TABLE = {
          "output": 0.002,
      },
  }
+
+ # TODO: Add more models
+ # https://github.com/dqbd/tiktoken/blob/74c147e19584a3a1acea0c8e0da4d39415cd33e0/wasm/src/lib.rs#L328
+ TIKTOKEN_MODEL_MAPPING = {
+     "gpt-4": "cl100k_base",
+     "gpt-4-32k": "cl100k_base",
+     "gpt-4-0125-preview": "cl100k_base",
+     "gpt-4-1106-preview": "cl100k_base",
+     "gpt-4-1106-vision-preview": "cl100k_base",
+ }
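
The new mapping routes every listed model to tiktoken's cl100k_base encoding. A quick check of what that encoding counts (assuming tiktoken is installed; the sample sentence is arbitrary):

from tiktoken import get_encoding

enc = get_encoding("cl100k_base")
tokens = enc.encode("Estimate the number of tokens in this prompt.")
print(len(tokens))  # BPE token count; close to, but not equal to, a word count
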
instrumentation/openai/patch.py CHANGED
@@ -1,14 +1,21 @@
+ """
+ This module contains the patching logic for the OpenAI library."""
  import json

  from langtrace.trace_attributes import Event, LLMSpanAttributes
- from opentelemetry.trace import SpanKind, StatusCode
+ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from instrumentation.constants import SERVICE_PROVIDERS
- from instrumentation.openai.lib.apis import APIS
+ from instrumentation.openai.apis import APIS
+ from instrumentation.openai.token_estimation import (calculate_prompt_tokens,
+                                                      estimate_tokens)


  def images_generate(original_method, version, tracer):
+     """
+     Wrap the `generate` method of the `Images` class to trace it.
+     """
      def traced_method(wrapped, instance, args, kwargs):
          base_url = str(instance._client._base_url) if hasattr(
              instance, '_client') and hasattr(instance._client, '_base_url') else ""
@@ -27,7 +34,8 @@ def images_generate(original_method, version, tracer):

          attributes = LLMSpanAttributes(**span_attributes)

-         with tracer.start_as_current_span(APIS["IMAGES_GENERATION"]["METHOD"], kind=SpanKind.CLIENT) as span:
+         with tracer.start_as_current_span(APIS["IMAGES_GENERATION"]["METHOD"],
+                                           kind=SpanKind.CLIENT) as span:
              for field, value in attributes.model_dump(by_alias=True).items():
                  if value is not None:
                      span.set_attribute(field, value)
@@ -39,7 +47,8 @@ def images_generate(original_method, version, tracer):
                      result, 'data') and len(result.data) > 0 else {}
                  response = [{
                      "url": data.url if hasattr(data, 'url') else "",
-                     "revised_prompt": data.revised_prompt if hasattr(data, 'revised_prompt') else "",
+                     "revised_prompt": data.revised_prompt if
+                     hasattr(data, 'revised_prompt') else "",
                  }]
                  span.set_attribute(
                      "llm.responses", json.dumps(response))
@@ -60,6 +69,7 @@ def images_generate(original_method, version, tracer):


  def chat_completions_create(original_method, version, tracer):
+     """Wrap the `create` method of the `ChatCompletion` class to trace it."""
      def traced_method(wrapped, instance, args, kwargs):
          base_url = str(instance._client._base_url) if hasattr(
              instance, '_client') and hasattr(instance._client, '_base_url') else ""
@@ -84,83 +94,131 @@ def chat_completions_create(original_method, version, tracer):
              attributes.llm_top_p = kwargs.get('top_p')
          if kwargs.get('user') is not None:
              attributes.llm_user = kwargs.get('user')
-
-         with tracer.start_as_current_span(APIS["CHAT_COMPLETION"]["METHOD"], kind=SpanKind.CLIENT) as span:
-             for field, value in attributes.model_dump(by_alias=True).items():
-                 if value is not None:
-                     span.set_attribute(field, value)
-             try:
-                 # Attempt to call the original method
-                 result = original_method(*args, **kwargs)
-                 if kwargs.get('stream') is False:
-                     if hasattr(result, 'choices') and result.choices is not None:
-                         responses = [
-                             {
-                                 "message": choice.message.content if choice.message and choice.message.content else "",
-                                 **({"content_filter_results": choice["content_filter_results"]} if "content_filter_results" in choice else {})
-                             }
-                             for choice in result.choices
-                         ]
-                     else:
-                         responses = []
-                     span.set_attribute("llm.responses", json.dumps(responses))
-
-                     if hasattr(result, 'system_fingerprint') and result.system_fingerprint is not None:
+         if kwargs.get('functions') is not None:
+             attributes.llm_function_prompts = json.dumps(
+                 kwargs.get('functions'))
+
+         # TODO(Karthik): Gotta figure out how to handle streaming with context
+         # with tracer.start_as_current_span(APIS["CHAT_COMPLETION"]["METHOD"],
+         #                                   kind=SpanKind.CLIENT) as span:
+         span = tracer.start_span(
+             APIS["CHAT_COMPLETION"]["METHOD"], kind=SpanKind.CLIENT)
+         for field, value in attributes.model_dump(by_alias=True).items():
+             if value is not None:
+                 span.set_attribute(field, value)
+         try:
+             # Attempt to call the original method
+             result = original_method(*args, **kwargs)
+             if kwargs.get('stream') is False:
+                 if hasattr(result, 'choices') and result.choices is not None:
+                     responses = [
+                         {
+                             "message": choice.message.content if choice.message and
+                             choice.message.content else choice.message.function_call.arguments
+                             if choice.message and
+                             choice.message.function_call.arguments else "",
+                             **({"content_filter_results": choice["content_filter_results"]}
+                                if "content_filter_results" in choice else {})
+                         }
+                         for choice in result.choices
+                     ]
+                     span.set_attribute(
+                         "llm.responses", json.dumps(responses))
+                 else:
+                     responses = []
+                     span.set_attribute(
+                         "llm.responses", json.dumps(responses))
+                 if hasattr(result, 'system_fingerprint') and \
+                         result.system_fingerprint is not None:
+                     span.set_attribute(
+                         "llm.system.fingerprint", result.system_fingerprint)
+                 # Get the usage
+                 if hasattr(result, 'usage') and result.usage is not None:
+                     usage = result.usage
+                     if usage is not None:
+                         usage_dict = {
+                             "prompt_tokens": result.usage.prompt_tokens,
+                             "completion_tokens": usage.completion_tokens,
+                             "total_tokens": usage.total_tokens
+                         }
                          span.set_attribute(
-                             "llm.system.fingerprint", result.system_fingerprint)
-
-                 # Get the usage
-                 if hasattr(result, 'usage') and result.usage is not None:
-                     usage = result.usage
-                     if usage is not None:
-                         usage_dict = {
-                             "prompt_tokens": result.usage.prompt_tokens,
-                             "completion_tokens": usage.completion_tokens,
-                             "total_tokens": usage.total_tokens
-                         }
-                         span.set_attribute(
-                             "llm.token.counts", json.dumps(usage_dict))
-
-                 span.set_status(StatusCode.OK)
-                 return result
+                             "llm.token.counts", json.dumps(usage_dict))
+                 span.set_status(StatusCode.OK)
+                 span.end()
+                 return result
+             else:
+                 prompt_tokens = calculate_prompt_tokens(json.dumps(
+                     kwargs.get('messages', {})[0]), kwargs.get('model'))
+                 return handle_streaming_response(result, span, prompt_tokens,
+                                                  function_call=kwargs.get(
+                                                      'functions')
+                                                  is not None)
+         except Exception as e:
+             # Record the exception in the span
+             span.record_exception(e)
+             # Set the span status to indicate an error
+             span.set_status(Status(StatusCode.ERROR, str(e)))
+             # Reraise the exception to ensure it's not swallowed
+             span.end()
+             raise
+
+     def handle_streaming_response(result, span, prompt_tokens, function_call=False):
+         """Process and yield streaming response chunks."""
+         result_content = []
+         span.add_event(Event.STREAM_START.value)
+         completion_tokens = 0
+         try:
+             for chunk in result:
+                 if hasattr(chunk, 'choices') and chunk.choices is not None:
+                     token_counts = [
+                         estimate_tokens(choice.delta.content) if choice.delta
+                         and choice.delta.content
+                         else estimate_tokens(choice.delta.function_call.arguments)
+                         if choice.delta.function_call and
+                         choice.delta.function_call.arguments else 0
+                         for choice in chunk.choices
+                     ]
+                     completion_tokens += sum(token_counts)
+                     content = [
+                         choice.delta.content if choice.delta and choice.delta.content
+                         else choice.delta.function_call.arguments if choice.delta.function_call and
+                         choice.delta.function_call.arguments else ""
+                         for choice in chunk.choices
+                     ]
                  else:
-                     result_content = []
-                     span.add_event(Event.STREAM_START.value)
-
-                     for chunk in result:
-                         # Assuming `chunk` has a structure similar to what OpenAI might return,
-                         # adjust the access accordingly based on actual response structure.
-                         if hasattr(chunk, 'choices') and chunk.choices is not None:
-                             content = [
-                                 choice.delta.content if choice.delta and choice.delta.content else ""
-                                 for choice in chunk.choices
-                             ]
-                         else:
-                             content = []
-                         span.add_event(Event.STREAM_OUTPUT.value, {
-                             "response": "".join(content)
-                         })
-                         result_content.append(
-                             content[0] if len(content) > 0 else "")
-                     span.add_event(Event.STREAM_END.value)
-                     span.set_attribute("llm.responses", json.dumps(
-                         {"message": {"role": "assistant", "content": "".join(result_content)}}))
-
-         except Exception as e:
-             # Record the exception in the span
-             span.record_exception(e)
-
-             # Set the span status to indicate an error
-             span.set_status(Status(StatusCode.ERROR, str(e)))
-
-             # Reraise the exception to ensure it's not swallowed
-             raise
+                     content = []
+                 span.add_event(Event.STREAM_OUTPUT.value, {
+                     "response": "".join(content)
+                 })
+                 result_content.append(
+                     content[0] if len(content) > 0 else "")
+                 yield chunk
+         finally:
+
+             # Finalize span after processing all chunks
+             span.add_event(Event.STREAM_END.value)
+             span.set_attribute("llm.token.counts", json.dumps({
+                 "prompt_tokens": prompt_tokens,
+                 "completion_tokens": completion_tokens,
+                 "total_tokens": prompt_tokens + completion_tokens
+             }))
+             if function_call is False:
+                 span.set_attribute("llm.responses", json.dumps(
+                     {"message": {"role": "assistant", "content": "".join(result_content)}}))
+             else:
+                 span.set_attribute("llm.responses", json.dumps(
+                     {"message": {"role": "assistant", "function_call": "".join(result_content)}}))
+             span.set_status(StatusCode.OK)
+             span.end()

      # return the wrapped method
      return traced_method


  def embeddings_create(original_method, version, tracer):
+     """
+     Wrap the `create` method of the `Embeddings` class to trace it.
+     """
      def traced_method(wrapped, instance, args, kwargs):
          base_url = str(instance._client._base_url) if hasattr(
              instance, '_client') and hasattr(instance._client, '_base_url') else ""
@@ -187,7 +245,8 @@ def embeddings_create(original_method, version, tracer):
          if kwargs.get('user') is not None:
              attributes["llm.user"] = kwargs.get('user')

-         with tracer.start_as_current_span(APIS["EMBEDDINGS_CREATE"]["METHOD"], kind=SpanKind.CLIENT) as span:
+         with tracer.start_as_current_span(APIS["EMBEDDINGS_CREATE"]["METHOD"],
+                                           kind=SpanKind.CLIENT) as span:
              for field, value in attributes.model_dump(by_alias=True).items():
                  if value is not None:
                      span.set_attribute(field, value)
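
The structural change in chat_completions_create is that a streamed completion now outlives the wrapped call, so the span can no longer live inside a with block; it is started manually and ended in the generator's finally clause. A stripped-down sketch of that pattern (opentelemetry-sdk only; the plain list stands in for the OpenAI stream object):

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)


def traced_stream(chunks):
    span = tracer.start_span("chat.completion")  # manual start, no context manager

    def generator():
        try:
            for chunk in chunks:
                yield chunk  # the caller consumes chunks at its own pace
        finally:
            span.end()  # the span closes only once the stream is drained or closed

    return generator()


for piece in traced_stream(["Hel", "lo"]):
    print(piece, end="")
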
instrumentation/openai/token_estimation.py ADDED
@@ -0,0 +1,48 @@
+ """
+ This module contains functions to estimate the number of tokens in a prompt and
+ to calculate the price of a model based on its usage.
+ """
+
+ from tiktoken import get_encoding
+
+ from instrumentation.openai.constants import (OPENAI_COST_TABLE,
+                                               TIKTOKEN_MODEL_MAPPING)
+
+
+ def estimate_tokens(prompt):
+     """
+     Estimate the number of tokens in a prompt."""
+     if prompt and len(prompt) > 0:
+         # Simplified token estimation: count the words.
+         return len([word for word in prompt.split() if word])
+     return 0
+
+
+ def estimate_tokens_using_tiktoken(prompt, model):
+     """
+     Estimate the number of tokens in a prompt using tiktoken."""
+     encoding = get_encoding(model)
+     tokens = encoding.encode(prompt)
+     return len(tokens)
+
+
+ def calculate_prompt_tokens(prompt_content, model):
+     """
+     Calculate the number of tokens in a prompt. If the model is supported by tiktoken, use it for the estimation."""
+     try:
+         tiktoken_model = TIKTOKEN_MODEL_MAPPING[model]
+         return estimate_tokens_using_tiktoken(prompt_content, tiktoken_model)
+     except Exception:
+         return estimate_tokens(prompt_content)  # Fallback method
+
+
+ def calculate_price_from_usage(model, usage):
+     """
+     Calculate the price of a model based on its usage."""
+     cost_table = OPENAI_COST_TABLE.get(model)
+     if cost_table:
+         return (
+             (cost_table['input'] * usage['prompt_tokens'] +
+              cost_table['output'] * usage['completion_tokens']) / 1000
+         )
+     return 0
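
A usage sketch for the new module, assuming the wheel's instrumentation package is importable; the model names are examples, and the exact gpt-4 count depends on tiktoken's cl100k_base encoding:

from instrumentation.openai.token_estimation import (
    calculate_price_from_usage, calculate_prompt_tokens)

# "gpt-4" is in TIKTOKEN_MODEL_MAPPING, so this takes the tiktoken path.
print(calculate_prompt_tokens("What is LangTrace?", "gpt-4"))
# "gpt-3.5-turbo" is not mapped; the KeyError falls back to word counting (3).
print(calculate_prompt_tokens("What is LangTrace?", "gpt-3.5-turbo"))
# Price is (input_rate * prompt_tokens + output_rate * completion_tokens) / 1000,
# using the per-1K-token rates from OPENAI_COST_TABLE.
print(calculate_price_from_usage(
    "gpt-4-0125-preview", {"prompt_tokens": 1000, "completion_tokens": 500}))
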
instrumentation/pinecone/instrumentation.py CHANGED
@@ -1,3 +1,7 @@
+ """
+ Pinecone instrumentation
+ """
+
  import importlib.metadata
  from typing import Collection

@@ -7,11 +11,13 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from opentelemetry.trace import get_tracer
  from wrapt import wrap_function_wrapper

- from instrumentation.pinecone.lib.apis import APIS
+ from instrumentation.pinecone.apis import APIS
  from instrumentation.pinecone.patch import generic_patch


  class PineconeInstrumentation(BaseInstrumentor):
+     """
+     The PineconeInstrumentation class represents the Pinecone instrumentation"""

      def instrumentation_dependencies(self) -> Collection[str]:
          return ["pinecone-client >= 3.1.0"]
@@ -40,4 +46,4 @@ class PineconeInstrumentation(BaseInstrumentor):
              )

      def _uninstrument(self, **kwargs):
-         print(kwargs)
+         pass
instrumentation/pinecone/patch.py CHANGED
@@ -1,14 +1,16 @@
- import json
-
+ """
+ This module contains the patching logic for the Pinecone client."""
  from langtrace.trace_attributes import DatabaseSpanAttributes
- from opentelemetry.trace import SpanKind, StatusCode
+ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from instrumentation.constants import SERVICE_PROVIDERS
- from instrumentation.pinecone.lib.apis import APIS
+ from instrumentation.pinecone.apis import APIS


  def generic_patch(original_method, method, version, tracer):
+     """
+     A generic patch method that wraps a function with a span"""
      def traced_method(wrapped, instance, args, kwargs):
          api = APIS[method]
          service_provider = SERVICE_PROVIDERS['PINECONE']
{langtrace_python_sdk-1.0.9.dist-info → langtrace_python_sdk-1.0.10.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langtrace-python-sdk
- Version: 1.0.9
+ Version: 1.0.10
  Summary: LangTrace - Python SDK
  Home-page: https://github.com/Scale3-Labs/langtrace-python-sdk
  Author: Ali Waleed
langtrace_python_sdk-1.0.10.dist-info/RECORD ADDED
@@ -0,0 +1,37 @@
+ instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/constants.py,sha256=YdC62dsYpbbBdMHhfUbaK-cbDM4w4eau0ClUdWVmjmU,336
+ instrumentation/with_root_span.py,sha256=CRie2ljHhnHN8bUGDwBM-F18-c6xyoI_238KP8BEO-U,969
+ instrumentation/chroma/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/chroma/apis.py,sha256=hiPGYdHS0Yj4Kh3eaYBbuCAl_swqIygu80yFqkOgdak,955
+ instrumentation/chroma/instrumentation.py,sha256=ySEyLnXcjL7D3sgMHTkxwdpxDpsRVbRJvFOgTxYRHvs,1174
+ instrumentation/chroma/patch.py,sha256=2ERORLV4_F1UU2Is8Y4H7aEXNnAPI5XKvW9DnLeEndM,1943
+ instrumentation/chroma/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/langchain/instrumentation.py,sha256=LXtx5edfHPLRZ9yP0yKbDHlvE7LOJumJMGTqQX5RhhM,2850
+ instrumentation/langchain/patch.py,sha256=f-lq0wdk7doop-Dak2VcGueDsESA_5RKyuGtJQIm4DQ,2979
+ instrumentation/langchain_community/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/langchain_community/instrumentation.py,sha256=XWzaHl4FPPlZEhnUlxRB0iO5kPkNVI9siRXZgxF_Yb4,4316
+ instrumentation/langchain_community/patch.py,sha256=w6R_lHTDg2GzWRH8BZNocQowedeaNUE-pLfCoRETnTk,2872
+ instrumentation/langchain_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/langchain_core/instrumentation.py,sha256=AYHqyuOwunuMqrTCxzXCbkIDmJbHA6Q40moHntdL4tw,4739
+ instrumentation/langchain_core/patch.py,sha256=a314C0IaF0gSa2eh-yO8jHqtZwnWIczQsF3FgCgoiiU,7536
+ instrumentation/llamaindex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/llamaindex/instrumentation.py,sha256=WwETor8jLwaDQwnwgbtKZHQ3MwtNIfZSp1aaUn-uLIk,2759
+ instrumentation/llamaindex/patch.py,sha256=hSSoOij70kIhAleHLOfTW-zNc-N9boQz1iyhoBdVRsQ,1709
+ instrumentation/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/openai/apis.py,sha256=lMUa6rCkT-fKltngOUxcXd0aNpTb5L8xlqjrdseLIZM,488
+ instrumentation/openai/constants.py,sha256=3_xaFfAhh2dpH0072Cijzb5iZazUpmj4SF0iWiMFm1A,973
+ instrumentation/openai/instrumentation.py,sha256=Mkk6fwvQ8kS9ykFFc8OffIGbNMYVi6rrBVjVVhjuTjo,1408
+ instrumentation/openai/patch.py,sha256=Syktbjz9R-XjGj9QEGojZ4fsxZdQ7Gq4nPOW49J4fLA,12234
+ instrumentation/openai/token_estimation.py,sha256=nwTR0yyZs2OB0S3bBviBgd_xMb6oh7nCPMt-HlIlCRU,1549
+ instrumentation/openai/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/pinecone/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ instrumentation/pinecone/apis.py,sha256=XpKNUfyzEE3HkBN10Qv1w_t1PT-J39pHlotrdU-wvec,477
+ instrumentation/pinecone/instrumentation.py,sha256=yfOxKkMtW6GEUQ0E9AWSBdaa07MHzV3o6Q09cAvoWIU,1708
+ instrumentation/pinecone/patch.py,sha256=fr07o97CqGc8sUEyMtSiT6watZiTPStRPOrxOzhJGLo,1840
+ instrumentation/pinecone/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ langtrace_python_sdk-1.0.10.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ langtrace_python_sdk-1.0.10.dist-info/METADATA,sha256=PD7RbdnqjA8lCTuz7rc89PgW6VcrpMxV_DntIHak4bY,6149
+ langtrace_python_sdk-1.0.10.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ langtrace_python_sdk-1.0.10.dist-info/top_level.txt,sha256=mdFAULSZuqUiDveRElCIPMvwAkRAYXP4bm_dEI4A96Q,16
+ langtrace_python_sdk-1.0.10.dist-info/RECORD,,
instrumentation/setup.py DELETED
@@ -1,50 +0,0 @@
-
- from opentelemetry import trace
- from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.sdk.trace.export import (ConsoleSpanExporter,
-                                             SimpleSpanProcessor)
-
- from instrumentation.chroma.instrumentation import ChromaInstrumentation
- from instrumentation.langchain.instrumentation import LangchainInstrumentation
- from instrumentation.langchain_community.instrumentation import \
-     LangchainCommunityInstrumentation
- from instrumentation.langchain_core.instrumentation import \
-     LangchainCoreInstrumentation
- from instrumentation.llamaindex.instrumentation import \
-     LlamaindexInstrumentation
- from instrumentation.openai.instrumentation import OpenAIInstrumentation
- from instrumentation.pinecone.instrumentation import PineconeInstrumentation
-
-
- def setup_instrumentation():
-
-     # Set up OpenTelemetry tracing
-     tracer_provider = TracerProvider()
-
-     # Use the ConsoleSpanExporter to print traces to the console
-     console_exporter = ConsoleSpanExporter()
-     tracer_provider.add_span_processor(SimpleSpanProcessor(console_exporter))
-
-     # Initialize tracer
-     trace.set_tracer_provider(tracer_provider)
-
-     # Initialize and enable your custom OpenAI instrumentation
-     # Create an instance of OpenAIInstrumentation
-     openai_instrumentation = OpenAIInstrumentation()
-     pinecone_instrumentation = PineconeInstrumentation()
-     llamaindex_instrumentation = LlamaindexInstrumentation()
-     chroma_instrumentation = ChromaInstrumentation()
-     langchain_instrumentation = LangchainInstrumentation()
-     langchain_core_instrumentation = LangchainCoreInstrumentation()
-     langchain_community_instrumentation = LangchainCommunityInstrumentation()
-
-     # Call the instrument method with some arguments
-     openai_instrumentation.instrument()
-     pinecone_instrumentation.instrument()
-     llamaindex_instrumentation.instrument()
-     chroma_instrumentation.instrument()
-     langchain_instrumentation.instrument()
-     langchain_core_instrumentation.instrument()
-     langchain_community_instrumentation.instrument()
-
-     print("setup complete")
instrumentation/utils.py DELETED
@@ -1,27 +0,0 @@
- from tiktoken import TiktokenEncoding, get_encoding
- from .constants import TIKTOKEN_MODEL_MAPPING, OPENAI_COST_TABLE
-
- def estimate_tokens(prompt: str) -> int:
-     if prompt and len(prompt) > 0:
-         # Simplified token estimation: count the words.
-         return len(prompt.split())
-     return 0
-
- def estimate_tokens_using_tiktoken(prompt: str, model: TiktokenEncoding) -> int:
-     encoding = get_encoding(model)
-     tokens = encoding.encode(prompt)
-     return len(tokens)
-
- def calculate_prompt_tokens(prompt_content: str, model: str) -> int:
-     try:
-         tiktoken_model = TIKTOKEN_MODEL_MAPPING[model]
-         return estimate_tokens_using_tiktoken(prompt_content, tiktoken_model)
-     except KeyError:
-         return estimate_tokens(prompt_content)  # Fallback method
-
- def calculate_price_from_usage(model: str, usage: dict) -> float:
-     cost_table = OPENAI_COST_TABLE.get(model)
-     if cost_table:
-         return ((cost_table['input'] * usage['prompt_tokens'] +
-                  cost_table['output'] * usage['completion_tokens']) / 1000)
-     return 0
langtrace_python_sdk-1.0.9.dist-info/RECORD DELETED
@@ -1,38 +0,0 @@
- instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/constants.py,sha256=1wth9_em4_h4UgmcADcPmDHvKCJLqOqBbTw7jmcga6A,467
- instrumentation/setup.py,sha256=07Sy36lUuNuPU5QPAs2BEMm-YKSosruzKJPl0QKc_rc,2105
- instrumentation/utils.py,sha256=2kQHQgeuk8kSGoQSBQByQYEXGLkSMDkw7riYVag_cv8,1059
- instrumentation/with_root_span.py,sha256=CRie2ljHhnHN8bUGDwBM-F18-c6xyoI_238KP8BEO-U,969
- instrumentation/chroma/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/chroma/instrumentation.py,sha256=Gc0nNAMoBR-WPRbgPw5AYZBDSQUY56i_rAEbhX8h4A4,1078
- instrumentation/chroma/patch.py,sha256=smzSItC53-tuaMuGbiMhgB2luNJOHz3ttyL7uOqyYII,1811
- instrumentation/chroma/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/chroma/lib/apis.py,sha256=hiPGYdHS0Yj4Kh3eaYBbuCAl_swqIygu80yFqkOgdak,955
- instrumentation/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/langchain/instrumentation.py,sha256=LXtx5edfHPLRZ9yP0yKbDHlvE7LOJumJMGTqQX5RhhM,2850
- instrumentation/langchain/patch.py,sha256=f-lq0wdk7doop-Dak2VcGueDsESA_5RKyuGtJQIm4DQ,2979
- instrumentation/langchain_community/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/langchain_community/instrumentation.py,sha256=XWzaHl4FPPlZEhnUlxRB0iO5kPkNVI9siRXZgxF_Yb4,4316
- instrumentation/langchain_community/patch.py,sha256=w6R_lHTDg2GzWRH8BZNocQowedeaNUE-pLfCoRETnTk,2872
- instrumentation/langchain_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/langchain_core/instrumentation.py,sha256=uq9F0R0AVM-mlqcrtl2cKxFjHjMTqTZb8741AVhzjAA,4360
- instrumentation/langchain_core/patch.py,sha256=PRAi64V02TSY3uX3403dmQ0CDPHSf9ZF9fvw4Tfaf3Y,6702
- instrumentation/llamaindex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/llamaindex/instrumentation.py,sha256=igUTU0lXOVGXOF_4fvFrRa0Y8dmwdGpk6AqWmrQhDaE,2635
- instrumentation/llamaindex/patch.py,sha256=-vsnJBXBBUwNJckHfmH8BRuVM0_dHGOqH6d22NBY4_E,1562
- instrumentation/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/openai/instrumentation.py,sha256=Mkk6fwvQ8kS9ykFFc8OffIGbNMYVi6rrBVjVVhjuTjo,1408
- instrumentation/openai/patch.py,sha256=hA71WogWtbP3gIGAE9cVMF3UyBOQPV-KyLrg50ctfoM,9467
- instrumentation/openai/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/openai/lib/apis.py,sha256=9rVa-9nKkKWuIFKGmyrFK_fHe7XVe07SlfRfB3Bxl8Q,453
- instrumentation/openai/lib/constants.py,sha256=jbxBAXlyauJQFQhM5I01005y0qLr_IRRWjKDkzsltDA,594
- instrumentation/pinecone/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/pinecone/instrumentation.py,sha256=asVu7d7slGOwv0ZTv9W_oIOe9gSDI7dFgxLhh-JMUt0,1598
- instrumentation/pinecone/patch.py,sha256=KCiNToy2hAq0eQHyR-Jw5aEyyn1XoQubGrzew-_sK04,1725
- instrumentation/pinecone/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- instrumentation/pinecone/lib/apis.py,sha256=XpKNUfyzEE3HkBN10Qv1w_t1PT-J39pHlotrdU-wvec,477
- langtrace_python_sdk-1.0.9.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- langtrace_python_sdk-1.0.9.dist-info/METADATA,sha256=ZDDY0OWZ-TMVH3SF8198BOOuTAZfdaN7qLQD9iYNAbU,6148
- langtrace_python_sdk-1.0.9.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- langtrace_python_sdk-1.0.9.dist-info/top_level.txt,sha256=mdFAULSZuqUiDveRElCIPMvwAkRAYXP4bm_dEI4A96Q,16
- langtrace_python_sdk-1.0.9.dist-info/RECORD,,