langtrace-python-sdk 2.0.4__py3-none-any.whl → 2.0.6__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (59)
  1. examples/anthropic_example/completion.py +1 -1
  2. examples/chroma_example/basic.py +1 -1
  3. examples/cohere_example/chat.py +7 -3
  4. examples/cohere_example/chat_stream.py +7 -2
  5. examples/cohere_example/embed.py +2 -1
  6. examples/cohere_example/rerank.py +2 -1
  7. examples/cohere_example/tools.py +21 -5
  8. examples/fastapi_example/basic_route.py +1 -1
  9. examples/hiveagent_example/basic.py +1 -1
  10. examples/langchain_example/groq_example.py +3 -1
  11. examples/langchain_example/langgraph_example.py +11 -12
  12. examples/llamaindex_example/agent.py +1 -1
  13. examples/llamaindex_example/basic.py +1 -1
  14. examples/openai_example/async_tool_calling_nonstreaming.py +11 -4
  15. examples/openai_example/async_tool_calling_streaming.py +41 -29
  16. examples/openai_example/chat_completion.py +12 -8
  17. examples/openai_example/embeddings_create.py +2 -1
  18. examples/openai_example/function_calling.py +11 -6
  19. examples/openai_example/images_generate.py +2 -1
  20. examples/openai_example/tool_calling.py +1 -1
  21. examples/openai_example/tool_calling_nonstreaming.py +11 -3
  22. examples/openai_example/tool_calling_streaming.py +42 -29
  23. examples/perplexity_example/basic.py +1 -1
  24. examples/pinecone_example/basic.py +4 -1
  25. examples/qdrant_example/basic.py +8 -6
  26. langtrace_python_sdk/constants/instrumentation/groq.py +0 -2
  27. langtrace_python_sdk/extensions/langtrace_exporter.py +4 -12
  28. langtrace_python_sdk/instrumentation/anthropic/instrumentation.py +1 -2
  29. langtrace_python_sdk/instrumentation/anthropic/patch.py +14 -4
  30. langtrace_python_sdk/instrumentation/chroma/patch.py +4 -2
  31. langtrace_python_sdk/instrumentation/cohere/instrumentation.py +6 -3
  32. langtrace_python_sdk/instrumentation/groq/instrumentation.py +3 -1
  33. langtrace_python_sdk/instrumentation/groq/patch.py +26 -11
  34. langtrace_python_sdk/instrumentation/langchain/patch.py +4 -2
  35. langtrace_python_sdk/instrumentation/langchain_community/instrumentation.py +1 -2
  36. langtrace_python_sdk/instrumentation/langchain_community/patch.py +4 -3
  37. langtrace_python_sdk/instrumentation/langchain_core/instrumentation.py +3 -1
  38. langtrace_python_sdk/instrumentation/langchain_core/patch.py +4 -2
  39. langtrace_python_sdk/instrumentation/langgraph/instrumentation.py +17 -8
  40. langtrace_python_sdk/instrumentation/langgraph/patch.py +47 -26
  41. langtrace_python_sdk/instrumentation/llamaindex/patch.py +3 -1
  42. langtrace_python_sdk/instrumentation/openai/instrumentation.py +7 -3
  43. langtrace_python_sdk/instrumentation/openai/patch.py +40 -17
  44. langtrace_python_sdk/instrumentation/pinecone/patch.py +4 -2
  45. langtrace_python_sdk/instrumentation/qdrant/patch.py +4 -2
  46. langtrace_python_sdk/langtrace.py +131 -64
  47. langtrace_python_sdk/types/__init__.py +29 -0
  48. langtrace_python_sdk/utils/llm.py +2 -4
  49. langtrace_python_sdk/utils/with_root_span.py +3 -3
  50. langtrace_python_sdk/version.py +1 -1
  51. {langtrace_python_sdk-2.0.4.dist-info → langtrace_python_sdk-2.0.6.dist-info}/METADATA +2 -2
  52. {langtrace_python_sdk-2.0.4.dist-info → langtrace_python_sdk-2.0.6.dist-info}/RECORD +59 -58
  53. tests/chroma/test_chroma.py +26 -20
  54. tests/langchain/test_langchain.py +29 -16
  55. tests/langchain/test_langchain_community.py +28 -15
  56. tests/langchain/test_langchain_core.py +52 -26
  57. tests/pinecone/test_pinecone.py +27 -18
  58. {langtrace_python_sdk-2.0.4.dist-info → langtrace_python_sdk-2.0.6.dist-info}/WHEEL +0 -0
  59. {langtrace_python_sdk-2.0.4.dist-info → langtrace_python_sdk-2.0.6.dist-info}/licenses/LICENSE +0 -0
@@ -22,12 +22,12 @@ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from langtrace_python_sdk.constants.instrumentation.common import (
- LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY, SERVICE_PROVIDERS)
+ LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+ SERVICE_PROVIDERS,
+ )


- def patch_graph_methods(
- method_name, tracer, version
- ):
+ def patch_graph_methods(method_name, tracer, version):
  def traced_method(wrapped, instance, args, kwargs):
  service_provider = SERVICE_PROVIDERS["LANGGRAPH"]
  extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
@@ -38,7 +38,7 @@ def patch_graph_methods(
  "langtrace.service.type": "framework",
  "langtrace.service.version": version,
  "langtrace.version": "1.0.0",
- **(extra_attributes if extra_attributes is not None else {})
+ **(extra_attributes if extra_attributes is not None else {}),
  }

  attr = get_atrribute_key_value(method_name, args)
@@ -75,39 +75,60 @@ def get_atrribute_key_value(method_name, args):

  if "add_node" in method_name:
  return {
- 'langgraph.node': json.dumps({
- 'name': args[0],
- 'action': args[1].json() if hasattr(args[1], 'json') else args[1].__name__ if hasattr(args[1], '__name__') else str(args[1]),
- }),
- 'langgraph.task.name': 'add_node',
+ "langgraph.node": json.dumps(
+ {
+ "name": args[0],
+ "action": (
+ args[1].json()
+ if hasattr(args[1], "json")
+ else (
+ args[1].__name__
+ if hasattr(args[1], "__name__")
+ else str(args[1])
+ )
+ ),
+ }
+ ),
+ "langgraph.task.name": "add_node",
  }
  elif "add_edge" in method_name:
  return {
- 'langgraph.edge': json.dumps({
- 'source': args[0],
- 'destination': args[1],
- }),
- 'langgraph.task.name': 'add_edge',
+ "langgraph.edge": json.dumps(
+ {
+ "source": args[0],
+ "destination": args[1],
+ }
+ ),
+ "langgraph.task.name": "add_edge",
  }
  elif "add_conditional_edges" in method_name:
  return {
- 'langgraph.edge': json.dumps({
- 'source': args[0],
- 'path': args[1].json() if hasattr(args[1], 'json') else args[1].__name__ if hasattr(args[1], '__name__') else str(args[1]),
- 'path_map': args[2],
- }),
- 'langgraph.task.name': 'add_conditional_edges',
+ "langgraph.edge": json.dumps(
+ {
+ "source": args[0],
+ "path": (
+ args[1].json()
+ if hasattr(args[1], "json")
+ else (
+ args[1].__name__
+ if hasattr(args[1], "__name__")
+ else str(args[1])
+ )
+ ),
+ "path_map": args[2],
+ }
+ ),
+ "langgraph.task.name": "add_conditional_edges",
  }
  elif "set_entry_point" in method_name:
  return {
- 'langgraph.entrypoint': args[0],
- 'langgraph.task.name': 'set_entry_point',
+ "langgraph.entrypoint": args[0],
+ "langgraph.task.name": "set_entry_point",
  }
  elif "set_finish_point" in method_name:
  return {
- 'langgraph.finishpoint': args[0],
- 'langgraph.task.name': 'set_finish_point',
+ "langgraph.finishpoint": args[0],
+ "langgraph.task.name": "set_finish_point",
  }
  else:
  return None
-
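
For context on the attribute mapping above, here is a minimal standalone sketch of what an `add_node` call would record; the node name and callable are made up for illustration:

```python
# Sketch only: mirrors the add_node branch of get_atrribute_key_value above.
import json


def fake_agent(state):
    return state


args = ("agent", fake_agent)  # as received from StateGraph.add_node("agent", fake_agent)
attr = {
    "langgraph.node": json.dumps(
        {
            "name": args[0],
            "action": (
                args[1].json()
                if hasattr(args[1], "json")
                else args[1].__name__ if hasattr(args[1], "__name__") else str(args[1])
            ),
        }
    ),
    "langgraph.task.name": "add_node",
}
assert attr["langgraph.node"] == '{"name": "agent", "action": "fake_agent"}'
```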
@@ -20,7 +20,9 @@ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from langtrace_python_sdk.constants.instrumentation.common import (
- LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY, SERVICE_PROVIDERS)
+ LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+ SERVICE_PROVIDERS,
+ )


  def generic_patch(method, task, tracer, version):
@@ -23,9 +23,13 @@ from opentelemetry.trace import get_tracer
  from wrapt import wrap_function_wrapper

  from langtrace_python_sdk.instrumentation.openai.patch import (
- async_chat_completions_create, async_embeddings_create,
- async_images_generate, chat_completions_create, embeddings_create,
- images_generate)
+ async_chat_completions_create,
+ async_embeddings_create,
+ async_images_generate,
+ chat_completions_create,
+ embeddings_create,
+ images_generate,
+ )

  logging.basicConfig(level=logging.FATAL)

@@ -22,10 +22,11 @@ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from langtrace_python_sdk.constants.instrumentation.common import (
- LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY, SERVICE_PROVIDERS)
+ LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+ SERVICE_PROVIDERS,
+ )
  from langtrace_python_sdk.constants.instrumentation.openai import APIS
- from langtrace_python_sdk.utils.llm import (calculate_prompt_tokens,
- estimate_tokens)
+ from langtrace_python_sdk.utils.llm import calculate_prompt_tokens, estimate_tokens


  def images_generate(original_method, version, tracer):
@@ -52,7 +53,9 @@ def images_generate(original_method, version, tracer):
  "llm.api": APIS["IMAGES_GENERATION"]["ENDPOINT"],
  "llm.model": kwargs.get("model"),
  "llm.stream": kwargs.get("stream"),
- "llm.prompts": json.dumps([{"role": "user", "content": kwargs.get("prompt", [])}]),
+ "llm.prompts": json.dumps(
+ [{"role": "user", "content": kwargs.get("prompt", [])}]
+ ),
  **(extra_attributes if extra_attributes is not None else {}),
  }

@@ -83,7 +86,7 @@ def images_generate(original_method, version, tracer):
  if hasattr(data, "revised_prompt")
  else ""
  ),
- }
+ },
  }
  ]
  span.set_attribute("llm.responses", json.dumps(response))
@@ -127,7 +130,9 @@ def async_images_generate(original_method, version, tracer):
  "llm.api": APIS["IMAGES_GENERATION"]["ENDPOINT"],
  "llm.model": kwargs.get("model"),
  "llm.stream": kwargs.get("stream"),
- "llm.prompts": json.dumps([{"role": "user", "content": kwargs.get("prompt", [])}]),
+ "llm.prompts": json.dumps(
+ [{"role": "user", "content": kwargs.get("prompt", [])}]
+ ),
  **(extra_attributes if extra_attributes is not None else {}),
  }

@@ -159,7 +164,7 @@ def async_images_generate(original_method, version, tracer):
  if hasattr(data, "revised_prompt")
  else ""
  ),
- }
+ },
  }
  ]
  span.set_attribute("llm.responses", json.dumps(response))
@@ -287,7 +292,8 @@ def chat_completions_create(original_method, version, tracer):
  if "content_filter_results" in choice
  else {}
  ),
- } for choice in result.choices
+ }
+ for choice in result.choices
  ]
  span.set_attribute("llm.responses", json.dumps(responses))
  else:
@@ -375,16 +381,22 @@ def chat_completions_create(original_method, version, tracer):
  elif tool_calls:
  for choice in chunk.choices:
  tool_call = ""
- if (choice.delta and choice.delta.tool_calls is not None):
+ if choice.delta and choice.delta.tool_calls is not None:
  toolcalls = choice.delta.tool_calls
  content = []
  for tool_call in toolcalls:
- if tool_call and tool_call.function is not None and tool_call.function.arguments is not None:
+ if (
+ tool_call
+ and tool_call.function is not None
+ and tool_call.function.arguments is not None
+ ):
  token_counts = estimate_tokens(
  tool_call.function.arguments
  )
  completion_tokens += token_counts
- content = content + [tool_call.function.arguments]
+ content = content + [
+ tool_call.function.arguments
+ ]
  else:
  content = content + []
  else:
@@ -540,7 +552,8 @@ def async_chat_completions_create(original_method, version, tracer):
  if "content_filter_results" in choice
  else {}
  ),
- } for choice in result.choices
+ }
+ for choice in result.choices
  ]
  span.set_attribute("llm.responses", json.dumps(responses))
  else:
@@ -628,16 +641,22 @@ def async_chat_completions_create(original_method, version, tracer):
  elif tool_calls:
  for choice in chunk.choices:
  tool_call = ""
- if (choice.delta and choice.delta.tool_calls is not None):
+ if choice.delta and choice.delta.tool_calls is not None:
  toolcalls = choice.delta.tool_calls
  content = []
  for tool_call in toolcalls:
- if tool_call and tool_call.function is not None and tool_call.function.arguments is not None:
+ if (
+ tool_call
+ and tool_call.function is not None
+ and tool_call.function.arguments is not None
+ ):
  token_counts = estimate_tokens(
  tool_call.function.arguments
  )
  completion_tokens += token_counts
- content = content + [tool_call.function.arguments]
+ content = content + [
+ tool_call.function.arguments
+ ]
  else:
  content = content + []
  else:
@@ -715,7 +734,9 @@ def embeddings_create(original_method, version, tracer):
  }

  if kwargs.get("encoding_format") is not None:
- span_attributes["llm.encoding.formats"] = json.dumps([kwargs.get("encoding_format")])
+ span_attributes["llm.encoding.formats"] = json.dumps(
+ [kwargs.get("encoding_format")]
+ )

  attributes = LLMSpanAttributes(**span_attributes)
  kwargs.get("encoding_format")
@@ -774,7 +795,9 @@ def async_embeddings_create(original_method, version, tracer):
  "url.full": base_url,
  "llm.api": APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
  "llm.model": kwargs.get("model"),
- "llm.prompts": json.dumps([{"role": "user", "content": kwargs.get("input", "")}]),
+ "llm.prompts": json.dumps(
+ [{"role": "user", "content": kwargs.get("input", "")}]
+ ),
  **(extra_attributes if extra_attributes is not None else {}),
  }

@@ -20,7 +20,9 @@ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from langtrace_python_sdk.constants.instrumentation.common import (
- LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY, SERVICE_PROVIDERS)
+ LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+ SERVICE_PROVIDERS,
+ )
  from langtrace_python_sdk.constants.instrumentation.pinecone import APIS


@@ -41,7 +43,7 @@ def generic_patch(original_method, method, version, tracer):
  "langtrace.version": "1.0.0",
  "db.system": "pinecone",
  "db.operation": api["OPERATION"],
- **(extra_attributes if extra_attributes is not None else {})
+ **(extra_attributes if extra_attributes is not None else {}),
  }

  attributes = DatabaseSpanAttributes(**span_attributes)
@@ -20,7 +20,9 @@ from opentelemetry.trace import SpanKind
  from opentelemetry.trace.status import Status, StatusCode

  from langtrace_python_sdk.constants.instrumentation.common import (
- LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY, SERVICE_PROVIDERS)
+ LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+ SERVICE_PROVIDERS,
+ )
  from langtrace_python_sdk.constants.instrumentation.qdrant import APIS


@@ -42,7 +44,7 @@ def collection_patch(method, version, tracer):
  "langtrace.version": "1.0.0",
  "db.system": "qdrant",
  "db.operation": api["OPERATION"],
- **(extra_attributes if extra_attributes is not None else {})
+ **(extra_attributes if extra_attributes is not None else {}),
  }

  if hasattr(instance, "name") and instance.name is not None:
@@ -16,99 +16,166 @@ limitations under the License.

  from typing import Optional

+ from langtrace_python_sdk.types import DisableInstrumentations, InstrumentationType
  from opentelemetry import trace
  from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.sdk.trace.export import (BatchSpanProcessor,
- ConsoleSpanExporter,
- SimpleSpanProcessor)
-
- from langtrace_python_sdk.extensions.langtrace_exporter import \
- LangTraceExporter
- from langtrace_python_sdk.instrumentation.anthropic.instrumentation import \
- AnthropicInstrumentation
- from langtrace_python_sdk.instrumentation.chroma.instrumentation import \
- ChromaInstrumentation
- from langtrace_python_sdk.instrumentation.cohere.instrumentation import \
- CohereInstrumentation
- from langtrace_python_sdk.instrumentation.groq.instrumentation import \
- GroqInstrumentation
- from langtrace_python_sdk.instrumentation.langchain.instrumentation import \
- LangchainInstrumentation
- from langtrace_python_sdk.instrumentation.langchain_community.instrumentation import \
- LangchainCommunityInstrumentation
- from langtrace_python_sdk.instrumentation.langchain_core.instrumentation import \
- LangchainCoreInstrumentation
- from langtrace_python_sdk.instrumentation.langgraph.instrumentation import \
- LanggraphInstrumentation
- from langtrace_python_sdk.instrumentation.llamaindex.instrumentation import \
- LlamaindexInstrumentation
- from langtrace_python_sdk.instrumentation.openai.instrumentation import \
- OpenAIInstrumentation
- from langtrace_python_sdk.instrumentation.pinecone.instrumentation import \
- PineconeInstrumentation
- from langtrace_python_sdk.instrumentation.qdrant.instrumentation import \
- QdrantInstrumentation
+ from opentelemetry.sdk.trace.export import (
+ BatchSpanProcessor,
+ ConsoleSpanExporter,
+ SimpleSpanProcessor,
+ )
+
+ from langtrace_python_sdk.extensions.langtrace_exporter import LangTraceExporter
+ from langtrace_python_sdk.instrumentation.anthropic.instrumentation import (
+ AnthropicInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.chroma.instrumentation import (
+ ChromaInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.cohere.instrumentation import (
+ CohereInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.groq.instrumentation import (
+ GroqInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.langchain.instrumentation import (
+ LangchainInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.langchain_community.instrumentation import (
+ LangchainCommunityInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.langchain_core.instrumentation import (
+ LangchainCoreInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.langgraph.instrumentation import (
+ LanggraphInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.llamaindex.instrumentation import (
+ LlamaindexInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.openai.instrumentation import (
+ OpenAIInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.pinecone.instrumentation import (
+ PineconeInstrumentation,
+ )
+ from langtrace_python_sdk.instrumentation.qdrant.instrumentation import (
+ QdrantInstrumentation,
+ )


  def init(
  api_key: str = None,
  batch: bool = True,
- write_to_langtrace_cloud: bool = True,
+ write_spans_to_console: bool = False,
  custom_remote_exporter=None,
  api_host: Optional[str] = None,
+ disable_instrumentations: Optional[DisableInstrumentations] = None,
  ):
  provider = TracerProvider()

  remote_write_exporter = (
- LangTraceExporter(api_key, write_to_langtrace_cloud, api_host=api_host)
+ LangTraceExporter(api_key=api_key, api_host=api_host)
  if custom_remote_exporter is None
  else custom_remote_exporter
  )
  console_exporter = ConsoleSpanExporter()
  batch_processor_remote = BatchSpanProcessor(remote_write_exporter)
  simple_processor_remote = SimpleSpanProcessor(remote_write_exporter)
- batch_processor_console = BatchSpanProcessor(console_exporter)
  simple_processor_console = SimpleSpanProcessor(console_exporter)

- if write_to_langtrace_cloud:
- provider.add_span_processor(batch_processor_remote)
+ if write_spans_to_console:
+ provider.add_span_processor(simple_processor_console)
+
  elif custom_remote_exporter is not None:
  if batch:
  provider.add_span_processor(batch_processor_remote)
  else:
  provider.add_span_processor(simple_processor_remote)
- else:
+
+ elif api_host is not None:
  if batch:
- provider.add_span_processor(batch_processor_console)
+ provider.add_span_processor(batch_processor_remote)
  else:
- provider.add_span_processor(simple_processor_console)
+ provider.add_span_processor(simple_processor_remote)
+ else:
+ provider.add_span_processor(batch_processor_remote)

  # Initialize tracer
  trace.set_tracer_provider(provider)

- openai_instrumentation = OpenAIInstrumentation()
- groq_instrumentation = GroqInstrumentation()
- pinecone_instrumentation = PineconeInstrumentation()
- llamaindex_instrumentation = LlamaindexInstrumentation()
- chroma_instrumentation = ChromaInstrumentation()
- qdrant_instrumentation = QdrantInstrumentation()
- langchain_instrumentation = LangchainInstrumentation()
- langchain_core_instrumentation = LangchainCoreInstrumentation()
- langchain_community_instrumentation = LangchainCommunityInstrumentation()
- langgraph_instrumentation = LanggraphInstrumentation()
- anthropic_instrumentation = AnthropicInstrumentation()
- cohere_instrumentation = CohereInstrumentation()
-
- # Call the instrument method with some arguments
- openai_instrumentation.instrument()
- groq_instrumentation.instrument()
- pinecone_instrumentation.instrument()
- llamaindex_instrumentation.instrument()
- chroma_instrumentation.instrument()
- qdrant_instrumentation.instrument()
- langchain_instrumentation.instrument()
- langchain_core_instrumentation.instrument()
- langchain_community_instrumentation.instrument()
- langgraph_instrumentation.instrument()
- anthropic_instrumentation.instrument()
- cohere_instrumentation.instrument()
+ all_instrumentations = {
+ "openai": OpenAIInstrumentation(),
+ "groq": GroqInstrumentation(),
+ "pinecone": PineconeInstrumentation(),
+ "llamaindex": LlamaindexInstrumentation(),
+ "chroma": ChromaInstrumentation(),
+ "qdrant": QdrantInstrumentation(),
+ "langchain": LangchainInstrumentation(),
+ "langchain_core": LangchainCoreInstrumentation(),
+ "langchain_community": LangchainCommunityInstrumentation(),
+ "langgraph": LanggraphInstrumentation(),
+ "anthropic": AnthropicInstrumentation(),
+ "cohere": CohereInstrumentation(),
+ }
+
+ init_instrumentations(disable_instrumentations, all_instrumentations)
+
+
+ def init_instrumentations(
+ disable_instrumentations: DisableInstrumentations, all_instrumentations: dict
+ ):
+ if disable_instrumentations is None:
+ for _, v in all_instrumentations.items():
+ v.instrument()
+ return
+ validate_instrumentations(disable_instrumentations)
+
+ for key in disable_instrumentations:
+ for vendor in disable_instrumentations[key]:
+ if key == "only":
+ filtered_dict = {
+ k: v for k, v in all_instrumentations.items() if k != vendor.value
+ }
+ for _, v in filtered_dict.items():
+ v.instrument()
+ else:
+ filtered_dict = {
+ k: v for k, v in all_instrumentations.items() if k == vendor.value
+ }
+
+ for _, v in filtered_dict.items():
+ v.instrument()
+
+
+ def validate_instrumentations(disable_instrumentations):
+ if disable_instrumentations is not None:
+ for key, value in disable_instrumentations.items():
+ if isinstance(value, str):
+ # Convert single string to list of enum values
+ disable_instrumentations[key] = [InstrumentationType.from_string(value)]
+ elif isinstance(value, list):
+ # Convert list of strings to list of enum values
+ disable_instrumentations[key] = [
+ (
+ InstrumentationType.from_string(item)
+ if isinstance(item, str)
+ else item
+ )
+ for item in value
+ ]
+ # Validate all items are of enum type
+ if not all(
+ isinstance(item, InstrumentationType)
+ for item in disable_instrumentations[key]
+ ):
+ raise TypeError(
+ f"All items in {key} must be of type InstrumentationType"
+ )
+ if (
+ disable_instrumentations.get("all_except") is not None
+ and disable_instrumentations.get("only") is not None
+ ):
+ raise ValueError(
+ "Cannot specify both only and all_except in disable_instrumentations"
+ )
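
For reference, a minimal usage sketch of the reworked `init` above. The API key is a placeholder; per `init_instrumentations`, the `"only"` key disables just the listed vendors (everything else is instrumented), while `"all_except"` disables everything but the listed vendors:

```python
from langtrace_python_sdk import langtrace

# Option A: print spans to the console (SimpleSpanProcessor + ConsoleSpanExporter path above).
# langtrace.init(write_spans_to_console=True)

# Option B: export remotely but disable only the OpenAI instrumentation.
# Plain strings are normalized to InstrumentationType members by validate_instrumentations.
langtrace.init(
    api_key="<LANGTRACE_API_KEY>",  # placeholder
    disable_instrumentations={"only": ["openai"]},
)
```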
@@ -0,0 +1,29 @@
+ from typing import List, TypedDict
+ from enum import Enum
+
+
+ class InstrumentationType(Enum):
+ OPENAI = "openai"
+ COHERE = "cohere"
+ ANTHROPIC = "anthropic"
+ GROQ = "groq"
+ PINECONE = "pinecone"
+ LLAMAINDEX = "llamaindex"
+ CHROMADB = "chromadb"
+ QDRANT = "qdrant"
+ LANGCHAIN = "langchain"
+ LANGCHAIN_CORE = "langchain_core"
+ LANGCHAIN_COMMUNITY = "langchain_community"
+ LANGGRAPH = "langgraph"
+
+ @staticmethod
+ def from_string(value: str):
+ try:
+ return InstrumentationType[value.upper()]
+ except KeyError:
+ raise ValueError(f"Invalid value for InstrumentationType: {value}")
+
+
+ class DisableInstrumentations(TypedDict, total=False):
+ all_except: List[InstrumentationType]
+ only: List[InstrumentationType]
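
A short sketch of how these new types behave, based only on the definitions above:

```python
from langtrace_python_sdk.types import DisableInstrumentations, InstrumentationType

# from_string upper-cases the input and looks it up by member name;
# unknown names raise ValueError.
assert InstrumentationType.from_string("openai") is InstrumentationType.OPENAI

# Both keys are optional (total=False); init() rejects configs that set both.
config: DisableInstrumentations = {"all_except": [InstrumentationType.ANTHROPIC]}
```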
@@ -16,10 +16,8 @@ limitations under the License.

  from tiktoken import get_encoding

- from langtrace_python_sdk.constants.instrumentation.common import \
- TIKTOKEN_MODEL_MAPPING
- from langtrace_python_sdk.constants.instrumentation.openai import \
- OPENAI_COST_TABLE
+ from langtrace_python_sdk.constants.instrumentation.common import TIKTOKEN_MODEL_MAPPING
+ from langtrace_python_sdk.constants.instrumentation.openai import OPENAI_COST_TABLE


  def estimate_tokens(prompt):
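
This hunk only reshapes imports; the body of `estimate_tokens` is not shown. A hypothetical sketch of prompt token estimation with tiktoken, assuming the `cl100k_base` encoding:

```python
# Hypothetical sketch; the actual estimate_tokens implementation is not part of this hunk.
from tiktoken import get_encoding


def estimate_tokens_sketch(prompt: str) -> int:
    if not prompt:
        return 0
    encoding = get_encoding("cl100k_base")  # assumed encoding
    return len(encoding.encode(prompt))


print(estimate_tokens_sketch("Hello, world"))  # small positive integer
```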
@@ -14,15 +14,15 @@ See the License for the specific language governing permissions and
  limitations under the License.
  """

-
  import asyncio
  from functools import wraps

  from opentelemetry import baggage, context, trace
  from opentelemetry.trace import SpanKind

- from langtrace_python_sdk.constants.instrumentation.common import \
- LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY
+ from langtrace_python_sdk.constants.instrumentation.common import (
+ LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+ )


  def with_langtrace_root_span(
@@ -1 +1 @@
- __version__ = "2.0.4"
+ __version__ = "2.0.6"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: langtrace-python-sdk
- Version: 2.0.4
+ Version: 2.0.6
  Summary: Python SDK for LangTrace
  Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
  Author-email: Scale3 Labs <engineering@scale3labs.com>
@@ -149,7 +149,7 @@ pip install langtrace-python-sdk

  ``` python
  from langtrace_python_sdk import langtrace # Must precede any llm module imports
- langtrace.init(write_to_langtrace_cloud=False)
+ langtrace.init(write_spans_to_console=True)
  ```

  ## Langtrace self hosted custom exporter