monocle-apptrace 0.2.0__tar.gz → 0.3.0b2__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of monocle-apptrace might be problematic.

Files changed (70)
  1. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/.gitignore +2 -1
  2. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/CHANGELOG.md +14 -0
  3. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/Monocle_User_Guide.md +9 -0
  4. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/PKG-INFO +5 -2
  5. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/pyproject.toml +5 -2
  6. monocle_apptrace-0.3.0b2/src/monocle_apptrace/botocore/__init__.py +9 -0
  7. monocle_apptrace-0.3.0b2/src/monocle_apptrace/constants.py +36 -0
  8. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/exporters/aws/s3_exporter.py +16 -16
  9. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/exporters/azure/blob_exporter.py +2 -5
  10. monocle_apptrace-0.3.0b2/src/monocle_apptrace/exporters/base_exporter.py +48 -0
  11. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/instrumentor.py +3 -3
  12. monocle_apptrace-0.3.0b2/src/monocle_apptrace/message_processing.py +80 -0
  13. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/botocore_entities.json +27 -0
  14. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/haystack_entities.json +57 -0
  15. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json +57 -0
  16. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json +57 -0
  17. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/retrieval/haystack_entities.json +31 -0
  18. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json +31 -0
  19. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/retrieval/llamaindex_entities.json +31 -0
  20. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/botocore_methods.json +13 -0
  21. monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/haystack_methods.json +45 -0
  22. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/maps/llamaindex_methods.json +18 -0
  23. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/utils.py +92 -12
  24. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/wrap_common.py +208 -114
  25. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/wrapper.py +2 -1
  26. monocle_apptrace-0.2.0/Pipfile.lock +0 -478
  27. monocle_apptrace-0.2.0/src/monocle_apptrace/constants.py +0 -22
  28. monocle_apptrace-0.2.0/src/monocle_apptrace/exporters/base_exporter.py +0 -47
  29. monocle_apptrace-0.2.0/src/monocle_apptrace/haystack/wrap_node.py +0 -27
  30. monocle_apptrace-0.2.0/src/monocle_apptrace/haystack/wrap_openai.py +0 -44
  31. monocle_apptrace-0.2.0/src/monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json +0 -35
  32. monocle_apptrace-0.2.0/src/monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json +0 -35
  33. monocle_apptrace-0.2.0/src/monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json +0 -27
  34. monocle_apptrace-0.2.0/src/monocle_apptrace/metamodel/maps/attributes/retrieval/llamaindex_entities.json +0 -27
  35. monocle_apptrace-0.2.0/src/monocle_apptrace/metamodel/maps/haystack_methods.json +0 -25
  36. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/CODEOWNERS.md +0 -0
  37. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/CODE_OF_CONDUCT.md +0 -0
  38. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/CONTRIBUTING.md +0 -0
  39. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/COPYRIGHT.template +0 -0
  40. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/LICENSE +0 -0
  41. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/MAINTAINER.md +0 -0
  42. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/Monocle_committer_guide.md +0 -0
  43. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/Monocle_contributor_guide.md +0 -0
  44. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/NOTICE +0 -0
  45. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/README.md +0 -0
  46. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/SECURITY.md +0 -0
  47. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/README.md +0 -0
  48. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/__init__.py +0 -0
  49. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/exporters/exporter_processor.py +0 -0
  50. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/exporters/file_exporter.py +0 -0
  51. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/exporters/monocle_exporters.py +0 -0
  52. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/exporters/okahu/okahu_exporter.py +0 -0
  53. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/haystack/__init__.py +0 -0
  54. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/haystack/wrap_pipeline.py +0 -0
  55. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/langchain/__init__.py +0 -0
  56. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/llamaindex/__init__.py +0 -0
  57. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/README.md +0 -0
  58. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/entities/README.md +0 -0
  59. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/entities/app_hosting_types.json +0 -0
  60. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/entities/entities.json +0 -0
  61. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/entities/inference_types.json +0 -0
  62. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/entities/model_types.json +0 -0
  63. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/entities/vector_store_types.json +0 -0
  64. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/entities/workflow_types.json +0 -0
  65. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/maps/langchain_methods.json +0 -0
  66. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/spans/README.md +0 -0
  67. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/spans/span_example.json +0 -0
  68. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/spans/span_format.json +0 -0
  69. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/src/monocle_apptrace/metamodel/spans/span_types.json +0 -0
  70. {monocle_apptrace-0.2.0 → monocle_apptrace-0.3.0b2}/tox.ini +0 -0
--- monocle_apptrace-0.2.0/.gitignore
+++ monocle_apptrace-0.3.0b2/.gitignore
@@ -12,4 +12,5 @@ traces.txt
 dist
 .tox
 .DS_Store
-Pipfile**
+Pipfile**
+launch.json
--- monocle_apptrace-0.2.0/CHANGELOG.md
+++ monocle_apptrace-0.3.0b2/CHANGELOG.md
@@ -1,3 +1,17 @@
+## Version 0.3.0b2 (2024-12-10)
+
+- Add dev dependency for Mistral AI integration ([#81](https://github.com/monocle2ai/monocle/pull/81))
+- Add VectorStore deployment URL capture support ([#80](https://github.com/monocle2ai/monocle/pull/80))
+- Clean up cloud exporter implementation ([#79](https://github.com/monocle2ai/monocle/pull/79))
+- Capture inference span input/output events attributes ([#77](https://github.com/monocle2ai/monocle/pull/77))
+- Add release automation workflows ([#76](https://github.com/monocle2ai/monocle/pull/76))
+- Fix gaps in Monocle SDK implementation ([#72](https://github.com/monocle2ai/monocle/pull/72))
+- Add kwargs and return value handling in Accessor ([#71](https://github.com/monocle2ai/monocle/pull/71))
+- Update workflow name formatting ([#69](https://github.com/monocle2ai/monocle/pull/69))
+- Implement Haystack metamodel support ([#68](https://github.com/monocle2ai/monocle/pull/68))
+
+## Version 0.2.0 (2024-12-05)
+
 ## 0.2.0 (Oct 22, 2024)
 
 - Ndjson format for S3 and Blob exporters ([#61](https://github.com/monocle2ai/monocle/pull/61))
--- monocle_apptrace-0.2.0/Monocle_User_Guide.md
+++ monocle_apptrace-0.3.0b2/Monocle_User_Guide.md
@@ -40,6 +40,15 @@ You need to import monocle package and invoke the API ``setup_monocle_telemetry(
 
 ### Using Monocle's out of box support of genAI technology components
 Monocle community has done the hard work of figuring out what to trace and how to extract relevant details from multiple genAI technology components. For example, if you have a python app coded using LlamaIndex and using models hostsed in OpenAI, Monocle can seamlessly trace your app. All you need to do enable Monocle tracing.
+
+### Using Monocle's Support for Adding Custom Attributes
+Monocle provides users with the ability to add custom attributes to various spans, such as inference and retrieval spans, by utilizing the output processor within its metamodel. This feature allows for dynamic attribute assignment through lambda functions, which operate on an arguments dictionary.
+The arguments dictionary contains key-value pairs that can be used to compute custom attributes. The dictionary includes the following components:
+```python
+arguments = {"instance":instance, "args":args, "kwargs":kwargs, "output":return_value}
+```
+By leveraging this dictionary, users can define custom attributes for spans, enabling the integration of additional context and information into the tracing process. The lambda functions used in the attributes field can access and process these values to enrich the span with relevant custom data.
+
 #### Example - Enable Monocle tracing in your application
 ```python
 from monocle_apptrace.instrumentor import setup_monocle_telemetry
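
For readers skimming the guide addition above: the accessor lambdas it describes receive that arguments dictionary at span-creation time. A minimal, self-contained sketch of the idea; `FakeLLM` and its field values are hypothetical stand-ins, not Monocle APIs, and only the shape of `arguments` comes from the guide text:

```python
# Sketch of how an output-processor accessor sees the arguments dictionary.
class FakeLLM:
    def __init__(self):
        # Hypothetical attributes standing in for a wrapped LLM client instance.
        self.model = "gpt-4"
        self.azure_endpoint = "https://example.openai.azure.com"

instance = FakeLLM()
args, kwargs, return_value = (), {}, "some model output"

# The dictionary described in the user guide section above.
arguments = {"instance": instance, "args": args, "kwargs": kwargs, "output": return_value}

# An accessor in the style of the metamodel entity files shown later in this diff.
accessor = lambda arguments: "model.llm." + arguments["instance"].model

print(accessor(arguments))  # -> model.llm.gpt-4
```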
--- monocle_apptrace-0.2.0/PKG-INFO
+++ monocle_apptrace-0.3.0b2/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.4
 Name: monocle_apptrace
-Version: 0.2.0
+Version: 0.3.0b2
 Summary: package with monocle genAI tracing
 Project-URL: Homepage, https://github.com/monocle2ai/monocle
 Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
@@ -25,11 +25,14 @@ Requires-Dist: faiss-cpu==1.8.0; extra == 'dev'
 Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
 Requires-Dist: langchain-chroma==0.1.1; extra == 'dev'
 Requires-Dist: langchain-community==0.2.5; extra == 'dev'
+Requires-Dist: langchain-mistralai==0.1.13; extra == 'dev'
 Requires-Dist: langchain-openai==0.1.8; extra == 'dev'
 Requires-Dist: langchain==0.2.5; extra == 'dev'
 Requires-Dist: llama-index-embeddings-huggingface==0.2.0; extra == 'dev'
+Requires-Dist: llama-index-llms-mistralai==0.1.20; extra == 'dev'
 Requires-Dist: llama-index-vector-stores-chroma==0.1.9; extra == 'dev'
 Requires-Dist: llama-index==0.10.30; extra == 'dev'
+Requires-Dist: mistral-haystack==0.0.2; extra == 'dev'
 Requires-Dist: numpy==1.26.4; extra == 'dev'
 Requires-Dist: parameterized==0.9.0; extra == 'dev'
 Requires-Dist: pytest==8.0.0; extra == 'dev'
--- monocle_apptrace-0.2.0/pyproject.toml
+++ monocle_apptrace-0.3.0b2/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "monocle_apptrace"
-version = "0.2.0"
+version = "0.3.0b2"
 authors = []
 description = "package with monocle genAI tracing"
 readme = "README.md"
@@ -43,7 +43,10 @@ dev = [
     'llama-index==0.10.30',
     'llama-index-embeddings-huggingface==0.2.0',
     'llama-index-vector-stores-chroma==0.1.9',
-    'parameterized==0.9.0'
+    'parameterized==0.9.0',
+    'llama-index-llms-mistralai==0.1.20',
+    'langchain-mistralai==0.1.13',
+    'mistral-haystack==0.0.2'
 ]
 
 azure = [
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/botocore/__init__.py
@@ -0,0 +1,9 @@
+import os
+from monocle_apptrace.utils import get_wrapper_methods_config
+
+parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+BOTOCORE_METHODS = get_wrapper_methods_config(
+    wrapper_methods_config_path=os.path.join(parent_dir, 'metamodel', 'maps', 'botocore_methods.json'),
+    attributes_config_base_path=os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/constants.py
@@ -0,0 +1,36 @@
+# Azure environment constants
+AZURE_ML_ENDPOINT_ENV_NAME = "AZUREML_ENTRY_SCRIPT"
+AZURE_FUNCTION_WORKER_ENV_NAME = "FUNCTIONS_WORKER_RUNTIME"
+AZURE_APP_SERVICE_ENV_NAME = "WEBSITE_SITE_NAME"
+AWS_LAMBDA_ENV_NAME = "AWS_LAMBDA_RUNTIME_API"
+GITHUB_CODESPACE_ENV_NAME = "CODESPACES"
+
+AWS_LAMBDA_FUNCTION_IDENTIFIER_ENV_NAME = "AWS_LAMBDA_FUNCTION_NAME"
+AZURE_FUNCTION_IDENTIFIER_ENV_NAME = "WEBSITE_SITE_NAME"
+AZURE_APP_SERVICE_IDENTIFIER_ENV_NAME = "WEBSITE_DEPLOYMENT_ID"
+GITHUB_CODESPACE_IDENTIFIER_ENV_NAME = "GITHUB_REPOSITORY"
+# Azure naming reference can be found here
+# https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/ready/azure-best-practices/resource-abbreviations
+AZURE_FUNCTION_NAME = "azure.func"
+AZURE_APP_SERVICE_NAME = "azure.asp"
+AZURE_ML_SERVICE_NAME = "azure.mlw"
+AWS_LAMBDA_SERVICE_NAME = "aws.lambda"
+GITHUB_CODESPACE_SERVICE_NAME = "github_codespace"
+
+# Env variables to identify infra service type
+service_type_map = {
+    AZURE_ML_ENDPOINT_ENV_NAME: AZURE_ML_SERVICE_NAME,
+    AZURE_APP_SERVICE_ENV_NAME: AZURE_APP_SERVICE_NAME,
+    AZURE_FUNCTION_WORKER_ENV_NAME: AZURE_FUNCTION_NAME,
+    AWS_LAMBDA_ENV_NAME: AWS_LAMBDA_SERVICE_NAME,
+    GITHUB_CODESPACE_ENV_NAME: GITHUB_CODESPACE_SERVICE_NAME
+}
+
+# Env variables to identify infra service name
+service_name_map = {
+    AZURE_APP_SERVICE_NAME: AZURE_APP_SERVICE_IDENTIFIER_ENV_NAME,
+    AZURE_FUNCTION_NAME: AZURE_FUNCTION_IDENTIFIER_ENV_NAME,
+    AZURE_ML_SERVICE_NAME: AZURE_ML_ENDPOINT_ENV_NAME,
+    AWS_LAMBDA_SERVICE_NAME: AWS_LAMBDA_FUNCTION_IDENTIFIER_ENV_NAME,
+    GITHUB_CODESPACE_SERVICE_NAME: GITHUB_CODESPACE_IDENTIFIER_ENV_NAME
+}
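
The first map keys an environment variable that marks a hosting platform to a service-type label; the second keys that label back to the variable carrying the deployment's name. A plausible consumer of these maps, assuming a simple scan of `os.environ`; the helper below is illustrative only, the package's real lookup lives in utils.py, which this diff does not show:

```python
import os

# Illustrative helper, not the package's actual code: resolve the infra
# service type and name from the environment using the two maps above.
def detect_infra_service(service_type_map, service_name_map):
    for env_var, service_type in service_type_map.items():
        if env_var in os.environ:
            name_env = service_name_map.get(service_type)
            service_name = os.environ.get(name_env, "") if name_env else ""
            return service_type, service_name
    return None, ""  # not running on a recognized managed platform
```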
--- monocle_apptrace-0.2.0/src/monocle_apptrace/exporters/aws/s3_exporter.py
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/exporters/aws/s3_exporter.py
@@ -6,6 +6,13 @@ import logging
 import asyncio
 import boto3
 from botocore.exceptions import ClientError
+from botocore.exceptions import (
+    BotoCoreError,
+    ConnectionClosedError,
+    ConnectTimeoutError,
+    EndpointConnectionError,
+    ReadTimeoutError,
+)
 from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
 from monocle_apptrace.exporters.base_exporter import SpanExporterBase
@@ -14,11 +21,11 @@ import json
 logger = logging.getLogger(__name__)
 
 class S3SpanExporter(SpanExporterBase):
-    def __init__(self, bucket_name=None, region_name="us-east-1"):
+    def __init__(self, bucket_name=None, region_name=None):
         super().__init__()
         # Use environment variables if credentials are not provided
-        DEFAULT_FILE_PREFIX = "monocle_trace__"
-        DEFAULT_TIME_FORMAT = "%Y-%m-%d__%H.%M.%S"
+        DEFAULT_FILE_PREFIX = "monocle_trace_"
+        DEFAULT_TIME_FORMAT = "%Y-%m-%d_%H.%M.%S"
         self.max_batch_size = 500
         self.export_interval = 1
         self.s3_client = boto3.client(
@@ -36,13 +43,10 @@ class S3SpanExporter(SpanExporterBase):
         # Check if bucket exists or create it
         if not self.__bucket_exists(self.bucket_name):
             try:
-                if region_name == "us-east-1":
-                    self.s3_client.create_bucket(Bucket=self.bucket_name)
-                else:
-                    self.s3_client.create_bucket(
-                        Bucket=self.bucket_name,
-                        CreateBucketConfiguration={'LocationConstraint': region_name}
-                    )
+                self.s3_client.create_bucket(
+                    Bucket=self.bucket_name,
+                    CreateBucketConfiguration={'LocationConstraint': region_name}
+                )
                 logger.info(f"Bucket {self.bucket_name} created successfully.")
             except ClientError as e:
                 logger.error(f"Error creating bucket {self.bucket_name}: {e}")
@@ -131,15 +135,11 @@ class S3SpanExporter(SpanExporterBase):
         serialized_data = self.__serialize_spans(batch_to_export)
         self.export_queue = self.export_queue[self.max_batch_size:]
         try:
-            if asyncio.get_event_loop().is_running():
-                task = asyncio.create_task(self._retry_with_backoff(self.__upload_to_s3, serialized_data))
-                await task
-            else:
-                await self._retry_with_backoff(self.__upload_to_s3, serialized_data)
-
+            self.__upload_to_s3(serialized_data)
         except Exception as e:
             logger.error(f"Failed to upload span batch: {e}")
 
+    @SpanExporterBase.retry_with_backoff(exceptions=(EndpointConnectionError, ConnectionClosedError, ReadTimeoutError, ConnectTimeoutError))
     def __upload_to_s3(self, span_data_batch: str):
         current_time = datetime.datetime.now().strftime(self.time_format)
         file_name = f"{self.file_prefix}{current_time}.ndjson"
--- monocle_apptrace-0.2.0/src/monocle_apptrace/exporters/azure/blob_exporter.py
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/exporters/azure/blob_exporter.py
@@ -105,14 +105,11 @@ class AzureBlobSpanExporter(SpanExporterBase):
         serialized_data = self.__serialize_spans(batch_to_export)
         self.export_queue = self.export_queue[self.max_batch_size:]
         try:
-            if asyncio.get_event_loop().is_running():
-                task = asyncio.create_task(self._retry_with_backoff(self.__upload_to_blob, serialized_data))
-                await task
-            else:
-                await self._retry_with_backoff(self.__upload_to_blob, serialized_data)
+            self.__upload_to_blob(serialized_data)
         except Exception as e:
             logger.error(f"Failed to upload span batch: {e}")
 
+    @SpanExporterBase.retry_with_backoff(exceptions=(ResourceNotFoundError, ClientAuthenticationError, ServiceRequestError))
     def __upload_to_blob(self, span_data_batch: str):
         current_time = datetime.datetime.now().strftime(self.time_format)
         file_name = f"{self.file_prefix}{current_time}.ndjson"
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/exporters/base_exporter.py
@@ -0,0 +1,48 @@
+import time
+import random
+import logging
+from abc import ABC, abstractmethod
+from opentelemetry.sdk.trace import ReadableSpan
+from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+from typing import Sequence
+import asyncio
+
+logger = logging.getLogger(__name__)
+
+class SpanExporterBase(ABC):
+    def __init__(self):
+        self.backoff_factor = 2
+        self.max_retries = 10
+        self.export_queue = []
+        self.last_export_time = time.time()
+
+    @abstractmethod
+    async def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
+        pass
+
+    @abstractmethod
+    async def force_flush(self, timeout_millis: int = 30000) -> bool:
+        pass
+
+    def shutdown(self) -> None:
+        pass
+
+    @staticmethod
+    def retry_with_backoff(retries=3, backoff_in_seconds=1, max_backoff_in_seconds=32, exceptions=(Exception,)):
+        def decorator(func):
+            def wrapper(*args, **kwargs):
+                attempt = 0
+                while attempt < retries:
+                    try:
+                        return func(*args, **kwargs)
+                    except exceptions as e:
+                        attempt += 1
+                        sleep_time = min(max_backoff_in_seconds, backoff_in_seconds * (2 ** (attempt - 1)))
+                        sleep_time = sleep_time * (1 + random.uniform(-0.1, 0.1))  # Add jitter
+                        logger.warning(f"Network connectivity error, Attempt {attempt} failed: {e}. Retrying in {sleep_time:.2f} seconds...")
+                        time.sleep(sleep_time)
+                raise Exception(f"Failed after {retries} attempts")
+
+            return wrapper
+
+        return decorator
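
`retry_with_backoff` wraps a synchronous callable, retrying the listed exception types with exponential backoff capped at `max_backoff_in_seconds` plus roughly ±10% jitter; it replaces the ad-hoc `_retry_with_backoff` coroutine the exporters previously awaited. A quick usage sketch against a stand-in flaky call, assuming the module above is importable:

```python
# Usage sketch for the decorator above; flaky_upload is a stand-in for a
# network call such as the exporters' __upload_to_s3 / __upload_to_blob.
calls = {"count": 0}

@SpanExporterBase.retry_with_backoff(retries=3, backoff_in_seconds=0.1,
                                     exceptions=(ConnectionError,))
def flaky_upload():
    calls["count"] += 1
    if calls["count"] < 3:
        raise ConnectionError("transient failure")
    return "ok"

print(flaky_upload())  # succeeds on the third attempt, after two backoff sleeps
```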
--- monocle_apptrace-0.2.0/src/monocle_apptrace/instrumentor.py
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/instrumentor.py
@@ -33,8 +33,8 @@ class MonocleInstrumentor(BaseInstrumentor):
         return _instruments
 
     def _instrument(self, **kwargs):
-        tracer_provider = kwargs.get("tracer_provider")
-        tracer = get_tracer(instrumenting_module_name=__name__, tracer_provider=tracer_provider)
+        tracer_provider: TracerProvider = kwargs.get("tracer_provider")
+        tracer = get_tracer(instrumenting_module_name="monocle_apptrace", tracer_provider=tracer_provider)
 
         user_method_list = [
             {
@@ -107,7 +107,7 @@ def setup_monocle_telemetry(
     instrumentor = MonocleInstrumentor(user_wrapper_methods=wrapper_methods or [])
     # instrumentor.app_name = workflow_name
     if not instrumentor.is_instrumented_by_opentelemetry:
-        instrumentor.instrument()
+        instrumentor.instrument(trace_provider=trace_provider)
 
     def on_processor_start(span: Span, parent_context):
         context_properties = get_value(SESSION_PROPERTIES_KEY)
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/message_processing.py
@@ -0,0 +1,80 @@
+"""
+This module provides utility functions for extracting system, user,
+and assistant messages from various input formats.
+"""
+
+import logging
+from monocle_apptrace.utils import get_attribute
+DATA_INPUT_KEY = "data.input"
+
+logger = logging.getLogger(__name__)
+def extract_messages(args):
+    """Extract system and user messages"""
+    try:
+        messages = []
+        args_input = get_attribute(DATA_INPUT_KEY)
+        if args_input:
+            messages.append(args_input)
+            return messages
+        if args and isinstance(args, tuple) and len(args) > 0:
+            if hasattr(args[0], "messages") and isinstance(args[0].messages, list):
+                for msg in args[0].messages:
+                    if hasattr(msg, 'content') and hasattr(msg, 'type'):
+                        messages.append({msg.type: msg.content})
+            elif isinstance(args[0], list): #llama
+                for msg in args[0]:
+                    if hasattr(msg, 'content') and hasattr(msg, 'role'):
+                        if hasattr(msg.role, 'value'):
+                            role = msg.role.value
+                        else:
+                            role = msg.role
+                        if msg.role == "system":
+                            messages.append({role: msg.content})
+                        elif msg.role in ["user", "human"]:
+                            user_message = extract_query_from_content(msg.content)
+                            messages.append({role: user_message})
+        return messages
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        return []
+
+
+def extract_assistant_message(response):
+    try:
+        if isinstance(response, str):
+            return [response]
+        if hasattr(response, "content"):
+            return [response.content]
+        if hasattr(response, "message") and hasattr(response.message, "content"):
+            return [response.message.content]
+        if "replies" in response:
+            reply = response["replies"][0]
+            if hasattr(reply, 'content'):
+                return [reply.content]
+            return [reply]
+        if isinstance(response, dict):
+            return [response]
+        return []
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
+        return []
+
+
+def extract_query_from_content(content):
+    try:
+        query_prefix = "Query:"
+        answer_prefix = "Answer:"
+        query_start = content.find(query_prefix)
+        if query_start == -1:
+            return None
+
+        query_start += len(query_prefix)
+        answer_start = content.find(answer_prefix, query_start)
+        if answer_start == -1:
+            query = content[query_start:].strip()
+        else:
+            query = content[query_start:answer_start].strip()
+        return query
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_query_from_content: %s", str(e))
+        return ""
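
The helpers above are defensive by design: each catches broadly and logs a warning rather than letting trace capture break the host application. Their expected behavior on simple inputs:

```python
# Expected behavior of the helpers above on simple inputs.
content = "Context: some retrieved docs. Query: What is Monocle? Answer:"
print(extract_query_from_content(content))        # -> "What is Monocle?"

print(extract_assistant_message("plain reply"))   # -> ["plain reply"]
print(extract_assistant_message({"replies": ["Hi there"]}))  # -> ["Hi there"]
```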
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/botocore_entities.json
@@ -0,0 +1,27 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.aws_sagemaker'"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['instance'].meta.endpoint_url"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['kwargs'].get('EndpointName', '')"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.' + (resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['kwargs'].get('EndpointName', ''))"
+            }
+        ]
+    ]
+}
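
In these metamodel files the `accessor` values are stored as lambda source strings, so the instrumentation layer presumably evaluates them with the arguments dictionary, and helpers such as `resolve_from_alias` and `extract_messages`, in scope; the actual loading code lives in utils.py and wrap_common.py, which this diff does not display. A sketch of the idea only:

```python
# Sketch only: turning an accessor string from the JSON above into a callable.
# The package's real loader may differ; this is an assumption, not its API.
accessor_src = "lambda arguments: 'inference.aws_sagemaker'"
accessor = eval(accessor_src)  # evaluated with helper functions in scope
print(accessor({"instance": None, "args": (), "kwargs": {}}))  # -> inference.aws_sagemaker
```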
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/haystack_entities.json
@@ -0,0 +1,57 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.azure_oai'"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.'+resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            }
+        ]
+    ],
+    "events": [
+        { "name":"data.input",
+          "attributes": [
+
+            {
+                "_comment": "this is instruction and user query to LLM",
+                "attribute": "input",
+                "accessor": "lambda arguments: extract_messages(arguments['args'])"
+            }
+          ]
+        },
+        {
+          "name":"data.output",
+          "attributes": [
+            {
+                "_comment": "this is response from LLM",
+                "attribute": "response",
+                "accessor": "lambda response: extract_assistant_message(response)"
+            }
+          ]
+        }
+    ]
+}
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json
@@ -0,0 +1,57 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.azure_oai'"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['instance'].model_id"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.'+ (resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['instance'].model_id)"
+            }
+        ]
+    ],
+    "events": [
+        { "name":"data.input",
+          "attributes": [
+
+            {
+                "_comment": "this is instruction and user query to LLM",
+                "attribute": "input",
+                "accessor": "lambda arguments: extract_messages(arguments['args'])"
+            }
+          ]
+        },
+        {
+          "name":"data.output",
+          "attributes": [
+            {
+                "_comment": "this is response from LLM",
+                "attribute": "response",
+                "accessor": "lambda response: extract_assistant_message(response)"
+            }
+          ]
+        }
+    ]
+}
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json
@@ -0,0 +1,57 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.azure_oai'"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.'+resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            }
+        ]
+    ],
+    "events": [
+        { "name":"data.input",
+          "attributes": [
+
+            {
+                "_comment": "this is instruction and user query to LLM",
+                "attribute": "input",
+                "accessor": "lambda arguments: extract_messages(arguments['args'])"
+            }
+          ]
+        },
+        {
+          "name":"data.output",
+          "attributes": [
+            {
+                "_comment": "this is response from LLM",
+                "attribute": "response",
+                "accessor": "lambda response: extract_assistant_message(response)"
+            }
+          ]
+        }
+    ]
+}
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/retrieval/haystack_entities.json
@@ -0,0 +1,31 @@
+{
+    "type": "retrieval",
+    "attributes": [
+        [
+            {
+                "_comment": "vector store name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__class__.__name__"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'vectorstore.'+resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__class__.__name__"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: get_vectorstore_deployment(resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__dict__)"
+            }
+        ],
+        [
+            {
+                "_comment": "embedding model name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: get_embedding_model()"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.embedding.'+get_embedding_model()"
+            }
+        ]
+    ]
+}
--- /dev/null
+++ monocle_apptrace-0.3.0b2/src/monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json
@@ -0,0 +1,31 @@
+{
+    "type": "retrieval",
+    "attributes": [
+        [
+            {
+                "_comment": "vector store name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: type(arguments['instance'].vectorstore).__name__"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'vectorstore.'+type(arguments['instance'].vectorstore).__name__"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: get_vectorstore_deployment(arguments['instance'].vectorstore.__dict__)"
+            }
+        ],
+        [
+            {
+                "_comment": "embedding model name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: arguments['instance'].vectorstore.embeddings.model"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.embedding.'+arguments['instance'].vectorstore.embeddings.model"
+            }
+        ]
+    ]
+}