monocle-apptrace 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,77 @@
+ # Monocle User Guide
+
+ ## Monocle Concepts
+ ### Traces
+ Traces are the full view of a single end-to-end application KPI, e.g. a chatbot application providing a response to an end user's question. A trace consists of various metadata about the application run, including status, start time, duration, and inputs/outputs. It also includes a list of the individual steps, aka "spans", with details about each step.
+ It's typically the workflow code components of an application that generate the traces for application runs.
+ ### Spans
+ Spans are the individual steps executed by the application to perform a GenAI-related task, e.g. the app retrieving vectors from a DB, or the app querying an LLM for inference. A span includes the type of operation, start time, duration, and metadata relevant to that step, e.g. the model name, parameters, and model endpoint/server for an inference request.
+ It's typically the workflow code components of an application that generate the spans for application runs.
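+ For illustration, a span printed by OpenTelemetry's ConsoleSpanExporter (used in the examples below) looks roughly like this; the timings are made up, and the `workflow_name`/`workflow_type` attributes are the ones Monocle sets:
+ ```
+ {
+     "name": "langchain.workflow",
+     "kind": "SpanKind.INTERNAL",
+     "start_time": "2024-06-01T12:00:00.000000Z",
+     "end_time": "2024-06-01T12:00:01.250000Z",
+     "attributes": {
+         "workflow_name": "simple_math_app",
+         "workflow_type": "workflow.langchain"
+     }
+ }
+ ```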
+
+ ## Setup Monocle
+ - You can install Monocle library releases from PyPI
+ ```
+ > pip install monocle_apptrace
+ ```
+ - You can locally build and install Monocle library from source
+ ```
+ > pip install .
+ > pip install -e ".[dev]"
+
+ > python3 -m pip install pipenv
+ > pipenv install build
+ ```
+
+ ## Examples
+ ### Enable Monocle tracing in your application
+ ```python
+ from monocle_apptrace.instrumentor import setup_monocle_telemetry
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
+ from langchain.chains import LLMChain
+ from langchain_openai import OpenAI
+ from langchain.prompts import PromptTemplate
+
+ # Call the setup Monocle telemetry method
+ setup_monocle_telemetry(workflow_name="simple_math_app",
+     span_processors=[BatchSpanProcessor(ConsoleSpanExporter())])
+
+ llm = OpenAI()
+ prompt = PromptTemplate.from_template("1 + {number} = ")
+
+ chain = LLMChain(llm=llm, prompt=prompt)
+ chain.invoke({"number":2})
+
+ # Request callbacks: finally, let's use request `callbacks` to achieve the same result
+ from langchain.callbacks import StdOutCallbackHandler
+ handler = StdOutCallbackHandler()
+ chain = LLMChain(llm=llm, prompt=prompt)
+ chain.invoke({"number":2}, {"callbacks":[handler]})
+ ```
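+ The `span_processors` argument accepts any OpenTelemetry span processor, so traces can be sent somewhere other than the console. A minimal sketch, assuming the `opentelemetry-exporter-otlp` package is installed and a collector is listening on the illustrative endpoint below:
+ ```python
+ from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from monocle_apptrace.instrumentor import setup_monocle_telemetry
+
+ # export spans to an OTLP collector instead of stdout
+ setup_monocle_telemetry(
+     workflow_name="simple_math_app",
+     span_processors=[BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4317"))])
+ ```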
+
+ ### Monitoring custom methods with Monocle
+
+ ```python
+ from monocle_apptrace.instrumentor import setup_monocle_telemetry
+ from monocle_apptrace.wrapper import WrapperMethod, task_wrapper, atask_wrapper
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
+
+ # extend the default wrapped-methods list as follows
+ app_name = "simple_math_app"
+ setup_monocle_telemetry(
+     workflow_name=app_name,
+     span_processors=[BatchSpanProcessor(ConsoleSpanExporter())],
+     wrapper_methods=[
+         WrapperMethod(
+             package="langchain.schema.runnable",
+             object="RunnableParallel",
+             method="invoke",
+             span_name="langchain.workflow",
+             wrapper=task_wrapper),
+         WrapperMethod(
+             package="langchain.schema.runnable",
+             object="RunnableParallel",
+             method="ainvoke",
+             span_name="langchain.workflow",
+             wrapper=atask_wrapper)
+     ])
+ ```
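+ A wrapper is any function with the signature that Monocle's built-in `task_wrapper` uses. A minimal sketch of a custom wrapper (the span-name fallback and the extra attribute are illustrative):
+ ```python
+ from monocle_apptrace.utils import with_tracer_wrapper
+
+ @with_tracer_wrapper
+ def my_custom_wrapper(tracer, to_wrap, wrapped, instance, args, kwargs):
+     # open a span named from the WrapperMethod config, then call through
+     with tracer.start_as_current_span(to_wrap.get("span_name") or "custom.task") as span:
+         span.set_attribute("custom.marker", "true")  # illustrative attribute
+         return wrapped(*args, **kwargs)
+ ```
+ Pass it as the `wrapper` argument of a `WrapperMethod` to trace a method in your own code or in a library Monocle doesn't cover out of the box.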
@@ -0,0 +1,2 @@
+
+
@@ -0,0 +1,28 @@
+
+
+ import logging
+ from monocle_apptrace.haystack.wrap_openai import wrap_openai
+ from monocle_apptrace.haystack.wrap_pipeline import wrap as wrap_pipeline
+
+ logger = logging.getLogger(__name__)
+
+ HAYSTACK_METHODS = [
+     {
+         "package": "haystack.components.generators.openai",
+         "object": "OpenAIGenerator",
+         "method": "run",
+         "wrapper": wrap_openai,
+     },
+     {
+         "package": "haystack.components.generators.chat.openai",
+         "object": "OpenAIChatGenerator",
+         "method": "run",
+         "wrapper": wrap_openai,
+     },
+     {
+         "package": "haystack.core.pipeline.pipeline",
+         "object": "Pipeline",
+         "method": "run",
+         "wrapper": wrap_pipeline,
+     },
+ ]
@@ -0,0 +1,27 @@
+
+
+ import logging
+ from opentelemetry import context as context_api
+ from opentelemetry.context import attach, set_value
+ from opentelemetry.instrumentation.utils import (
+     _SUPPRESS_INSTRUMENTATION_KEY,
+ )
+ from monocle_apptrace.wrap_common import WORKFLOW_TYPE_MAP, with_tracer_wrapper
+
+ logger = logging.getLogger(__name__)
+
+
+ @with_tracer_wrapper
+ def wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+         return wrapped(*args, **kwargs)
+     name = instance.name
+     attach(set_value("workflow_name", name))
+     with tracer.start_as_current_span(f"{name}.task") as span:
+         # the workflow name is the service name configured at setup time
+         workflow_name = span.resource.attributes.get("service.name")
+         span.set_attribute("workflow_name", workflow_name)
+         span.set_attribute("workflow_type", WORKFLOW_TYPE_MAP["haystack"])
+
+         response = wrapped(*args, **kwargs)
+
+     return response
@@ -0,0 +1,52 @@
+
+
+ import logging
+ from opentelemetry import context as context_api
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+ from monocle_apptrace.wrap_common import with_tracer_wrapper
+ from monocle_apptrace.utils import (
+     dont_throw,
+     set_span_attribute
+ )
+
+ logger = logging.getLogger(__name__)
+
+ @dont_throw
+ def _set_input_attributes(span, kwargs, instance, args):
+     set_span_attribute(span, "llm_input", kwargs.get("prompt"))
+
+     if 'model' in instance.__dict__:
+         model_name = instance.__dict__.get("model")
+         set_span_attribute(span, "openai_model_name", model_name)
+
+ @dont_throw
+ def _set_response_attributes(span, response):
+     if "meta" in response:
+         token_usage = response["meta"][0]["usage"]
+         set_span_attribute(span, "completion_tokens", token_usage.get("completion_tokens"))
+         set_span_attribute(span, "prompt_tokens", token_usage.get("prompt_tokens"))
+         set_span_attribute(span, "total_tokens", token_usage.get("total_tokens"))
+
+
+ @with_tracer_wrapper
+ def wrap_openai(tracer, to_wrap, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+         return wrapped(*args, **kwargs)
+
+     with tracer.start_as_current_span("haystack.openai") as span:
+         if span.is_recording():
+             _set_input_attributes(span, kwargs, instance, args)
+
+         response = wrapped(*args, **kwargs)
+
+         if response and span.is_recording():
+             _set_response_attributes(span, response)
+
+     return response
@@ -0,0 +1,49 @@
+
+
+ import logging
+ from opentelemetry import context as context_api
+ from opentelemetry.context import attach, set_value
+ from opentelemetry.instrumentation.utils import (
+     _SUPPRESS_INSTRUMENTATION_KEY,
+ )
+ from monocle_apptrace.wrap_common import PROMPT_INPUT_KEY, PROMPT_OUTPUT_KEY, WORKFLOW_TYPE_MAP, with_tracer_wrapper
+
+ logger = logging.getLogger(__name__)
+
+
+ @with_tracer_wrapper
+ def wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+         return wrapped(*args, **kwargs)
+     name = "haystack_pipeline"
+     attach(set_value("workflow_name", name))
+     inputs = set()
+     workflow_input = get_workflow_input(args, inputs)
+
+     with tracer.start_as_current_span(f"{name}.workflow") as span:
+         span.set_attribute(PROMPT_INPUT_KEY, workflow_input)
+         workflow_name = span.resource.attributes.get("service.name")
+         set_workflow_attributes(span, workflow_name)
+
+         response = wrapped(*args, **kwargs)
+         set_workflow_output(span, response)
+     return response
+
+ def set_workflow_output(span, response):
+     workflow_output: str = response["llm"]["replies"][0]
+     span.set_attribute(PROMPT_OUTPUT_KEY, workflow_output)
+
+ def get_workflow_input(args, inputs):
+     # collect every text value from the pipeline's input dict-of-dicts
+     for value in args[0].values():
+         for text in value.values():
+             inputs.add(text)
+
+     workflow_input: str = ""
+     for input_str in inputs:
+         workflow_input = workflow_input + input_str
+     return workflow_input
+
+ def set_workflow_attributes(span, workflow_name):
+     span.set_attribute("workflow_name", workflow_name)
+     span.set_attribute("workflow_type", WORKFLOW_TYPE_MAP["haystack"])
@@ -0,0 +1,127 @@
+
+
+ import logging
+ from typing import Collection, List
+ from wrapt import wrap_function_wrapper
+ from opentelemetry.trace import get_tracer
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from opentelemetry.instrumentation.utils import unwrap
+ from opentelemetry.sdk.trace import TracerProvider, Span
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanProcessor
+ from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+ from opentelemetry import trace
+ from monocle_apptrace.wrap_common import CONTEXT_PROPERTIES_KEY
+ from monocle_apptrace.wrapper import INBUILT_METHODS_LIST, WrapperMethod
+ from opentelemetry.context import get_value, attach, set_value
+
+
+ logger = logging.getLogger(__name__)
+
+ _instruments = ("langchain >= 0.0.346",)
+
+ class MonocleInstrumentor(BaseInstrumentor):
+
+     workflow_name: str = ""
+
+     def __init__(
+             self,
+             user_wrapper_methods: List[WrapperMethod] = None) -> None:
+         self.user_wrapper_methods = user_wrapper_methods or []
+         self.instrumented_method_list: List[object] = []
+         super().__init__()
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         tracer_provider = kwargs.get("tracer_provider")
+         tracer = get_tracer(instrumenting_module_name=__name__, tracer_provider=tracer_provider)
+
+         user_method_list = [
+             {
+                 "package": method.package,
+                 "object": method.object,
+                 "method": method.method,
+                 "span_name": method.span_name,
+                 "wrapper": method.wrapper,
+             } for method in self.user_wrapper_methods]
+
+         final_method_list = user_method_list + INBUILT_METHODS_LIST
+
+         for wrapped_method in final_method_list:
+             try:
+                 wrap_package = wrapped_method.get("package")
+                 wrap_object = wrapped_method.get("object")
+                 wrap_method = wrapped_method.get("method")
+                 wrapper = wrapped_method.get("wrapper")
+                 wrap_function_wrapper(
+                     wrap_package,
+                     f"{wrap_object}.{wrap_method}" if wrap_object else wrap_method,
+                     wrapper(tracer, wrapped_method),
+                 )
+                 self.instrumented_method_list.append(wrapped_method)
+             except Exception as ex:
+                 # built-in targets may be absent (e.g. a framework not installed),
+                 # so only failures on user-supplied methods are reported
+                 if wrapped_method in user_method_list:
+                     logger.error(f"""_instrument wrap Exception: {str(ex)}
+                         for package: {wrap_package},
+                         object:{wrap_object},
+                         method:{wrap_method}""")
+
+
+     def _uninstrument(self, **kwargs):
+         for wrapped_method in self.instrumented_method_list:
+             try:
+                 wrap_package = wrapped_method.get("package")
+                 wrap_object = wrapped_method.get("object")
+                 wrap_method = wrapped_method.get("method")
+                 unwrap(
+                     f"{wrap_package}.{wrap_object}" if wrap_object else wrap_package,
+                     wrap_method,
+                 )
+             except Exception as ex:
+                 logger.error(f"""_uninstrument unwrap Exception: {str(ex)}
+                     for package: {wrap_package},
+                     object:{wrap_object},
+                     method:{wrap_method}""")
+
+
+ def setup_monocle_telemetry(
+         workflow_name: str,
+         span_processors: List[SpanProcessor] = None,
+         wrapper_methods: List[WrapperMethod] = None):
+     resource = Resource(attributes={
+         SERVICE_NAME: workflow_name
+     })
+     trace_provider = TracerProvider(resource=resource)
+     default_provider = trace.get_tracer_provider()
+     # if the global provider is still the uninitialized proxy, install our own;
+     # otherwise attach the processors to the provider that is already set
+     is_proxy_provider = "Proxy" in type(default_provider).__name__
+     for processor in span_processors or []:
+         processor.on_start = on_processor_start
+         if not is_proxy_provider:
+             default_provider.add_span_processor(processor)
+         else:
+             trace_provider.add_span_processor(processor)
+     if is_proxy_provider:
+         trace.set_tracer_provider(trace_provider)
+     instrumentor = MonocleInstrumentor(user_wrapper_methods=wrapper_methods or [])
+     instrumentor.app_name = workflow_name
+     if not instrumentor.is_instrumented_by_opentelemetry:
+         instrumentor.instrument()
+
+
+ def on_processor_start(span: Span, parent_context):
+     # copy any context properties attached via set_context_properties onto the span
+     context_properties = get_value(CONTEXT_PROPERTIES_KEY)
+     if context_properties is not None:
+         for key, value in context_properties.items():
+             span.set_attribute(
+                 f"{CONTEXT_PROPERTIES_KEY}.{key}", value
+             )
+
+ def set_context_properties(properties: dict) -> None:
+     attach(set_value(CONTEXT_PROPERTIES_KEY, properties))
+
@@ -0,0 +1,95 @@
+
+
+ from monocle_apptrace.wrap_common import allm_wrapper, atask_wrapper, llm_wrapper, task_wrapper
+
+ LANGCHAIN_METHODS = [
+     {
+         "package": "langchain.prompts.base",
+         "object": "BasePromptTemplate",
+         "method": "invoke",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "langchain.prompts.base",
+         "object": "BasePromptTemplate",
+         "method": "ainvoke",
+         "wrapper": atask_wrapper,
+     },
+     {
+         "package": "langchain.chat_models.base",
+         "object": "BaseChatModel",
+         "method": "invoke",
+         "wrapper": llm_wrapper,
+     },
+     {
+         "package": "langchain.chat_models.base",
+         "object": "BaseChatModel",
+         "method": "ainvoke",
+         "wrapper": allm_wrapper,
+     },
+     {
+         "package": "langchain_core.language_models.llms",
+         "object": "LLM",
+         "method": "_generate",
+         "wrapper": llm_wrapper,
+     },
+     {
+         "package": "langchain_core.language_models.llms",
+         "object": "LLM",
+         "method": "_agenerate",
+         "wrapper": allm_wrapper,
+     },
+     {
+         "package": "langchain_core.retrievers",
+         "object": "BaseRetriever",
+         "method": "invoke",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "langchain_core.retrievers",
+         "object": "BaseRetriever",
+         "method": "ainvoke",
+         "wrapper": atask_wrapper,
+     },
+     {
+         "package": "langchain.schema",
+         "object": "BaseOutputParser",
+         "method": "invoke",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "langchain.schema",
+         "object": "BaseOutputParser",
+         "method": "ainvoke",
+         "wrapper": atask_wrapper,
+     },
+     {
+         "package": "langchain.schema.runnable",
+         "object": "RunnableSequence",
+         "method": "invoke",
+         "span_name": "langchain.workflow",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "langchain.schema.runnable",
+         "object": "RunnableSequence",
+         "method": "ainvoke",
+         "span_name": "langchain.workflow",
+         "wrapper": atask_wrapper,
+     },
+     {
+         "package": "langchain.schema.runnable",
+         "object": "RunnableParallel",
+         "method": "invoke",
+         "span_name": "langchain.workflow",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "langchain.schema.runnable",
+         "object": "RunnableParallel",
+         "method": "ainvoke",
+         "span_name": "langchain.workflow",
+         "wrapper": atask_wrapper,
+     },
+ ]
@@ -0,0 +1,71 @@
+
+
+ from monocle_apptrace.wrap_common import allm_wrapper, atask_wrapper, llm_wrapper, task_wrapper
+
+ def get_llm_span_name_for_openai(instance):
+     if (hasattr(instance, "_is_azure_client")
+             and callable(getattr(instance, "_is_azure_client"))
+             and instance._is_azure_client()):
+         return "llamaindex.azure_openai"
+     return "llamaindex.openai"
+
+ LLAMAINDEX_METHODS = [
+     {
+         "package": "llama_index.core.indices.base_retriever",
+         "object": "BaseRetriever",
+         "method": "retrieve",
+         "span_name": "llamaindex.retrieve",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "llama_index.core.indices.base_retriever",
+         "object": "BaseRetriever",
+         "method": "aretrieve",
+         "span_name": "llamaindex.retrieve",
+         "wrapper": atask_wrapper,
+     },
+     {
+         "package": "llama_index.core.base.base_query_engine",
+         "object": "BaseQueryEngine",
+         "method": "query",
+         "span_name": "llamaindex.query",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "llama_index.core.base.base_query_engine",
+         "object": "BaseQueryEngine",
+         "method": "aquery",
+         "span_name": "llamaindex.query",
+         "wrapper": atask_wrapper,
+     },
+     {
+         "package": "llama_index.core.llms.custom",
+         "object": "CustomLLM",
+         "method": "chat",
+         "span_name": "llamaindex.llmchat",
+         "wrapper": task_wrapper,
+     },
+     {
+         "package": "llama_index.core.llms.custom",
+         "object": "CustomLLM",
+         "method": "achat",
+         "span_name": "llamaindex.llmchat",
+         "wrapper": atask_wrapper,
+     },
+     {
+         "package": "llama_index.llms.openai.base",
+         "object": "OpenAI",
+         "method": "chat",
+         "span_name": "llamaindex.openai",
+         "span_name_getter": get_llm_span_name_for_openai,
+         "wrapper": llm_wrapper,
+     },
+     {
+         "package": "llama_index.llms.openai.base",
+         "object": "OpenAI",
+         "method": "achat",
+         "span_name": "llamaindex.openai",
+         "wrapper": allm_wrapper,
+     }
+ ]
+
@@ -0,0 +1,53 @@
+
+
+ import logging
+ logger = logging.getLogger(__name__)
+
+ class Config:
+     exception_logger = None
+
+ def set_span_attribute(span, name, value):
+     # skip unset or empty values
+     if value is not None and value != "":
+         span.set_attribute(name, value)
+
+
+ def dont_throw(func):
+     """
+     A decorator that wraps the passed-in function and logs exceptions instead of throwing them.
+
+     @param func: The function to wrap
+     @return: The wrapper function
+     """
+     # Obtain a logger specific to the function's module
+     logger = logging.getLogger(func.__module__)
+
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except Exception as e:
+             logger.warning("Failed to execute %s, error: %s", func.__name__, str(e))
+             if Config.exception_logger:
+                 Config.exception_logger(e)
+
+     return wrapper
+
+ def with_tracer_wrapper(func):
+     """Helper for providing a tracer to wrapper functions."""
+
+     def _with_tracer(tracer, to_wrap):
+         def wrapper(wrapped, instance, args, kwargs):
+             return func(tracer, to_wrap, wrapped, instance, args, kwargs)
+
+         return wrapper
+
+     return _with_tracer
+
+ def resolve_from_alias(map, alias):
+     """Return the value of the first alias key present in the map, or None."""
+     for i in alias:
+         if i in map.keys():
+             return map[i]
+     return None
@@ -0,0 +1,214 @@
+ import logging
+ import os
+
+ from opentelemetry.trace import Span, Tracer
+
+ from monocle_apptrace.utils import resolve_from_alias, with_tracer_wrapper
+
+ logger = logging.getLogger(__name__)
+ WORKFLOW_TYPE_KEY = "workflow_type"
+ CONTEXT_INPUT_KEY = "context_input"
+ CONTEXT_OUTPUT_KEY = "context_output"
+ PROMPT_INPUT_KEY = "input"
+ PROMPT_OUTPUT_KEY = "output"
+ QUERY = "question"
+ RESPONSE = "response"
+ TAGS = "tags"
+ CONTEXT_PROPERTIES_KEY = "workflow_context_properties"
+
+
+ WORKFLOW_TYPE_MAP = {
+     "llama_index": "workflow.llamaindex",
+     "langchain": "workflow.langchain",
+     "haystack": "workflow.haystack"
+ }
+
+ @with_tracer_wrapper
+ def task_wrapper(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+     """Instruments and calls every function defined in TO_WRAP."""
+
+     # Some Langchain objects are wrapped elsewhere, so we ignore them here
+     if instance.__class__.__name__ in ("AgentExecutor",):
+         return wrapped(*args, **kwargs)
+
+     if hasattr(instance, "name") and instance.name:
+         name = f"{to_wrap.get('span_name')}.{instance.name.lower()}"
+     elif to_wrap.get("span_name"):
+         name = to_wrap.get("span_name")
+     else:
+         name = f"langchain.task.{instance.__class__.__name__}"
+     kind = to_wrap.get("kind")
+
+     with tracer.start_as_current_span(name) as span:
+         if is_root_span(span):
+             update_span_with_prompt_input(to_wrap=to_wrap, wrapped_args=args, span=span)
+
+         # capture the tags attribute of the instance if present, else ignore
+         try:
+             span.set_attribute(TAGS, getattr(instance, TAGS))
+         except AttributeError:
+             pass
+         update_span_with_context_input(to_wrap=to_wrap, wrapped_args=args, span=span)
+         return_value = wrapped(*args, **kwargs)
+         update_span_with_context_output(to_wrap=to_wrap, return_value=return_value, span=span)
+
+         if is_root_span(span):
+             workflow_name = span.resource.attributes.get("service.name")
+             span.set_attribute("workflow_name", workflow_name)
+             update_span_with_prompt_output(to_wrap=to_wrap, wrapped_args=return_value, span=span)
+             update_workflow_type(to_wrap, span)
+
+     return return_value
+
+ @with_tracer_wrapper
+ async def atask_wrapper(tracer, to_wrap, wrapped, instance, args, kwargs):
+     """Instruments and calls every function defined in TO_WRAP."""
+
+     # Some Langchain objects are wrapped elsewhere, so we ignore them here
+     if instance.__class__.__name__ in ("AgentExecutor",):
+         return await wrapped(*args, **kwargs)
+
+     if hasattr(instance, "name") and instance.name:
+         name = f"{to_wrap.get('span_name')}.{instance.name.lower()}"
+     elif to_wrap.get("span_name"):
+         name = to_wrap.get("span_name")
+     else:
+         name = f"langchain.task.{instance.__class__.__name__}"
+     kind = to_wrap.get("kind")
+     with tracer.start_as_current_span(name) as span:
+         return_value = await wrapped(*args, **kwargs)
+
+     return return_value
+
+ @with_tracer_wrapper
+ async def allm_wrapper(tracer, to_wrap, wrapped, instance, args, kwargs):
+     # Some Langchain objects are wrapped elsewhere, so we ignore them here
+     if instance.__class__.__name__ in ("AgentExecutor",):
+         return await wrapped(*args, **kwargs)
+
+     if to_wrap.get("span_name_getter"):
+         name = to_wrap.get("span_name_getter")(instance)
+     elif hasattr(instance, "name") and instance.name:
+         name = f"{to_wrap.get('span_name')}.{instance.name.lower()}"
+     elif to_wrap.get("span_name"):
+         name = to_wrap.get("span_name")
+     else:
+         name = f"langchain.task.{instance.__class__.__name__}"
+     kind = to_wrap.get("kind")
+     with tracer.start_as_current_span(name) as span:
+         update_llm_endpoint(curr_span=span, instance=instance)
+
+         return_value = await wrapped(*args, **kwargs)
+
+     return return_value
+
+ @with_tracer_wrapper
+ def llm_wrapper(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+
+     # Some Langchain objects are wrapped elsewhere, so we ignore them here
+     if instance.__class__.__name__ in ("AgentExecutor",):
+         return wrapped(*args, **kwargs)
+
+     if callable(to_wrap.get("span_name_getter")):
+         name = to_wrap.get("span_name_getter")(instance)
+     elif hasattr(instance, "name") and instance.name:
+         name = f"{to_wrap.get('span_name')}.{instance.name.lower()}"
+     elif to_wrap.get("span_name"):
+         name = to_wrap.get("span_name")
+     else:
+         name = f"langchain.task.{instance.__class__.__name__}"
+     kind = to_wrap.get("kind")
+     with tracer.start_as_current_span(name) as span:
+         update_llm_endpoint(curr_span=span, instance=instance)
+
+         return_value = wrapped(*args, **kwargs)
+         update_span_from_llm_response(response=return_value, span=span)
+
+     return return_value
+
+ def update_llm_endpoint(curr_span: Span, instance):
+     triton_llm_endpoint = os.environ.get("TRITON_LLM_ENDPOINT")
+     if triton_llm_endpoint is not None and len(triton_llm_endpoint) > 0:
+         curr_span.set_attribute("server_url", triton_llm_endpoint)
+     else:
+         if 'temperature' in instance.__dict__:
+             temp_val = instance.__dict__.get("temperature")
+             curr_span.set_attribute("temperature", temp_val)
+         # handling for model name
+         model_name = resolve_from_alias(instance.__dict__, ["model", "model_name"])
+         curr_span.set_attribute("openai_model_name", model_name)
+         # handling AzureOpenAI deployment
+         deployment_name = resolve_from_alias(instance.__dict__, ["engine", "azure_deployment",
+                                              "deployment_name", "deployment_id", "deployment"])
+         curr_span.set_attribute("az_openai_deployment", deployment_name)
+         # handling the inference endpoint
+         inference_ep = resolve_from_alias(instance.__dict__, ["azure_endpoint", "api_base"])
+         curr_span.set_attribute("inference_endpoint", inference_ep)
+
+ def is_root_span(curr_span: Span) -> bool:
+     return curr_span.parent is None
+
+ def get_input_from_args(chain_args):
+     if len(chain_args) > 0 and isinstance(chain_args[0], str):
+         return chain_args[0]
+     return ""
+
+ def update_span_from_llm_response(response, span: Span):
+     # extract token usage from langchain openai
+     if response is not None and hasattr(response, "response_metadata"):
+         response_metadata = response.response_metadata
+         token_usage = response_metadata.get("token_usage")
+         if token_usage is not None:
+             span.set_attribute("completion_tokens", token_usage.get("completion_tokens"))
+             span.set_attribute("prompt_tokens", token_usage.get("prompt_tokens"))
+             span.set_attribute("total_tokens", token_usage.get("total_tokens"))
+     # extract token usage from llamaindex openai
+     if response is not None and hasattr(response, "raw"):
+         if response.raw is not None:
+             token_usage = response.raw.get("usage")
+             if token_usage is not None:
+                 if hasattr(token_usage, "completion_tokens"):
+                     span.set_attribute("completion_tokens", token_usage.completion_tokens)
+                 if hasattr(token_usage, "prompt_tokens"):
+                     span.set_attribute("prompt_tokens", token_usage.prompt_tokens)
+                 if hasattr(token_usage, "total_tokens"):
+                     span.set_attribute("total_tokens", token_usage.total_tokens)
+
+ def update_workflow_type(to_wrap, span: Span):
+     package_name = to_wrap.get('package')
+
+     for package, workflow_type in WORKFLOW_TYPE_MAP.items():
+         if package_name is not None and package in package_name:
+             span.set_attribute(WORKFLOW_TYPE_KEY, workflow_type)
+
+ def update_span_with_context_input(to_wrap, wrapped_args, span: Span):
+     package_name: str = to_wrap.get('package')
+     if "langchain_core.retrievers" in package_name:
+         input_arg_text = wrapped_args[0]
+         span.add_event(CONTEXT_INPUT_KEY, {QUERY: input_arg_text})
+     if "llama_index.core.indices.base_retriever" in package_name:
+         input_arg_text = wrapped_args[0].query_str
+         span.add_event(CONTEXT_INPUT_KEY, {QUERY: input_arg_text})
+
+ def update_span_with_context_output(to_wrap, return_value, span: Span):
+     package_name: str = to_wrap.get('package')
+     if "llama_index.core.indices.base_retriever" in package_name:
+         output_arg_text = return_value[0].text
+         span.add_event(CONTEXT_OUTPUT_KEY, {RESPONSE: output_arg_text})
+
+ def update_span_with_prompt_input(to_wrap, wrapped_args, span: Span):
+     input_arg_text = wrapped_args[0]
+
+     if isinstance(input_arg_text, dict):
+         span.add_event(PROMPT_INPUT_KEY, input_arg_text)
+     else:
+         span.add_event(PROMPT_INPUT_KEY, {QUERY: input_arg_text})
+
+ def update_span_with_prompt_output(to_wrap, wrapped_args, span: Span):
+     package_name: str = to_wrap.get('package')
+     if isinstance(wrapped_args, str):
+         span.add_event(PROMPT_OUTPUT_KEY, {RESPONSE: wrapped_args})
+     if "llama_index.core.base.base_query_engine" in package_name:
+         span.add_event(PROMPT_OUTPUT_KEY, {RESPONSE: wrapped_args.response})
@@ -0,0 +1,24 @@
+
+
+ from monocle_apptrace.haystack import HAYSTACK_METHODS
+ from monocle_apptrace.langchain import LANGCHAIN_METHODS
+ from monocle_apptrace.llamaindex import LLAMAINDEX_METHODS
+ from monocle_apptrace.wrap_common import task_wrapper
+
+ class WrapperMethod:
+     def __init__(
+             self,
+             package: str,
+             object: str,
+             method: str,
+             span_name: str = None,
+             wrapper = task_wrapper
+             ):
+         self.package = package
+         self.object = object
+         self.method = method
+         self.span_name = span_name
+         self.wrapper = wrapper
+
+ INBUILT_METHODS_LIST = LANGCHAIN_METHODS + LLAMAINDEX_METHODS + HAYSTACK_METHODS
+
@@ -0,0 +1,76 @@
+ Metadata-Version: 2.3
+ Name: monocle_apptrace
+ Version: 0.0.1
+ Summary: package with monocle genAI tracing
+ Project-URL: Homepage, https://github.com/monocle2ai/monocle
+ Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
+ License-File: LICENSE
+ License-File: NOTICE
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.8
+ Requires-Dist: opentelemetry-api>=1.21.0
+ Requires-Dist: opentelemetry-instrumentation
+ Requires-Dist: opentelemetry-sdk>=1.21.0
+ Requires-Dist: requests
+ Requires-Dist: wrapt>=1.14.0
+ Provides-Extra: dev
+ Requires-Dist: datasets==2.20.0; extra == 'dev'
+ Requires-Dist: faiss-cpu==1.8.0; extra == 'dev'
+ Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
+ Requires-Dist: langchain-chroma==0.1.1; extra == 'dev'
+ Requires-Dist: langchain-community==0.2.5; extra == 'dev'
+ Requires-Dist: langchain-openai==0.1.8; extra == 'dev'
+ Requires-Dist: langchain==0.2.5; extra == 'dev'
+ Requires-Dist: llama-index-embeddings-huggingface==0.2.0; extra == 'dev'
+ Requires-Dist: llama-index-vector-stores-chroma==0.1.9; extra == 'dev'
+ Requires-Dist: llama-index==0.10.30; extra == 'dev'
+ Requires-Dist: numpy==1.26.4; extra == 'dev'
+ Requires-Dist: pytest==8.0.0; extra == 'dev'
+ Requires-Dist: sentence-transformers==2.6.1; extra == 'dev'
+ Requires-Dist: types-requests==2.31.0.20240106; extra == 'dev'
+ Description-Content-Type: text/markdown
+
+ # monocle genAI observability
+ ### Background
+ Generative AI (GenAI) is the type of AI used to create content such as conversations, images, or video based on prior learning from existing content. GenAI relies on foundational models: exceptionally large ML models trained on vast amounts of generalized and unlabeled data to perform a variety of general tasks such as understanding language and generating new text, audio, or images from user-provided prompts in a human language. Foundational models (FMs) work by using learned patterns and relationships from the training data to predict the next item in a sequence given a prompt. It is cheaper and faster for data scientists to use foundational models as starting points rather than building models from scratch when building ML apps.
+ Large Language Models (LLMs) are a class of foundational models trained on text data and used to perform a variety of tasks such as understanding language, reasoning over text, and generating new text based on user prompts in a human language. Examples of LLMs include ChatGPT, Llama, and Claude.
+ LLM-based AI apps leverage language understanding, reasoning, and text generation to augment or automate complex tasks that typically require human intervention, such as summarizing legal documents or triaging customer support tickets.
+ Typically, AI developers build LLM-based AI apps that automate complex workflows by combining multiple LLMs and components such as prompts, vectors, or agents, each solving a discrete task, connected by chains or pipelines in different ways using LLM orchestration frameworks.
+ When deployed to production, different parts of a multi-component distributed LLM-based AI app run on a combination of different kinds of AI infrastructure, such as LLM-as-a-Service, GPU clouds, managed services from cloud providers, or a custom-engineered AI stack. Typically, these systems are managed in production by IT DevOps engineers.
+ AI developers code, monitor, debug, and optimize the resources in an LLM-based AI application. IT DevOps engineers monitor, troubleshoot, and optimize the services in the AI infra that the LLM-based AI application runs on.
+
+ ## Introducing “Monocle – An eye for A.I.”
+ The goal of project Monocle is to help GenAI developers trace their applications. A typical GenAI application comprises several technology components: application code/workflow, models, inference services, vector databases, etc. Understanding the dependencies and tracking them across the application quickly becomes a difficult task. Monocle can be integrated into application code with little to no code changes. Monocle supports tracing all GenAI technology components, application frameworks, and LLM hosting services. We do all the hard work of finding what needs to be instrumented and how to instrument it. This enables instrumented applications to generate detailed traces without any additional effort from the developers.
+ The traces are compatible with the OpenTelemetry format. They are further enriched with many more attributes relevant to GenAI applications, such as prompts. The project has out-of-the-box support for storing traces locally, plus an extensibility point for third-party stores, which can be implemented by end users or supplied by third-party vendors.
+
+ ## Monocle integration
+ ### GenAI application frameworks
+ - Langchain
+ - LlamaIndex
+ - Haystack
+ ### LLMs
+ - OpenAI
+ - Azure OpenAI
+ - NVIDIA Triton
+
+ ## Getting started
+ ### Try Monocle with your Python GenAI application
+ - Get the latest Monocle Python library
+ ```
+ pip install monocle_apptrace
+ ```
+ - Enable Monocle tracing in your app by adding the following
+ ```
+ from monocle_apptrace.instrumentor import setup_monocle_telemetry
+ setup_monocle_telemetry(workflow_name="your-app-name")
+ ```
+ Please refer to the [Monocle user guide](Monocle_User_Guide.md) for more details
+
+ ## Get involved
+ ### Provide feedback
+ - Submit issues and enhancement requests via GitHub issues
+
+ ### Contribute
+ - Monocle is a community-based open source project. We welcome your contributions. Please refer to CONTRIBUTING and CODE_OF_CONDUCT for guidelines. The [contributor's guide](CONTRIBUTING.md) provides technical details of the project.
+
@@ -0,0 +1,17 @@
+ monocle_apptrace/README.md,sha256=C0JfJtNC7LOlr_iHSxOwuZn89vuoZ2RbyeGB7ACu4iY,3094
+ monocle_apptrace/__init__.py,sha256=daEdpEyAJIa8b2VkCqSKcw8PaExcB6Qro80XNes_sHA,2
+ monocle_apptrace/instrumentor.py,sha256=T-g0OpR2VYm7lswyiFoBlBeRPKHX6a-mhH8q7rE-o2g,4927
+ monocle_apptrace/utils.py,sha256=KZwwXK8NTbVjQo4BkFwPdccE6hzvBzawPp6an44Gmz8,1366
+ monocle_apptrace/wrap_common.py,sha256=Qxwq2drHOovd0CMd7YiQbe_4FqGL07RLfu4Vek1Z-SU,9107
+ monocle_apptrace/wrapper.py,sha256=95Yg_dwGv5T6qtNv-ozhXRGaDVI0P27exU2901Pw2ps,684
+ monocle_apptrace/haystack/__init__.py,sha256=GspHWTmqMDxQhpTgl91xighNL8MHaWs6BF0YzzZayuE,714
+ monocle_apptrace/haystack/wrap_node.py,sha256=96qUlDZtNOVIPrrDZzjJ7ZDCi70mYbd7bIEX41e-RH0,916
+ monocle_apptrace/haystack/wrap_openai.py,sha256=9ecWsOUXNjTKrQjNZz1GCgkCAN9GChhasMRDmZMuANE,1585
+ monocle_apptrace/haystack/wrap_pipeline.py,sha256=eVxJ4-yS4gFxInwaWy5oxNcNl44X2M7_dhBpKVFmI6Q,1622
+ monocle_apptrace/langchain/__init__.py,sha256=PrZl6vxX5WsYfsPFVhumVmUGkwddAgPBFs1hpifbmZw,2519
+ monocle_apptrace/llamaindex/__init__.py,sha256=iMaboVDeM5yGR01cEqCD8MotCj2FuY4QjSZmFBv4f2o,2094
+ monocle_apptrace-0.0.1.dist-info/METADATA,sha256=uYd9O71FeKzrkLq6DmJYp9F62QxxZ7UfSAaGa5dPVm8,5645
+ monocle_apptrace-0.0.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+ monocle_apptrace-0.0.1.dist-info/licenses/LICENSE,sha256=ay9trLiP5I7ZsFXo6AqtkLYdRqe5S9r-DrPOvsNlZrg,9136
+ monocle_apptrace-0.0.1.dist-info/licenses/NOTICE,sha256=9jn4xtwM_uUetJMx5WqGnhrR7MIhpoRlpokjSTlyt8c,112
+ monocle_apptrace-0.0.1.dist-info/RECORD,,
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.25.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
@@ -0,0 +1,51 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+
+ You must give any other recipients of the Work or Derivative Works a copy of this License; and
+ You must cause any modified files to carry prominent notices stating that You changed the files; and
+ You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+ If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+ You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
@@ -0,0 +1,4 @@
+ Monocle
+ Copyright 2018-2021 Monocle Project Authors
+
+ Licensed under Apache License 2.0. See LICENSE for terms.