microsoft-agents-a365-observability-extensions-langchain 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/PKG-INFO +71 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/README.md +33 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365/observability/extensions/langchain/__init__.py +12 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365/observability/extensions/langchain/tracer.py +222 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365/observability/extensions/langchain/tracer_instrumentor.py +170 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365/observability/extensions/langchain/utils.py +516 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365_observability_extensions_langchain.egg-info/PKG-INFO +71 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365_observability_extensions_langchain.egg-info/SOURCES.txt +12 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365_observability_extensions_langchain.egg-info/dependency_links.txt +1 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365_observability_extensions_langchain.egg-info/requires.txt +17 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/microsoft_agents_a365_observability_extensions_langchain.egg-info/top_level.txt +1 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/pyproject.toml +78 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/setup.cfg +4 -0
- microsoft_agents_a365_observability_extensions_langchain-0.1.0/setup.py +28 -0
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: microsoft-agents-a365-observability-extensions-langchain
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: LangChain observability and tracing extensions for Microsoft Agent 365
|
|
5
|
+
Author-email: Microsoft <support@microsoft.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/microsoft/Agent365-python
|
|
8
|
+
Project-URL: Repository, https://github.com/microsoft/Agent365-python
|
|
9
|
+
Project-URL: Issues, https://github.com/microsoft/Agent365-python/issues
|
|
10
|
+
Project-URL: Documentation, https://github.com/microsoft/Agent365-python/tree/main/libraries/microsoft-agents-a365-observability-extensions-langchain
|
|
11
|
+
Keywords: observability,telemetry,tracing,opentelemetry,langchain,agents,ai
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Operating System :: OS Independent
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Classifier: Topic :: System :: Monitoring
|
|
21
|
+
Requires-Python: >=3.11
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: microsoft-agents-a365-observability-core>=0.0.0
|
|
24
|
+
Requires-Dist: langchain>=0.1.0
|
|
25
|
+
Requires-Dist: langchain-core>=0.1.0
|
|
26
|
+
Requires-Dist: opentelemetry-api>=1.36.0
|
|
27
|
+
Requires-Dist: opentelemetry-sdk>=1.36.0
|
|
28
|
+
Requires-Dist: opentelemetry-instrumentation>=0.47b0
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
31
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
32
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
33
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
34
|
+
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
35
|
+
Provides-Extra: test
|
|
36
|
+
Requires-Dist: pytest>=7.0.0; extra == "test"
|
|
37
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
|
|
38
|
+
|
|
39
|
+
# microsoft-agents-a365-observability-extensions-langchain
|
|
40
|
+
|
|
41
|
+
[](https://pypi.org/project/microsoft-agents-a365-observability-extensions-langchain)
|
|
42
|
+
[](https://pypi.org/project/microsoft-agents-a365-observability-extensions-langchain)
|
|
43
|
+
|
|
44
|
+
Observability extensions for LangChain framework. This package provides OpenTelemetry tracing integration for LangChain-based AI applications with automatic instrumentation for chains, agents, and tools.
|
|
45
|
+
|
|
46
|
+
## Installation
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
pip install microsoft-agents-a365-observability-extensions-langchain
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## Usage
|
|
53
|
+
|
|
54
|
+
For usage examples and detailed documentation, see the [Observability documentation](https://learn.microsoft.com/microsoft-agent-365/developer/observability?tabs=python) on Microsoft Learn.
|
|
55
|
+
|
|
56
|
+
## Support
|
|
57
|
+
|
|
58
|
+
For issues, questions, or feedback:
|
|
59
|
+
|
|
60
|
+
- File issues in the [GitHub Issues](https://github.com/microsoft/Agent365-python/issues) section
|
|
61
|
+
- See the [main documentation](../../../README.md) for more information
|
|
62
|
+
|
|
63
|
+
## Trademarks
|
|
64
|
+
|
|
65
|
+
*Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries. The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks. Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653.*
|
|
66
|
+
|
|
67
|
+
## License
|
|
68
|
+
|
|
69
|
+
Copyright (c) Microsoft Corporation. All rights reserved.
|
|
70
|
+
|
|
71
|
+
Licensed under the MIT License - see the [LICENSE](../../../LICENSE.md) file for details.
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# microsoft-agents-a365-observability-extensions-langchain
|
|
2
|
+
|
|
3
|
+
[](https://pypi.org/project/microsoft-agents-a365-observability-extensions-langchain)
|
|
4
|
+
[](https://pypi.org/project/microsoft-agents-a365-observability-extensions-langchain)
|
|
5
|
+
|
|
6
|
+
Observability extensions for LangChain framework. This package provides OpenTelemetry tracing integration for LangChain-based AI applications with automatic instrumentation for chains, agents, and tools.
|
|
7
|
+
|
|
8
|
+
## Installation
|
|
9
|
+
|
|
10
|
+
```bash
|
|
11
|
+
pip install microsoft-agents-a365-observability-extensions-langchain
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
## Usage
|
|
15
|
+
|
|
16
|
+
For usage examples and detailed documentation, see the [Observability documentation](https://learn.microsoft.com/microsoft-agent-365/developer/observability?tabs=python) on Microsoft Learn.
|
|
17
|
+
|
|
18
|
+
## Support
|
|
19
|
+
|
|
20
|
+
For issues, questions, or feedback:
|
|
21
|
+
|
|
22
|
+
- File issues in the [GitHub Issues](https://github.com/microsoft/Agent365-python/issues) section
|
|
23
|
+
- See the [main documentation](../../../README.md) for more information
|
|
24
|
+
|
|
25
|
+
## Trademarks
|
|
26
|
+
|
|
27
|
+
*Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries. The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks. Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653.*
|
|
28
|
+
|
|
29
|
+
## License
|
|
30
|
+
|
|
31
|
+
Copyright (c) Microsoft Corporation. All rights reserved.
|
|
32
|
+
|
|
33
|
+
Licensed under the MIT License - see the [LICENSE](../../../LICENSE.md) file for details.
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
# Copyright (c) Microsoft. All rights reserved.

"""
LangChain extension for Microsoft Agent 365 observability.

Wraps the LangChain callback/tracer machinery so that LangChain runs are
exported as OpenTelemetry spans through the Agent 365 telemetry solution.
The only public entry point is :class:`CustomLangChainInstrumentor`.
"""

from .tracer_instrumentor import CustomLangChainInstrumentor

__all__ = ["CustomLangChainInstrumentor"]

# This is a namespace package
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
# Copyright (c) Microsoft. All rights reserved.
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import re
|
|
5
|
+
from collections.abc import Iterator
|
|
6
|
+
from itertools import chain
|
|
7
|
+
from threading import RLock
|
|
8
|
+
from typing import (
|
|
9
|
+
TYPE_CHECKING,
|
|
10
|
+
Any,
|
|
11
|
+
cast,
|
|
12
|
+
)
|
|
13
|
+
from uuid import UUID
|
|
14
|
+
|
|
15
|
+
from langchain_core.tracers import BaseTracer, LangChainTracer
|
|
16
|
+
from langchain_core.tracers.schemas import Run
|
|
17
|
+
from microsoft_agents_a365.observability.core.inference_operation_type import InferenceOperationType
|
|
18
|
+
from microsoft_agents_a365.observability.core.utils import (
|
|
19
|
+
DictWithLock,
|
|
20
|
+
as_utc_nano,
|
|
21
|
+
flatten,
|
|
22
|
+
record_exception,
|
|
23
|
+
)
|
|
24
|
+
from opentelemetry import context as context_api
|
|
25
|
+
from opentelemetry import trace as trace_api
|
|
26
|
+
from opentelemetry.context import (
|
|
27
|
+
_SUPPRESS_INSTRUMENTATION_KEY,
|
|
28
|
+
get_value,
|
|
29
|
+
)
|
|
30
|
+
from opentelemetry.trace import Span
|
|
31
|
+
from opentelemetry.util.types import AttributeValue
|
|
32
|
+
|
|
33
|
+
from microsoft_agents_a365.observability.extensions.langchain.utils import (
|
|
34
|
+
IGNORED_EXCEPTION_PATTERNS,
|
|
35
|
+
add_operation_type,
|
|
36
|
+
function_calls,
|
|
37
|
+
input_messages,
|
|
38
|
+
invocation_parameters,
|
|
39
|
+
llm_provider,
|
|
40
|
+
metadata,
|
|
41
|
+
model_name,
|
|
42
|
+
output_messages,
|
|
43
|
+
prompts,
|
|
44
|
+
token_counts,
|
|
45
|
+
tools,
|
|
46
|
+
)
|
|
47
|
+
|
|
48
|
+
# Module logger; a NullHandler avoids "No handler found" warnings when the
# host application has not configured logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())


# OpenTelemetry context keys that, when present, are copied onto every span
# finished by this tracer (see get_attributes_from_context below).
CONTEXT_ATTRIBUTES = (
    "session.id",
    "user.id",
    "metadata",
    "tag.tags",
    "llm.prompt_template.template",
    "llm.prompt_template.variables",
    "llm.prompt_template.version",
)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class CustomLangChainTracer(BaseTracer):
    """LangChain ``BaseTracer`` that mirrors every run as an OpenTelemetry span.

    Spans are started in ``_start_trace`` and finished (with attributes and
    status) in ``_end_trace``; errors reported through the ``on_*_error``
    callbacks are recorded on the corresponding live span.
    """

    # NOTE: "_lock" was assigned in __init__ but missing from __slots__;
    # it is added here for consistency. (BaseTracer has no __slots__, so the
    # instance keeps a __dict__ either way and behavior is unchanged.)
    __slots__ = (
        "_tracer",
        "_separate_trace_from_runtime_context",
        "_spans_by_run",
        "_lock",
    )

    def __init__(
        self,
        tracer: trace_api.Tracer,
        separate_trace_from_runtime_context: bool,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Initialize the CustomLangChainTracer.

        Args:
            tracer (trace_api.Tracer): The OpenTelemetry tracer for creating spans.
            separate_trace_from_runtime_context (bool): When True, always start a new trace for each
                span without a parent, isolating it from any existing trace in the runtime context.
            *args (Any): Positional arguments for BaseTracer.
            **kwargs (Any): Keyword arguments for BaseTracer.
        """
        super().__init__(*args, **kwargs)
        if TYPE_CHECKING:
            # check that `run_map` still exists in parent class
            assert self.run_map
        # Wrap the parent's run_map so concurrent handler threads are safe.
        self.run_map = DictWithLock[str, Run](self.run_map)
        self._tracer = tracer
        self._separate_trace_from_runtime_context = separate_trace_from_runtime_context
        self._spans_by_run: dict[UUID, Span] = DictWithLock[UUID, Span]()
        self._lock = RLock()  # handlers may be run in a thread by langchain

    def get_span(self, run_id: UUID) -> Span | None:
        """Return the live span for *run_id*, or None if no span was started."""
        return self._spans_by_run.get(run_id)

    def _start_trace(self, run: Run) -> None:
        """Start a span for *run*, parented to its parent run's span when known."""
        self.run_map[str(run.id)] = run
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return
        with self._lock:
            parent_context = (
                trace_api.set_span_in_context(parent)
                if (parent_run_id := run.parent_run_id)
                and (parent := self._spans_by_run.get(parent_run_id))
                else (context_api.Context() if self._separate_trace_from_runtime_context else None)
            )
        # We can't use real time because the handler may be
        # called in a background thread.
        start_time_utc_nano = as_utc_nano(run.start_time)
        span = self._tracer.start_span(
            name=run.name,
            context=parent_context,
            start_time=start_time_utc_nano,
        )

        # The following line of code is commented out to serve as a reminder that in a system
        # of callbacks, attaching the context can be hazardous because there is no guarantee
        # that the context will be detached. An error could happen between callbacks leaving
        # the context attached forever, and all future spans will use it as parent. What's
        # worse is that the error could have also prevented the span from being exported,
        # leaving all future spans as orphans. That is a very bad scenario.
        # token = context_api.attach(context)
        with self._lock:
            self._spans_by_run[run.id] = span

    def _end_trace(self, run: Run) -> None:
        """Finish the span for *run*: set attributes/status, then end it."""
        self.run_map.pop(str(run.id), None)
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return
        span = self._spans_by_run.pop(run.id, None)
        if span:
            try:
                _update_span(span, run)
            except Exception:
                # Never let attribute extraction break the application callback chain.
                logger.exception("Failed to update span with run data.")
            # We can't use real time because the handler may be
            # called in a background thread.
            end_time_utc_nano = as_utc_nano(run.end_time) if run.end_time else None
            span.end(end_time=end_time_utc_nano)

    def _persist_run(self, run: Run) -> None:
        """No-op: spans are exported via OpenTelemetry, not persisted here."""
        pass

    def on_llm_error(self, error: BaseException, *args: Any, run_id: UUID, **kwargs: Any) -> Run:
        """Record the LLM error on the run's span, then defer to BaseTracer."""
        if span := self._spans_by_run.get(run_id):
            record_exception(span, error)
        return super().on_llm_error(error, *args, run_id=run_id, **kwargs)

    def on_chain_error(self, error: BaseException, *args: Any, run_id: UUID, **kwargs: Any) -> Run:
        """Record the chain error on the run's span, then defer to BaseTracer."""
        if span := self._spans_by_run.get(run_id):
            record_exception(span, error)
        return super().on_chain_error(error, *args, run_id=run_id, **kwargs)

    def on_retriever_error(
        self, error: BaseException, *args: Any, run_id: UUID, **kwargs: Any
    ) -> Run:
        """Record the retriever error on the run's span, then defer to BaseTracer."""
        if span := self._spans_by_run.get(run_id):
            record_exception(span, error)
        return super().on_retriever_error(error, *args, run_id=run_id, **kwargs)

    def on_tool_error(self, error: BaseException, *args: Any, run_id: UUID, **kwargs: Any) -> Run:
        """Record the tool error on the run's span, then defer to BaseTracer."""
        if span := self._spans_by_run.get(run_id):
            record_exception(span, error)
        return super().on_tool_error(error, *args, run_id=run_id, **kwargs)

    def on_chat_model_start(self, *args: Any, **kwargs: Any) -> Run:
        """
        This emulates the behavior of the LangChainTracer.
        https://github.com/langchain-ai/langchain/blob/c01467b1f4f9beae8f1edb105b17aa4f36bf6573/libs/core/langchain_core/tracers/langchain.py#L115

        Although this method exists on the parent class, i.e. `BaseTracer`,
        it requires setting `self._schema_format = "original+chat"`.
        https://github.com/langchain-ai/langchain/blob/c01467b1f4f9beae8f1edb105b17aa4f36bf6573/libs/core/langchain_core/tracers/base.py#L170

        But currently self._schema_format is marked for internal use.
        https://github.com/langchain-ai/langchain/blob/c01467b1f4f9beae8f1edb105b17aa4f36bf6573/libs/core/langchain_core/tracers/base.py#L60
        """  # noqa: E501
        return LangChainTracer.on_chat_model_start(self, *args, **kwargs)  # type: ignore
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def get_attributes_from_context() -> Iterator[tuple[str, AttributeValue]]:
    """Yield (key, value) pairs for each CONTEXT_ATTRIBUTES key set in the
    current OpenTelemetry context."""
    for key in CONTEXT_ATTRIBUTES:
        value = get_value(key)
        if value is not None:
            yield key, cast(AttributeValue, value)
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def _update_span(span: Span, run: Run) -> None:
    """Finalize *span* from *run*: status, operation-specific name, attributes.

    Args:
        span (Span): The live OpenTelemetry span for the run.
        run (Run): The finished LangChain run.
    """
    # If there is no error or if there is an agent control exception, set the span to OK
    if run.error is None or any(
        re.match(pattern, run.error) for pattern in IGNORED_EXCEPTION_PATTERNS
    ):
        span.set_status(trace_api.StatusCode.OK)
    else:
        span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, run.error))

    if run.run_type == "llm":
        # Fixed: the original chained `run.outputs.get("llm_output").get("id")`
        # which raises AttributeError whenever outputs, llm_output, or id is
        # missing (provider-dependent). Guard each step instead.
        llm_output = (run.outputs or {}).get("llm_output") or {}
        response_id = llm_output.get("id") if hasattr(llm_output, "get") else None
        if isinstance(response_id, str) and response_id.startswith("chat"):
            span.update_name(f"{InferenceOperationType.CHAT.value.lower()} {span.name}")
    elif run.run_type.lower() == "tool":
        span.update_name(f"execute_tool {span.name}")
    span.set_attributes(dict(get_attributes_from_context()))
    span.set_attributes(
        dict(
            flatten(
                chain(
                    add_operation_type(run),
                    prompts(run.inputs),
                    input_messages(run.inputs),
                    output_messages(run.outputs),
                    invocation_parameters(run),
                    llm_provider(run.extra),
                    model_name(run.outputs, run.extra),
                    token_counts(run.outputs),
                    function_calls(run.outputs),
                    tools(run),
                    metadata(run),
                )
            )
        )
    )
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
# Copyright (c) Microsoft. All rights reserved.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations

import logging
from collections.abc import Callable, Collection
from typing import Any
from uuid import UUID

import langchain_core
import opentelemetry.trace as optel_trace
from langchain_core.callbacks import BaseCallbackManager
from microsoft_agents_a365.observability.core.config import (
    get_tracer,
    get_tracer_provider,
    is_configured,
)
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.trace import Span
from wrapt import wrap_function_wrapper

from microsoft_agents_a365.observability.extensions.langchain.tracer import CustomLangChainTracer
|
|
22
|
+
|
|
23
|
+
_INSTRUMENTS: str = "langchain_core >= 0.1.0"
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class CustomLangChainInstrumentor(BaseInstrumentor):
    """
    Minimal instrumentor that attaches a CustomLangChainTracer to every new
    LangChain BaseCallbackManager so runs produce OpenTelemetry spans.
    """

    def __init__(self) -> None:
        """Validate that the tracing SDK is configured and auto-instrument.

        Raises:
            RuntimeError: If the Agent 365 tracing SDK has not been configured.
        """
        if not is_configured():
            raise RuntimeError(
                "Tracing SDK is not configured. Configure it before using this instrumentor."
            )
        super().__init__()
        self._tracer: CustomLangChainTracer | None = None
        self._original_cb_init: Callable[..., None] | None = None
        self.instrument()

    # ---- BaseInstrumentor API -------------------------------------------------

    def instrumentation_dependencies(self) -> Collection[str]:
        """Declare the package requirement this instrumentor targets."""
        return (_INSTRUMENTS,)

    def _instrument(self, **kwargs: Any) -> None:
        """Create the tracer and wrap BaseCallbackManager.__init__ to attach it."""
        tracer_name: str | None = kwargs.get("tracer_name")
        tracer_version: str | None = kwargs.get("tracer_version")

        # Prefer the Agent 365 tracer; fall back to OpenTelemetry's default if needed.
        try:
            tracer = get_tracer(tracer_name, tracer_version)
        except Exception:
            tracer = optel_trace.get_tracer(tracer_name, tracer_version)

        # Ensure tracer provider exists (ignore returned value; side-effect is enough).
        try:
            get_tracer_provider()
        except Exception:
            optel_trace.get_tracer_provider()

        self._tracer = CustomLangChainTracer(
            tracer,
            bool(kwargs.get("separate_trace_from_runtime_context")),
        )

        # Save and wrap BaseCallbackManager.__init__ to attach the processor once per instance.
        self._original_cb_init = langchain_core.callbacks.BaseCallbackManager.__init__
        wrap_function_wrapper(
            module="langchain_core.callbacks",
            name="BaseCallbackManager.__init__",
            wrapper=_BaseCallbackManagerInit(self._tracer),
        )

    def _uninstrument(self, **kwargs: Any) -> None:
        """Restore the original BaseCallbackManager.__init__ and drop the tracer."""
        if self._original_cb_init is not None:
            langchain_core.callbacks.BaseCallbackManager.__init__ = self._original_cb_init  # type: ignore[assignment]
            self._original_cb_init = None
        self._tracer = None

    # ---- Helpers used by module-level functions -------------------------------

    def get_span(self, run_id: UUID) -> Span | None:
        """Return the span for a specific LangChain run_id, if available."""
        if not self._tracer:
            # Fixed: was a `print` with a stale class name; use logging instead.
            logging.getLogger(__name__).warning(
                "Missing tracer; call CustomLangChainInstrumentor().instrument() first."
            )
            return None
        # CustomLangChainTracer is expected to expose get_span(run_id).
        get_span_fn = getattr(self._tracer, "get_span", None)
        return get_span_fn(run_id) if callable(get_span_fn) else None

    def get_ancestors(self, run_id: UUID) -> list[Span]:
        """Return ancestor spans from the run's parent up to the root (nearest first)."""
        if not self._tracer:
            logging.getLogger(__name__).warning(
                "Missing tracer; call CustomLangChainInstrumentor().instrument() first."
            )
            return []

        # Expect the processor to keep a run_map with parent linkage (string keys).
        run_map = getattr(self._tracer, "run_map", {}) or {}
        ancestors: list[Span] = []

        run = run_map.get(str(run_id))
        if not run:
            return ancestors

        # Visited-set guard: a cyclic parent linkage would otherwise loop forever.
        seen: set[UUID] = set()
        ancestor_id = getattr(run, "parent_run_id", None)
        while ancestor_id and ancestor_id not in seen:
            seen.add(ancestor_id)
            span = self.get_span(ancestor_id)
            if span:
                ancestors.append(span)
            run = run_map.get(str(ancestor_id))
            ancestor_id = getattr(run, "parent_run_id", None) if run else None

        return ancestors
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class _BaseCallbackManagerInit:
|
|
120
|
+
"""Post-constructor hook that adds the TraceProcessor once (inheritable)."""
|
|
121
|
+
|
|
122
|
+
__slots__ = ("_processor",)
|
|
123
|
+
|
|
124
|
+
def __init__(self, processor: CustomLangChainTracer):
|
|
125
|
+
self._processor = processor
|
|
126
|
+
|
|
127
|
+
def __call__(
|
|
128
|
+
self,
|
|
129
|
+
wrapped: Callable[..., None],
|
|
130
|
+
instance: BaseCallbackManager,
|
|
131
|
+
args: tuple[Any, ...],
|
|
132
|
+
kwargs: dict[str, Any],
|
|
133
|
+
) -> None:
|
|
134
|
+
wrapped(*args, **kwargs) # run original __init__
|
|
135
|
+
# Avoid duplicates: only add if a handler of the same type isn’t present.
|
|
136
|
+
if not any(isinstance(h, type(self._processor)) for h in instance.inheritable_handlers):
|
|
137
|
+
instance.add_handler(self._processor, inherit=True)
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
# ------------------------------ Convenience APIs ------------------------------
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _current_parent_run_id() -> UUID | None:
    """Best-effort lookup of the current parent run_id from LangChain's
    runnable-config context variable; None when unavailable."""
    runnable_config = langchain_core.runnables.config.var_child_runnable_config.get()
    if not isinstance(runnable_config, dict):
        return None
    managers = (
        value
        for value in runnable_config.values()
        if isinstance(value, langchain_core.callbacks.BaseCallbackManager)
    )
    for manager in managers:
        if manager.parent_run_id:
            return manager.parent_run_id
    return None
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def get_current_span() -> Span | None:
    """Return the span of the current context's parent run, or None."""
    parent_id = _current_parent_run_id()
    if parent_id:
        return CustomLangChainInstrumentor().get_span(parent_id)
    return None
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def get_ancestor_spans() -> list[Span]:
    """
    Return ancestor spans for the current context (immediate parent → root).
    """
    parent_id = _current_parent_run_id()
    if parent_id:
        return CustomLangChainInstrumentor().get_ancestors(parent_id)
    return []
|
|
@@ -0,0 +1,516 @@
|
|
|
1
|
+
# Copyright (c) Microsoft. All rights reserved.
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from collections.abc import Iterable, Iterator, Mapping, Sequence
|
|
5
|
+
from copy import deepcopy
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from langchain_core.messages import BaseMessage
|
|
9
|
+
from langchain_core.tracers.schemas import Run
|
|
10
|
+
from microsoft_agents_a365.observability.core.constants import (
|
|
11
|
+
GEN_AI_INPUT_MESSAGES_KEY,
|
|
12
|
+
GEN_AI_OPERATION_NAME_KEY,
|
|
13
|
+
GEN_AI_OUTPUT_MESSAGES_KEY,
|
|
14
|
+
GEN_AI_PROVIDER_NAME_KEY,
|
|
15
|
+
GEN_AI_REQUEST_MODEL_KEY,
|
|
16
|
+
GEN_AI_RESPONSE_FINISH_REASONS_KEY,
|
|
17
|
+
GEN_AI_RESPONSE_ID_KEY,
|
|
18
|
+
GEN_AI_SYSTEM_INSTRUCTIONS_KEY,
|
|
19
|
+
GEN_AI_TOOL_ARGS_KEY,
|
|
20
|
+
GEN_AI_TOOL_CALL_ID_KEY,
|
|
21
|
+
GEN_AI_TOOL_CALL_RESULT_KEY,
|
|
22
|
+
GEN_AI_TOOL_DESCRIPTION_KEY,
|
|
23
|
+
GEN_AI_TOOL_NAME_KEY,
|
|
24
|
+
GEN_AI_TOOL_TYPE_KEY,
|
|
25
|
+
GEN_AI_USAGE_INPUT_TOKENS_KEY,
|
|
26
|
+
GEN_AI_USAGE_OUTPUT_TOKENS_KEY,
|
|
27
|
+
SESSION_ID_KEY,
|
|
28
|
+
)
|
|
29
|
+
from microsoft_agents_a365.observability.core.inference_operation_type import InferenceOperationType
|
|
30
|
+
from microsoft_agents_a365.observability.core.utils import (
|
|
31
|
+
get_first_value,
|
|
32
|
+
safe_json_dumps,
|
|
33
|
+
stop_on_exception,
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
# Error strings matching these regexes are treated as success in _update_span
# (presumably agent control-flow signals rather than real failures — the
# tracer calls them "agent control exceptions"; confirm against LangGraph).
IGNORED_EXCEPTION_PATTERNS = [
    r"^Command\(",
    r"^ParentCommand\(",
]

# Metadata keys LangChain apps commonly use to carry a conversation/session id.
LANGCHAIN_SESSION_ID = "session_id"
LANGCHAIN_CONVERSATION_ID = "conversation_id"
LANGCHAIN_THREAD_ID = "thread_id"
|
|
45
|
+
|
|
46
|
+
@stop_on_exception
def prompts(inputs: Mapping[str, Any] | None) -> Iterator[tuple[str, list[str]]]:
    """Yield the run's prompt strings as system instructions, if present."""
    if not inputs:
        return
    assert hasattr(inputs, "get"), f"expected Mapping, found {type(inputs)}"
    prompt_list = inputs.get("prompts")
    if prompt_list:
        yield GEN_AI_SYSTEM_INSTRUCTIONS_KEY, prompt_list
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@stop_on_exception
def _extract_message_kwargs(message_data: Mapping[str, Any] | None) -> Iterator[tuple[str, Any]]:
    """Yield content, tool_call_id, and name attributes from a message's kwargs.

    Note: the return annotation was ``Iterator[[str, Any]]`` — a list is not a
    valid type argument; it is corrected to ``Iterator[tuple[str, Any]]`` to
    match the tuples actually yielded (and the sibling extractors).
    """
    if not message_data:
        return
    assert hasattr(message_data, "get"), f"expected Mapping, found {type(message_data)}"
    if kwargs := message_data.get("kwargs"):
        assert hasattr(kwargs, "get"), f"expected Mapping, found {type(kwargs)}"
        if content := kwargs.get("content"):
            # Just yield as-is (string or list)
            yield "message.content", content
        if tool_call_id := kwargs.get("tool_call_id"):
            assert isinstance(tool_call_id, str), f"expected str, found {type(tool_call_id)}"
            yield GEN_AI_TOOL_CALL_ID_KEY, tool_call_id
        if name := kwargs.get("name"):
            assert isinstance(name, str), f"expected str, found {type(name)}"
            yield "message.name", name
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@stop_on_exception
def _extract_message_additional_kwargs(
    message_data: Mapping[str, Any] | None,
) -> Iterator[tuple[str, Any]]:
    """Yield tool name/arguments found under kwargs.additional_kwargs.function_call.

    Uses guard clauses (early returns) instead of the original nested walrus
    conditionals; the yielded attributes are identical.
    """
    if not message_data:
        return
    assert hasattr(message_data, "get"), f"expected Mapping, found {type(message_data)}"
    kwargs = message_data.get("kwargs")
    if not kwargs:
        return
    assert hasattr(kwargs, "get"), f"expected Mapping, found {type(kwargs)}"
    additional_kwargs = kwargs.get("additional_kwargs")
    if not additional_kwargs:
        return
    assert hasattr(additional_kwargs, "get"), (
        f"expected Mapping, found {type(additional_kwargs)}"
    )
    function_call = additional_kwargs.get("function_call")
    if not function_call:
        return
    assert hasattr(function_call, "get"), (
        f"expected Mapping, found {type(function_call)}"
    )
    name = function_call.get("name")
    if name:
        assert isinstance(name, str), f"expected str, found {type(name)}"
        yield GEN_AI_TOOL_NAME_KEY, name
    arguments = function_call.get("arguments")
    if arguments:
        # Arguments are emitted verbatim when already a string, else serialized.
        if isinstance(arguments, str):
            yield GEN_AI_TOOL_ARGS_KEY, arguments
        else:
            yield GEN_AI_TOOL_ARGS_KEY, safe_json_dumps(arguments)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
@stop_on_exception
def _get_tool_call(tool_call: Mapping[str, Any] | None) -> Iterator[tuple[str, Any]]:
    """Yield id/name/arguments attributes for a single tool call.

    Handles both the nested shape ({"function": {"name", "arguments"}}) and
    the flat shape ({"name", "args"}).
    """
    if not tool_call:
        return
    assert hasattr(tool_call, "get"), f"expected Mapping, found {type(tool_call)}"

    # id
    call_id = tool_call.get("id")
    if call_id is not None:
        yield GEN_AI_TOOL_CALL_ID_KEY, call_id

    fn = tool_call.get("function")
    if hasattr(fn, "get"):
        # Nested ("function") shape.
        call_name, call_arguments = fn.get("name"), fn.get("arguments")
    else:
        # Flat shape.
        call_name, call_arguments = tool_call.get("name"), tool_call.get("args")

    # name
    if call_name is not None:
        assert isinstance(call_name, str), f"expected str, found {type(call_name)}"
        yield GEN_AI_TOOL_NAME_KEY, call_name

    # arguments -> always emit a JSON string
    if call_arguments is not None:
        yield (
            GEN_AI_TOOL_ARGS_KEY,
            call_arguments if isinstance(call_arguments, str) else safe_json_dumps(call_arguments),
        )
|
136
|
+
|
|
137
|
+
def _process_tool_calls(tool_calls: Any) -> str:
|
|
138
|
+
"""Return all tool calls as a single compact string (JSON-joined), or '' if none."""
|
|
139
|
+
if not tool_calls:
|
|
140
|
+
return ""
|
|
141
|
+
assert isinstance(tool_calls, Iterable), f"expected Iterable, found {type(tool_calls)}"
|
|
142
|
+
|
|
143
|
+
parts: list[str] = []
|
|
144
|
+
for tool_call in tool_calls:
|
|
145
|
+
data = dict(_get_tool_call(tool_call))
|
|
146
|
+
if data:
|
|
147
|
+
# Compact, stable representation
|
|
148
|
+
parts.append(safe_json_dumps(data, separators=(",", ":"), sort_keys=True))
|
|
149
|
+
|
|
150
|
+
return "; ".join(parts)
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
@stop_on_exception
def _extract_message_tool_calls(
    message_data: Mapping[str, Any] | None,
) -> Iterator[tuple[str, str]]:
    """Yield a single "message.tool_calls" attribute aggregating tool calls found
    in any of the locations LangChain may place them: the top-level "tool_calls",
    "kwargs.tool_calls", and "kwargs.additional_kwargs.tool_calls".
    """
    if not message_data:
        return
    assert hasattr(message_data, "get"), f"expected Mapping, found {type(message_data)}"

    # Collect tool_calls from multiple possible locations
    all_tool_calls: list[str] = []

    def collect(calls: Any) -> None:
        # _process_tool_calls always returns a str, so the original
        # isinstance(processed, list) branch was dead code — removed.
        if calls and (processed := _process_tool_calls(calls)):
            all_tool_calls.append(processed)

    collect(message_data.get("tool_calls"))

    if kwargs := message_data.get("kwargs"):
        assert hasattr(kwargs, "get"), f"expected Mapping, found {type(kwargs)}"
        collect(kwargs.get("tool_calls"))

        if additional_kwargs := kwargs.get("additional_kwargs"):
            assert hasattr(additional_kwargs, "get"), (
                f"expected Mapping, found {type(additional_kwargs)}"
            )
            collect(additional_kwargs.get("tool_calls"))

    if all_tool_calls:
        # Return all as a single string (comma-separated)
        yield "message.tool_calls", ", ".join(all_tool_calls)
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
@stop_on_exception
def _parse_message_data(message_data: Mapping[str, Any] | None) -> Iterator[tuple[str, Any]]:
    """Parses message data to grab message role, content, tool calls, etc."""
    extractors = (
        _extract_message_kwargs,
        _extract_message_additional_kwargs,
        _extract_message_tool_calls,
    )
    for extractor in extractors:
        yield from extractor(message_data)
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
@stop_on_exception
def input_messages(
    inputs: Mapping[str, Any] | None,
) -> Iterator[tuple[str, list[dict[str, Any]]]]:
    """Yield (GEN_AI_INPUT_MESSAGES_KEY, parsed_messages) if chat messages are present.

    Handles several input shapes for the first entry under inputs["messages"]:
    a list of messages, a single BaseMessage, a single mapping, or a
    (role, content) 2-tuple.
    """
    if not inputs:
        return
    assert hasattr(inputs, "get"), f"expected Mapping, found {type(inputs)}"
    # There may be more than one set of messages. We'll use just the first set.
    if not (multiple_messages := inputs.get("messages")):
        return
    assert isinstance(multiple_messages, Iterable), (
        f"expected Iterable, found {type(multiple_messages)}"
    )
    # This will only get the first set of messages.
    # NOTE(review): if "messages" is a one-shot generator, next() consumes its
    # first element permanently — presumably callers pass lists; confirm.
    if not (first_messages := next(iter(multiple_messages), None)):
        return
    parsed_messages = []
    if isinstance(first_messages, list):
        # A list of messages: each may be a BaseMessage (serialize via
        # to_json()) or an already-serialized mapping.
        for message_data in first_messages:
            if isinstance(message_data, BaseMessage):
                parsed_messages.append(dict(_parse_message_data(message_data.to_json())))
            elif hasattr(message_data, "get"):
                parsed_messages.append(dict(_parse_message_data(message_data)))
            else:
                raise ValueError(f"failed to parse message of type {type(message_data)}")
    elif isinstance(first_messages, BaseMessage):
        parsed_messages.append(dict(_parse_message_data(first_messages.to_json())))
    elif hasattr(first_messages, "get"):
        parsed_messages.append(dict(_parse_message_data(first_messages)))
    elif isinstance(first_messages, Sequence) and len(first_messages) == 2:
        # See e.g. https://github.com/langchain-ai/langchain/blob/18cf457eec106d99e0098b42712299f5d0daa798/libs/core/langchain_core/messages/utils.py#L317 # noqa: E501
        role, content = first_messages
        # NOTE(review): uppercase literal keys differ from the snake_case keys
        # the other branches produce via _parse_message_data — confirm intended.
        parsed_messages.append({"MESSAGE_ROLE": role, "MESSAGE_CONTENT": content})
    else:
        raise ValueError(f"failed to parse messages of type {type(first_messages)}")
    if parsed_messages:
        yield GEN_AI_INPUT_MESSAGES_KEY, parsed_messages
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
@stop_on_exception
def metadata(run: Run) -> Iterator[tuple[str, str]]:
    """
    Takes the LangChain chain metadata and adds it to the trace
    """
    if not run.extra:
        return
    run_metadata = run.extra.get("metadata")
    if not run_metadata:
        return
    assert isinstance(run_metadata, Mapping), f"expected Mapping, found {type(run_metadata)}"
    # First non-empty identifier wins, in priority order.
    for key in (LANGCHAIN_SESSION_ID, LANGCHAIN_CONVERSATION_ID, LANGCHAIN_THREAD_ID):
        if session_id := run_metadata.get(key):
            yield SESSION_ID_KEY, session_id
            return
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
@stop_on_exception
def output_messages(
    outputs: Mapping[str, Any] | None,
) -> Iterator[tuple[str, list[dict[str, Any]]]]:
    """Yield output chat messages (and the response id, for LLMResult outputs) if present.

    Emits (GEN_AI_RESPONSE_ID_KEY, id) when outputs is an LLMResult carrying an
    id in llm_output, then (GEN_AI_OUTPUT_MESSAGES_KEY, parsed_messages) built
    from the first set of generations.
    """
    if not outputs:
        return
    assert hasattr(outputs, "get"), f"expected Mapping, found {type(outputs)}"
    output_type = outputs.get("type")
    if output_type and output_type.lower() == "llmresult":
        llm_output = outputs.get("llm_output")
        if llm_output and hasattr(llm_output, "get"):
            response_id = llm_output.get("id")
            if response_id:
                yield GEN_AI_RESPONSE_ID_KEY, response_id
    # There may be more than one set of generations. We'll use just the first set.
    if not (multiple_generations := outputs.get("generations")):
        return
    assert isinstance(multiple_generations, Iterable), (
        f"expected Iterable, found {type(multiple_generations)}"
    )
    # This will only get the first set of generations.
    if not (first_generations := next(iter(multiple_generations), None)):
        return
    assert isinstance(first_generations, Iterable), (
        f"expected Iterable, found {type(first_generations)}"
    )
    parsed_messages = []
    for generation in first_generations:
        assert hasattr(generation, "get"), f"expected Mapping, found {type(generation)}"
        if message_data := generation.get("message"):
            # BaseMessage instances are serialized first; mappings parse as-is.
            if isinstance(message_data, BaseMessage):
                parsed_messages.append(dict(_parse_message_data(message_data.to_json())))
            elif hasattr(message_data, "get"):
                parsed_messages.append(dict(_parse_message_data(message_data)))
            else:
                # Fixed wording ("failed to parse") for consistency with input_messages.
                raise ValueError(f"failed to parse message of type {type(message_data)}")
    if parsed_messages:
        yield GEN_AI_OUTPUT_MESSAGES_KEY, parsed_messages
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
@stop_on_exception
def invocation_parameters(run: Run) -> Iterator[tuple[str, str]]:
    """Yields invocation parameters if present.

    Only applies to runs of type "llm"; reads run.extra["invocation_params"].
    """
    if run.run_type.lower() != "llm":
        return
    if not (extra := run.extra):
        return
    assert hasattr(extra, "get"), f"expected Mapping, found {type(extra)}"
    if invocation_parameters := extra.get("invocation_params"):
        assert isinstance(invocation_parameters, Mapping), (
            f"expected Mapping, found {type(invocation_parameters)}"
        )
        # NOTE(review): invocation parameters are emitted under the *input
        # messages* attribute key — this looks like a mis-mapped attribute;
        # confirm the intended key before relying on it downstream.
        yield GEN_AI_INPUT_MESSAGES_KEY, safe_json_dumps(invocation_parameters)
        tools = invocation_parameters.get("tools", [])
        for idx, tool in enumerate(tools):
            # NOTE(review): these are tool *definitions*, yet they are emitted
            # under the tool-*arguments* key (indexed) — confirm intended.
            yield f"{GEN_AI_TOOL_ARGS_KEY}.{idx}", safe_json_dumps(tool)
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
@stop_on_exception
def llm_provider(extra: Mapping[str, Any] | None) -> Iterator[tuple[str, str]]:
    """Yield the lowercased LangSmith provider name ("ls_provider"), if present."""
    if not extra:
        return
    meta = extra.get("metadata")
    if not meta:
        return
    provider = meta.get("ls_provider")
    if not provider:
        return
    yield GEN_AI_PROVIDER_NAME_KEY, provider.lower()
|
|
320
|
+
|
|
321
|
+
|
|
322
|
+
@stop_on_exception
def model_name(
    outputs: Mapping[str, Any] | None,
    extra: Mapping[str, Any] | None,
) -> Iterator[tuple[str, str]]:
    """Yields model name if present.

    Fallback order: outputs["llm_output"] ("model_name" then "model"), then
    extra["metadata"]["ls_model_name"], then extra["invocation_params"]
    ("model_name" then "model"). Yields at most once.
    """
    # 1) Prefer the model name reported back in the LLM output itself.
    if (
        outputs
        and hasattr(outputs, "get")
        and (llm_output := outputs.get("llm_output"))
        and hasattr(llm_output, "get")
    ):
        for key in "model_name", "model":
            if name := str(llm_output.get(key) or "").strip():
                yield GEN_AI_REQUEST_MODEL_KEY, name
                return
    if not extra:
        return
    assert hasattr(extra, "get"), f"expected Mapping, found {type(extra)}"
    # 2) LangSmith metadata.
    if (
        (metadata := extra.get("metadata"))
        and hasattr(metadata, "get")
        and (ls_model_name := str(metadata.get("ls_model_name") or "").strip())
    ):
        yield GEN_AI_REQUEST_MODEL_KEY, ls_model_name
        return
    # 3) Invocation parameters (no str()/strip() normalization here,
    # unlike the branches above).
    if not (invocation_params := extra.get("invocation_params")):
        return
    for key in ["model_name", "model"]:
        if name := invocation_params.get(key):
            yield GEN_AI_REQUEST_MODEL_KEY, name
            return
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
@stop_on_exception
def token_counts(outputs: Mapping[str, Any] | None) -> Iterator[tuple[str, int]]:
    """Yields token count information if present.

    Reads the token-usage mapping from non-streaming LLM outputs and emits
    input/output token counts, handling OpenAI-, Anthropic-, and Gemini-style
    key names, plus langchain_core UsageMetadata-shaped mappings.
    """
    if not (token_usage := (parse_token_usage_for_non_streaming_outputs(outputs))):
        return
    # Pass 1: flat provider-specific keys for input/output counts.
    for attribute_name, keys in [
        (
            GEN_AI_USAGE_INPUT_TOKENS_KEY,
            (
                "prompt_tokens",
                "input_tokens",  # Anthropic-specific key
                "prompt_token_count",  # Gemini-specific key - https://ai.google.dev/gemini-api/docs/tokens?lang=python
            ),
        ),
        (
            GEN_AI_USAGE_OUTPUT_TOKENS_KEY,
            (
                "completion_tokens",
                "output_tokens",  # Anthropic-specific key
                "candidates_token_count",  # Gemini-specific key
            ),
        ),
    ]:
        if (token_count := get_first_value(token_usage, keys)) is not None:
            yield attribute_name, token_count

    # Pass 2: OpenAI nested details.
    # NOTE(review): reasoning-token counts are emitted under the *finish
    # reasons* attribute key — this looks mis-mapped; confirm intended key.
    for attribute_name, details_key, keys in [
        (
            GEN_AI_RESPONSE_FINISH_REASONS_KEY,
            "completion_tokens_details",
            ("reasoning_tokens",),
        ),
    ]:
        if (details := token_usage.get(details_key)) is not None:
            if (token_count := get_first_value(details, keys)) is not None:
                yield attribute_name, token_count

    # Pass 3: maps langchain_core.messages.ai.UsageMetadata object.
    # NOTE(review): input/output token attributes may be yielded a second time
    # here if both key styles are present — confirm duplicates are acceptable.
    for attribute_name, details_key_or_none, keys in [
        (GEN_AI_USAGE_INPUT_TOKENS_KEY, None, ("input_tokens",)),
        (GEN_AI_USAGE_OUTPUT_TOKENS_KEY, None, ("output_tokens",)),
        (
            GEN_AI_RESPONSE_FINISH_REASONS_KEY,
            "output_token_details",
            ("reasoning",),
        ),
    ]:
        details = token_usage.get(details_key_or_none) if details_key_or_none else token_usage
        if details is not None:
            if (token_count := get_first_value(details, keys)) is not None:
                yield attribute_name, token_count
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
def parse_token_usage_for_non_streaming_outputs(
    outputs: Mapping[str, Any] | None,
) -> Any:
    """
    Parses output to get token usage information for non-streaming LLMs, i.e.,
    when `stream_usage` is set to false.
    """
    if not outputs or not hasattr(outputs, "get"):
        return None
    llm_output = outputs.get("llm_output")
    if not llm_output or not hasattr(llm_output, "get"):
        return None
    usage = get_first_value(
        llm_output,
        (
            "token_usage",
            "usage",  # Anthropic-specific key
        ),
    )
    # Normalize falsy usage values to None, matching the guard behavior.
    return usage or None
|
|
434
|
+
|
|
435
|
+
|
|
436
|
+
@stop_on_exception
def function_calls(outputs: Mapping[str, Any] | None) -> Iterator[tuple[str, str]]:
    """
    Extract a single OpenAI-style function call from model outputs and emit
    GenAI tool attributes as (key, value) pairs. Arguments/result are JSON strings.

    """
    if not outputs:
        return
    assert hasattr(outputs, "get"), f"expected Mapping, found {type(outputs)}"

    # Typical OpenAI LangChain shape:
    # outputs["generations"][0][0]["message"]["kwargs"]["additional_kwargs"]["function_call"]
    try:
        fc = deepcopy(
            outputs["generations"][0][0]["message"]["kwargs"]["additional_kwargs"]["function_call"]
        )
    except Exception:
        # Best-effort extraction: absent or differently-shaped outputs are skipped.
        return

    if not isinstance(fc, dict):
        return

    # Tool type (explicit)
    yield GEN_AI_OPERATION_NAME_KEY, "execute_tool"
    yield GEN_AI_TOOL_TYPE_KEY, "function"

    # String-valued fields map straight through, in this fixed order.
    for field, attribute in (
        ("name", GEN_AI_TOOL_NAME_KEY),
        ("description", GEN_AI_TOOL_DESCRIPTION_KEY),
        ("id", GEN_AI_TOOL_CALL_ID_KEY),
    ):
        value = fc.get(field)
        if isinstance(value, str):
            yield attribute, value

    args = fc.get("arguments")
    if args is not None:
        if isinstance(args, str):
            # If it's a JSON string, try to parse then re-dump for normalization;
            # non-JSON strings are stored raw (JSON-encoded).
            try:
                args_json = safe_json_dumps(json.loads(args))
            except Exception:
                args_json = safe_json_dumps(args)
        else:
            args_json = safe_json_dumps(args)
        yield GEN_AI_TOOL_ARGS_KEY, args_json

    result = fc.get("result")
    if result is not None:
        yield GEN_AI_TOOL_CALL_RESULT_KEY, safe_json_dumps(result)
|
|
491
|
+
|
|
492
|
+
|
|
493
|
+
@stop_on_exception
def tools(run: Run) -> Iterator[tuple[str, str]]:
    """Yields tool attributes if present."""
    if run.run_type.lower() != "tool":
        return
    serialized = run.serialized
    if not serialized:
        return
    assert hasattr(serialized, "get"), f"expected Mapping, found {type(serialized)}"
    yield GEN_AI_TOOL_TYPE_KEY, "extension"
    # Emit name then description, skipping empty values.
    for field, attribute in (
        ("name", GEN_AI_TOOL_NAME_KEY),
        ("description", GEN_AI_TOOL_DESCRIPTION_KEY),
    ):
        if value := serialized.get(field):
            yield attribute, value
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
def add_operation_type(run: Run) -> Iterator[tuple[str, str]]:
    """Yields operation type based on run type."""
    run_type = run.run_type.lower()
    if run_type == "llm":
        operation = InferenceOperationType.CHAT.value.lower()
    elif run_type == "chat_model":
        operation = "chat"
    elif run_type == "tool":
        operation = "execute_tool"
    else:
        # Other run types (chain, retriever, ...) carry no operation attribute.
        return
    yield GEN_AI_OPERATION_NAME_KEY, operation
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: microsoft-agents-a365-observability-extensions-langchain
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: LangChain observability and tracing extensions for Microsoft Agent 365
|
|
5
|
+
Author-email: Microsoft <support@microsoft.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/microsoft/Agent365-python
|
|
8
|
+
Project-URL: Repository, https://github.com/microsoft/Agent365-python
|
|
9
|
+
Project-URL: Issues, https://github.com/microsoft/Agent365-python/issues
|
|
10
|
+
Project-URL: Documentation, https://github.com/microsoft/Agent365-python/tree/main/libraries/microsoft-agents-a365-observability-extensions-langchain
|
|
11
|
+
Keywords: observability,telemetry,tracing,opentelemetry,langchain,agents,ai
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Operating System :: OS Independent
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Classifier: Topic :: System :: Monitoring
|
|
21
|
+
Requires-Python: >=3.11
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: microsoft-agents-a365-observability-core>=0.0.0
|
|
24
|
+
Requires-Dist: langchain>=0.1.0
|
|
25
|
+
Requires-Dist: langchain-core>=0.1.0
|
|
26
|
+
Requires-Dist: opentelemetry-api>=1.36.0
|
|
27
|
+
Requires-Dist: opentelemetry-sdk>=1.36.0
|
|
28
|
+
Requires-Dist: opentelemetry-instrumentation>=0.47b0
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
31
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
32
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
33
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
34
|
+
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
35
|
+
Provides-Extra: test
|
|
36
|
+
Requires-Dist: pytest>=7.0.0; extra == "test"
|
|
37
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
|
|
38
|
+
|
|
39
|
+
# microsoft-agents-a365-observability-extensions-langchain
|
|
40
|
+
|
|
41
|
+
[](https://pypi.org/project/microsoft-agents-a365-observability-extensions-langchain)
|
|
42
|
+
[](https://pypi.org/project/microsoft-agents-a365-observability-extensions-langchain)
|
|
43
|
+
|
|
44
|
+
Observability extensions for LangChain framework. This package provides OpenTelemetry tracing integration for LangChain-based AI applications with automatic instrumentation for chains, agents, and tools.
|
|
45
|
+
|
|
46
|
+
## Installation
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
pip install microsoft-agents-a365-observability-extensions-langchain
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## Usage
|
|
53
|
+
|
|
54
|
+
For usage examples and detailed documentation, see the [Observability documentation](https://learn.microsoft.com/microsoft-agent-365/developer/observability?tabs=python) on Microsoft Learn.
|
|
55
|
+
|
|
56
|
+
## Support
|
|
57
|
+
|
|
58
|
+
For issues, questions, or feedback:
|
|
59
|
+
|
|
60
|
+
- File issues in the [GitHub Issues](https://github.com/microsoft/Agent365-python/issues) section
|
|
61
|
+
- See the [main documentation](../../../README.md) for more information
|
|
62
|
+
|
|
63
|
+
## Trademarks
|
|
64
|
+
|
|
65
|
+
*Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries. The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks. Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653.*
|
|
66
|
+
|
|
67
|
+
## License
|
|
68
|
+
|
|
69
|
+
Copyright (c) Microsoft Corporation. All rights reserved.
|
|
70
|
+
|
|
71
|
+
Licensed under the MIT License - see the [LICENSE](../../../LICENSE.md) file for details.
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
setup.py
|
|
4
|
+
microsoft_agents_a365/observability/extensions/langchain/__init__.py
|
|
5
|
+
microsoft_agents_a365/observability/extensions/langchain/tracer.py
|
|
6
|
+
microsoft_agents_a365/observability/extensions/langchain/tracer_instrumentor.py
|
|
7
|
+
microsoft_agents_a365/observability/extensions/langchain/utils.py
|
|
8
|
+
microsoft_agents_a365_observability_extensions_langchain.egg-info/PKG-INFO
|
|
9
|
+
microsoft_agents_a365_observability_extensions_langchain.egg-info/SOURCES.txt
|
|
10
|
+
microsoft_agents_a365_observability_extensions_langchain.egg-info/dependency_links.txt
|
|
11
|
+
microsoft_agents_a365_observability_extensions_langchain.egg-info/requires.txt
|
|
12
|
+
microsoft_agents_a365_observability_extensions_langchain.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
microsoft-agents-a365-observability-core>=0.0.0
|
|
2
|
+
langchain>=0.1.0
|
|
3
|
+
langchain-core>=0.1.0
|
|
4
|
+
opentelemetry-api>=1.36.0
|
|
5
|
+
opentelemetry-sdk>=1.36.0
|
|
6
|
+
opentelemetry-instrumentation>=0.47b0
|
|
7
|
+
|
|
8
|
+
[dev]
|
|
9
|
+
pytest>=7.0.0
|
|
10
|
+
pytest-asyncio>=0.21.0
|
|
11
|
+
ruff>=0.1.0
|
|
12
|
+
black>=23.0.0
|
|
13
|
+
mypy>=1.0.0
|
|
14
|
+
|
|
15
|
+
[test]
|
|
16
|
+
pytest>=7.0.0
|
|
17
|
+
pytest-asyncio>=0.21.0
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
microsoft_agents_a365
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68", "wheel", "tzdata"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "microsoft-agents-a365-observability-extensions-langchain"
|
|
7
|
+
dynamic = ["version"]
|
|
8
|
+
authors = [
|
|
9
|
+
{ name = "Microsoft", email = "support@microsoft.com" },
|
|
10
|
+
]
|
|
11
|
+
description = "LangChain observability and tracing extensions for Microsoft Agent 365"
|
|
12
|
+
readme = "README.md"
|
|
13
|
+
requires-python = ">=3.11"
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"Programming Language :: Python :: 3",
|
|
18
|
+
"Programming Language :: Python :: 3.11",
|
|
19
|
+
"Programming Language :: Python :: 3.12",
|
|
20
|
+
"Operating System :: OS Independent",
|
|
21
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
22
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
23
|
+
"Topic :: System :: Monitoring",
|
|
24
|
+
]
|
|
25
|
+
license = {text = "MIT"}
|
|
26
|
+
keywords = ["observability", "telemetry", "tracing", "opentelemetry", "langchain", "agents", "ai"]
|
|
27
|
+
dependencies = [
|
|
28
|
+
"microsoft-agents-a365-observability-core >= 0.0.0",
|
|
29
|
+
"langchain >= 0.1.0",
|
|
30
|
+
"langchain-core >= 0.1.0",
|
|
31
|
+
"opentelemetry-api >= 1.36.0",
|
|
32
|
+
"opentelemetry-sdk >= 1.36.0",
|
|
33
|
+
"opentelemetry-instrumentation >= 0.47b0",
|
|
34
|
+
]
|
|
35
|
+
|
|
36
|
+
[project.urls]
|
|
37
|
+
Homepage = "https://github.com/microsoft/Agent365-python"
|
|
38
|
+
Repository = "https://github.com/microsoft/Agent365-python"
|
|
39
|
+
Issues = "https://github.com/microsoft/Agent365-python/issues"
|
|
40
|
+
Documentation = "https://github.com/microsoft/Agent365-python/tree/main/libraries/microsoft-agents-a365-observability-extensions-langchain"
|
|
41
|
+
|
|
42
|
+
[project.optional-dependencies]
|
|
43
|
+
dev = [
|
|
44
|
+
"pytest >= 7.0.0",
|
|
45
|
+
"pytest-asyncio >= 0.21.0",
|
|
46
|
+
"ruff >= 0.1.0",
|
|
47
|
+
"black >= 23.0.0",
|
|
48
|
+
"mypy >= 1.0.0",
|
|
49
|
+
]
|
|
50
|
+
test = [
|
|
51
|
+
"pytest >= 7.0.0",
|
|
52
|
+
"pytest-asyncio >= 0.21.0",
|
|
53
|
+
]
|
|
54
|
+
|
|
55
|
+
[tool.setuptools.packages.find]
|
|
56
|
+
where = ["."]
|
|
57
|
+
namespaces = true
|
|
58
|
+
|
|
59
|
+
[tool.setuptools]
|
|
60
|
+
license-files = ["../../LICENSE"]
|
|
61
|
+
include-package-data = true
|
|
62
|
+
|
|
63
|
+
[tool.setuptools.package-data]
|
|
64
|
+
"*" = ["../../LICENSE"]
|
|
65
|
+
|
|
66
|
+
[tool.black]
|
|
67
|
+
line-length = 100
|
|
68
|
+
target-version = ['py311']
|
|
69
|
+
|
|
70
|
+
[tool.ruff]
|
|
71
|
+
line-length = 100
|
|
72
|
+
target-version = "py311"
|
|
73
|
+
|
|
74
|
+
[tool.mypy]
|
|
75
|
+
python_version = "3.11"
|
|
76
|
+
strict = true
|
|
77
|
+
warn_return_any = true
|
|
78
|
+
warn_unused_configs = true
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Setup script: injects the CI/CD-provided version and dynamic dependency pins."""

import sys
from pathlib import Path
from os import environ
from setuptools import setup

# Get version from environment variable set by CI/CD
package_version = environ.get("AGENT365_PYTHON_SDK_PACKAGE_VERSION", "0.0.0")

# Add versioning helper to path
helper_path = Path(__file__).parent.parent.parent / "versioning" / "helper"
sys.path.insert(0, str(helper_path))

# Deliberately imported after the sys.path mutation above (E402 by design);
# setup_utils lives in the repo's versioning/helper directory, not on PyPI.
from setup_utils import get_dynamic_dependencies

# Use minimum version strategy:
# - Internal packages get: >= current_base_version (e.g., >= 0.1.0)
# - Automatically updates when you build new versions
# - Consumers can upgrade to any higher version
setup(
    version=package_version,
    install_requires=get_dynamic_dependencies(
        use_compatible_release=False,  # No upper bound
        use_exact_match=False,  # Not exact match
    ),
)
|