launchdarkly-server-sdk-ai 0.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- launchdarkly_server_sdk_ai-0.0.0/LICENSE.txt +13 -0
- launchdarkly_server_sdk_ai-0.0.0/PKG-INFO +70 -0
- launchdarkly_server_sdk_ai-0.0.0/README.md +41 -0
- launchdarkly_server_sdk_ai-0.0.0/ldai/__init__.py +1 -0
- launchdarkly_server_sdk_ai-0.0.0/ldai/client.py +92 -0
- launchdarkly_server_sdk_ai-0.0.0/ldai/testing/__init__.py +0 -0
- launchdarkly_server_sdk_ai-0.0.0/ldai/testing/test_model_config.py +162 -0
- launchdarkly_server_sdk_ai-0.0.0/ldai/tracker.py +277 -0
- launchdarkly_server_sdk_ai-0.0.0/pyproject.toml +71 -0
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
Copyright 2024 Catamorphic, Co.
|
|
2
|
+
|
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
you may not use this file except in compliance with the License.
|
|
5
|
+
You may obtain a copy of the License at
|
|
6
|
+
|
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
See the License for the specific language governing permissions and
|
|
13
|
+
limitations under the License.
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: launchdarkly-server-sdk-ai
|
|
3
|
+
Version: 0.0.0
|
|
4
|
+
Summary: LaunchDarkly SDK for AI
|
|
5
|
+
Home-page: https://docs.launchdarkly.com/sdk/ai/python
|
|
6
|
+
License: Apache-2.0
|
|
7
|
+
Author: LaunchDarkly
|
|
8
|
+
Author-email: dev@launchdarkly.com
|
|
9
|
+
Requires-Python: >=3.8
|
|
10
|
+
Classifier: Intended Audience :: Developers
|
|
11
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
|
12
|
+
Classifier: Operating System :: OS Independent
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Software Development
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
21
|
+
Requires-Dist: chevron (==0.14.0)
|
|
22
|
+
Requires-Dist: isort (>=5.13.2,<6.0.0)
|
|
23
|
+
Requires-Dist: launchdarkly-server-sdk (>=9.4.0)
|
|
24
|
+
Requires-Dist: pycodestyle (>=2.12.1,<3.0.0)
|
|
25
|
+
Project-URL: Documentation, https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/
|
|
26
|
+
Project-URL: Repository, https://github.com/launchdarkly/python-server-sdk-ai
|
|
27
|
+
Description-Content-Type: text/markdown
|
|
28
|
+
|
|
29
|
+
# LaunchDarkly Server-side AI library for Python
|
|
30
|
+
|
|
31
|
+
## LaunchDarkly overview
|
|
32
|
+
|
|
33
|
+
[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves trillions of feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today!
|
|
34
|
+
|
|
35
|
+
[](https://twitter.com/intent/follow?screen_name=launchdarkly)
|
|
36
|
+
|
|
37
|
+
## Supported Python versions
|
|
38
|
+
|
|
39
|
+
This version of the library has a minimum Python version of 3.8.
|
|
40
|
+
|
|
41
|
+
## Getting started
|
|
42
|
+
|
|
43
|
+
Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK.
|
|
44
|
+
|
|
45
|
+
## Learn more
|
|
46
|
+
|
|
47
|
+
Read our [documentation](http://docs.launchdarkly.com) for in-depth instructions on configuring and using LaunchDarkly. You can also head straight to the [reference guide for the Python SDK](http://docs.launchdarkly.com/docs/python-sdk-ai-reference).
|
|
48
|
+
|
|
49
|
+
## Contributing
|
|
50
|
+
|
|
51
|
+
We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this library.
|
|
52
|
+
|
|
53
|
+
## Verifying library build provenance with the SLSA framework
|
|
54
|
+
|
|
55
|
+
LaunchDarkly uses the [SLSA framework](https://slsa.dev/spec/v1.0/about) (Supply-chain Levels for Software Artifacts) to help developers make their supply chain more secure by ensuring the authenticity and build integrity of our published library packages. To learn more, see the [provenance guide](PROVENANCE.md).
|
|
56
|
+
|
|
57
|
+
## About LaunchDarkly
|
|
58
|
+
|
|
59
|
+
- LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can:
|
|
60
|
+
- Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases.
|
|
61
|
+
- Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?).
|
|
62
|
+
- Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file.
|
|
63
|
+
- Grant access to certain features based on user attributes, like payment plan (e.g., users on the ‘gold’ plan get access to more features than users on the ‘silver’ plan). Disable parts of your application to facilitate maintenance, without taking everything offline.
|
|
64
|
+
- LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Read [our documentation](https://docs.launchdarkly.com/sdk) for a complete list.
|
|
65
|
+
- Explore LaunchDarkly
|
|
66
|
+
- [launchdarkly.com](https://www.launchdarkly.com/ "LaunchDarkly Main Website") for more information
|
|
67
|
+
- [docs.launchdarkly.com](https://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDK reference guides
|
|
68
|
+
- [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ "LaunchDarkly API Documentation") for our API documentation
|
|
69
|
+
- [blog.launchdarkly.com](https://blog.launchdarkly.com/ "LaunchDarkly Blog Documentation") for the latest product updates
|
|
70
|
+
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# LaunchDarkly Server-side AI library for Python
|
|
2
|
+
|
|
3
|
+
## LaunchDarkly overview
|
|
4
|
+
|
|
5
|
+
[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves trillions of feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today!
|
|
6
|
+
|
|
7
|
+
[](https://twitter.com/intent/follow?screen_name=launchdarkly)
|
|
8
|
+
|
|
9
|
+
## Supported Python versions
|
|
10
|
+
|
|
11
|
+
This version of the library has a minimum Python version of 3.8.
|
|
12
|
+
|
|
13
|
+
## Getting started
|
|
14
|
+
|
|
15
|
+
Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK.
|
|
16
|
+
|
|
17
|
+
## Learn more
|
|
18
|
+
|
|
19
|
+
Read our [documentation](http://docs.launchdarkly.com) for in-depth instructions on configuring and using LaunchDarkly. You can also head straight to the [reference guide for the Python SDK](http://docs.launchdarkly.com/docs/python-sdk-ai-reference).
|
|
20
|
+
|
|
21
|
+
## Contributing
|
|
22
|
+
|
|
23
|
+
We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this library.
|
|
24
|
+
|
|
25
|
+
## Verifying library build provenance with the SLSA framework
|
|
26
|
+
|
|
27
|
+
LaunchDarkly uses the [SLSA framework](https://slsa.dev/spec/v1.0/about) (Supply-chain Levels for Software Artifacts) to help developers make their supply chain more secure by ensuring the authenticity and build integrity of our published library packages. To learn more, see the [provenance guide](PROVENANCE.md).
|
|
28
|
+
|
|
29
|
+
## About LaunchDarkly
|
|
30
|
+
|
|
31
|
+
- LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can:
|
|
32
|
+
- Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases.
|
|
33
|
+
- Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?).
|
|
34
|
+
- Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file.
|
|
35
|
+
- Grant access to certain features based on user attributes, like payment plan (e.g., users on the ‘gold’ plan get access to more features than users on the ‘silver’ plan). Disable parts of your application to facilitate maintenance, without taking everything offline.
|
|
36
|
+
- LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Read [our documentation](https://docs.launchdarkly.com/sdk) for a complete list.
|
|
37
|
+
- Explore LaunchDarkly
|
|
38
|
+
- [launchdarkly.com](https://www.launchdarkly.com/ "LaunchDarkly Main Website") for more information
|
|
39
|
+
- [docs.launchdarkly.com](https://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDK reference guides
|
|
40
|
+
- [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ "LaunchDarkly API Documentation") for our API documentation
|
|
41
|
+
- [blog.launchdarkly.com](https://blog.launchdarkly.com/ "LaunchDarkly Blog Documentation") for the latest product updates
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Package version; the trailing marker lets release-please rewrite this line on release.
__version__ = "0.0.0"  # x-release-please-version
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from typing import Any, Dict, List, Literal, Optional
|
|
3
|
+
|
|
4
|
+
import chevron
|
|
5
|
+
from ldclient import Context
|
|
6
|
+
from ldclient.client import LDClient
|
|
7
|
+
|
|
8
|
+
from ldai.tracker import LDAIConfigTracker
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class LDMessage:
    """A single chat message within an AI config prompt."""

    # Originator of the message; restricted to the roles used by chat-completion APIs.
    role: Literal['system', 'user', 'assistant']
    # Message text; may contain Mustache-style {{placeholders}} prior to interpolation.
    content: str
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class AIConfigData:
    """The model/prompt payload of an AI config flag variation."""

    # Model parameters as delivered by the flag payload (e.g. {'modelId': ...});
    # None when the variation carries no 'model' entry.
    model: Optional[dict]
    # Ordered prompt messages; None when the variation carries no 'prompt' entry.
    prompt: Optional[List[LDMessage]]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class AIConfig:
    """An AI configuration paired with its usage tracker and enabled state."""

    def __init__(self, config: AIConfigData, tracker: LDAIConfigTracker, enabled: bool):
        """
        :param config: The model/prompt configuration data.
        :param tracker: Tracker used to record usage metrics for this config.
        :param enabled: Whether the AI config is enabled.
        """
        self.enabled = enabled
        self.tracker = tracker
        self.config = config
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class LDAIClient:
    """The LaunchDarkly AI SDK client object."""

    def __init__(self, client: LDClient):
        """
        :param client: The underlying LaunchDarkly SDK client used for flag
            evaluation and event tracking.
        """
        self.client = client

    def model_config(
        self,
        key: str,
        context: Context,
        default_value: AIConfig,
        variables: Optional[Dict[str, Any]] = None,
    ) -> AIConfig:
        """
        Get the value of a model configuration.

        :param key: The key of the model configuration.
        :param context: The context to evaluate the model configuration in.
        :param default_value: The default value of the model configuration.
        :param variables: Additional variables for the model configuration
            (used for prompt template interpolation).
        :return: The value of the model configuration.
        """
        variation = self.client.variation(key, context, default_value)

        # Bug fix: when the flag is missing or evaluation falls through to the
        # default, the SDK hands back `default_value` itself -- an AIConfig,
        # not a dict -- and the dict indexing below would raise TypeError.
        # Return the default directly in that case.
        if isinstance(variation, AIConfig):
            return variation

        all_variables = {}
        if variables:
            all_variables.update(variables)
        # Expose the evaluation context to templates under the `ldctx` name.
        all_variables['ldctx'] = context

        # Use .get() so a payload without a 'prompt' entry cannot raise KeyError.
        prompt = variation.get('prompt')
        if isinstance(prompt, list) and all(
            isinstance(entry, dict) for entry in prompt
        ):
            variation['prompt'] = [
                LDMessage(
                    role=entry['role'],
                    content=self.__interpolate_template(
                        entry['content'], all_variables
                    ),
                )
                for entry in prompt
            ]

        enabled = variation.get('_ldMeta', {}).get('enabled', False)
        return AIConfig(
            # .get() instead of [] so a payload missing 'model' or 'prompt'
            # yields None rather than raising KeyError.
            config=AIConfigData(
                model=variation.get('model'), prompt=variation.get('prompt')
            ),
            tracker=LDAIConfigTracker(
                self.client,
                variation.get('_ldMeta', {}).get('versionKey', ''),
                key,
                context,
            ),
            enabled=bool(enabled),
        )

    def __interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
        """
        Interpolate the template with the given variables.

        :param template: The template string.
        :param variables: The variables to interpolate into the template.
        :return: The interpolated string.
        """
        return chevron.render(template, variables)
|
|
File without changes
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
from ldclient import Config, Context, LDClient
|
|
3
|
+
from ldclient.integrations.test_data import TestData
|
|
4
|
+
from ldclient.testing.builders import *
|
|
5
|
+
|
|
6
|
+
from ldai.client import AIConfig, AIConfigData, LDAIClient, LDMessage
|
|
7
|
+
from ldai.tracker import LDAIConfigTracker
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@pytest.fixture
def td() -> TestData:
    """Build a TestData source with the four AI-config flags used by the tests."""
    td = TestData.data_source()
    # Enabled flag with a single system prompt using a plain {{name}} variable.
    td.update(
        td.flag('model-config')
        .variations(
            {
                'model': {'modelId': 'fakeModel'},
                'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
                '_ldMeta': {'enabled': True, 'versionKey': 'abcd'},
            },
            "green",
        )
        .variation_for_all(0)
    )

    # Enabled flag with two prompt messages, each interpolating a different variable.
    td.update(
        td.flag('multiple-prompt')
        .variations(
            {
                'model': {'modelId': 'fakeModel'},
                'prompt': [
                    {'role': 'system', 'content': 'Hello, {{name}}!'},
                    {'role': 'user', 'content': 'The day is, {{day}}!'},
                ],
                '_ldMeta': {'enabled': True, 'versionKey': 'abcd'},
            },
            "green",
        )
        .variation_for_all(0)
    )

    # Prompt that reads from the evaluation context via the reserved `ldctx` variable.
    td.update(
        td.flag('ctx-interpolation')
        .variations(
            {
                'model': {'modelId': 'fakeModel'},
                'prompt': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}!'}],
                '_ldMeta': {'enabled': True, 'versionKey': 'abcd'},
            }
        )
        .variation_for_all(0)
    )

    # Same shape as model-config but with _ldMeta.enabled = False.
    td.update(
        td.flag('off-config')
        .variations(
            {
                'model': {'modelId': 'fakeModel'},
                'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
                '_ldMeta': {'enabled': False, 'versionKey': 'abcd'},
            }
        )
        .variation_for_all(0)
    )

    return td
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@pytest.fixture
def client(td: TestData) -> LDClient:
    """LDClient wired to the TestData source; events disabled for test isolation."""
    config = Config('sdk-key', update_processor_class=td, send_events=False)
    return LDClient(config=config)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
@pytest.fixture
def tracker(client: LDClient) -> LDAIConfigTracker:
    """Tracker using the same versionKey ('abcd') and flag key as the fixtures above."""
    return LDAIConfigTracker(client, 'abcd', 'model-config', Context.create('user-key'))
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
@pytest.fixture
def ldai_client(client: LDClient) -> LDAIClient:
    """AI client under test, wrapping the TestData-backed LDClient."""
    return LDAIClient(client)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def test_model_config_interpolation(ldai_client: LDAIClient, tracker):
    """Prompt {{name}} placeholders are filled from the supplied variables."""
    context = Context.create('user-key')
    default_value = AIConfig(
        config=AIConfigData(
            model={'modelId': 'fakeModel'},
            prompt=[LDMessage(role='system', content='Hello, {{name}}!')],
        ),
        tracker=tracker,
        enabled=True,
    )
    variables = {'name': 'World'}

    config = ldai_client.model_config('model-config', context, default_value, variables)

    assert config.config.prompt is not None
    assert len(config.config.prompt) > 0
    assert config.config.prompt[0].content == 'Hello, World!'
    assert config.enabled is True
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def test_model_config_no_variables(ldai_client: LDAIClient, tracker):
    """With an empty variables dict, placeholders render as empty strings."""
    context = Context.create('user-key')
    default_value = AIConfig(
        config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True
    )

    config = ldai_client.model_config('model-config', context, default_value, {})

    assert config.config.prompt is not None
    assert len(config.config.prompt) > 0
    assert config.config.prompt[0].content == 'Hello, !'
    assert config.enabled is True
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def test_context_interpolation(ldai_client: LDAIClient, tracker):
    """{{ldctx.*}} placeholders resolve against the evaluation context, not variables."""
    context = Context.builder('user-key').name("Sandy").build()
    default_value = AIConfig(
        config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True
    )
    # 'name' is also provided as a variable; the prompt must prefer ldctx.name.
    variables = {'name': 'World'}

    config = ldai_client.model_config(
        'ctx-interpolation', context, default_value, variables
    )

    assert config.config.prompt is not None
    assert len(config.config.prompt) > 0
    assert config.config.prompt[0].content == 'Hello, Sandy!'
    assert config.enabled is True
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def test_model_config_multiple(ldai_client: LDAIClient, tracker):
    """Every message in a multi-message prompt is interpolated independently."""
    context = Context.create('user-key')
    default_value = AIConfig(
        config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True
    )
    variables = {'name': 'World', 'day': 'Monday'}

    config = ldai_client.model_config(
        'multiple-prompt', context, default_value, variables
    )

    assert config.config.prompt is not None
    assert len(config.config.prompt) > 0
    assert config.config.prompt[0].content == 'Hello, World!'
    assert config.config.prompt[1].content == 'The day is, Monday!'
    assert config.enabled is True
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def test_model_config_disabled(ldai_client: LDAIClient, tracker):
    """A variation whose _ldMeta.enabled is False yields enabled=False."""
    context = Context.create('user-key')
    default_value = AIConfig(
        config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=False
    )

    config = ldai_client.model_config('off-config', context, default_value, {})

    assert config.enabled is False
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
import time
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from enum import Enum
|
|
4
|
+
from typing import Dict, Union
|
|
5
|
+
|
|
6
|
+
from ldclient import Context, LDClient
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class TokenMetrics:
    """
    Metrics for token usage in AI operations.

    :param total: Total number of tokens used.
    :param input: Number of input tokens.
    :param output: Number of output tokens.
    """

    total: int
    input: int
    # NOTE(review): the `type: ignore` suppression looks unnecessary for a plain
    # int field -- confirm whether mypy still flags this line.
    output: int  # type: ignore
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class FeedbackKind(Enum):
    """
    Types of feedback that can be provided for AI operations.
    """

    # Bug fix: removed the @dataclass decorator. dataclass and Enum do not
    # compose -- with no annotated fields, dataclass generated an __eq__ that
    # compared empty field tuples, making ALL members compare equal
    # (FeedbackKind.Positive == FeedbackKind.Negative was True).
    Positive = "positive"
    Negative = "negative"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class TokenUsage:
    """
    Tracks token usage for AI operations.

    :param total_tokens: Total number of tokens used.
    :param prompt_tokens: Number of tokens in the prompt.
    :param completion_tokens: Number of tokens in the completion.
    """

    total_tokens: int
    prompt_tokens: int
    completion_tokens: int

    def to_metrics(self) -> "TokenMetrics":
        """
        Convert token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        # Bug fix: the original subscripted self (self['total_tokens']), which
        # raises TypeError on a dataclass, and returned a plain dict, which
        # LDAIConfigTracker.track_tokens cannot consume (it reads the .total/
        # .input/.output attributes). Use attribute access and return
        # TokenMetrics, matching OpenAITokenUsage and BedrockTokenUsage.
        return TokenMetrics(
            total=self.total_tokens,
            input=self.prompt_tokens,
            output=self.completion_tokens,
        )
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass
class LDOpenAIUsage:
    """
    LaunchDarkly-specific OpenAI usage tracking.

    Field names match the attributes read by OpenAITokenUsage.__init__.

    :param total_tokens: Total number of tokens used.
    :param prompt_tokens: Number of tokens in the prompt.
    :param completion_tokens: Number of tokens in the completion.
    """

    total_tokens: int
    prompt_tokens: int
    completion_tokens: int
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# Bug fix: removed the misapplied @dataclass decorator. The class defines its
# own __init__ and has no annotated fields, so dataclass only contributed a
# field-less __eq__/__repr__ under which ALL instances compared equal
# regardless of their token counts.
class OpenAITokenUsage:
    """
    Tracks OpenAI-specific token usage.
    """

    def __init__(self, data: "LDOpenAIUsage"):
        """
        Initialize OpenAI token usage tracking.

        :param data: OpenAI usage data.
        """
        self.total_tokens = data.total_tokens
        self.prompt_tokens = data.prompt_tokens
        self.completion_tokens = data.completion_tokens

    def to_metrics(self) -> "TokenMetrics":
        """
        Convert OpenAI token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        return TokenMetrics(
            total=self.total_tokens,
            input=self.prompt_tokens,
            output=self.completion_tokens,
        )
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
# Bug fix: removed the misapplied @dataclass decorator. The class defines its
# own __init__ and has no annotated fields, so dataclass only contributed a
# field-less __eq__ under which ALL instances compared equal.
class BedrockTokenUsage:
    """
    Tracks AWS Bedrock-specific token usage.
    """

    def __init__(self, data: dict):
        """
        Initialize Bedrock token usage tracking.

        :param data: Dictionary containing Bedrock usage data
            (camelCase keys as returned by the Bedrock API; missing keys
            default to 0).
        """
        self.totalTokens = data.get('totalTokens', 0)
        self.inputTokens = data.get('inputTokens', 0)
        self.outputTokens = data.get('outputTokens', 0)

    def to_metrics(self) -> "TokenMetrics":
        """
        Convert Bedrock token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        return TokenMetrics(
            total=self.totalTokens,
            input=self.inputTokens,
            output=self.outputTokens,
        )
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
class LDAIConfigTracker:
    """
    Tracks configuration and usage metrics for LaunchDarkly AI operations.

    Every event is sent through the wrapped LDClient and tagged with the
    version/config keys supplied at construction.
    """

    def __init__(
        self, ld_client: LDClient, version_key: str, config_key: str, context: Context
    ):
        """
        Initialize an AI configuration tracker.

        :param ld_client: LaunchDarkly client instance.
        :param version_key: Version key for tracking.
        :param config_key: Configuration key for tracking.
        :param context: Context for evaluation.
        """
        self.ld_client = ld_client
        self.version_key = version_key
        self.config_key = config_key
        self.context = context

    def __get_track_data(self):
        """
        Get tracking data for events.

        :return: Dictionary containing version and config keys.
        """
        return dict(versionKey=self.version_key, configKey=self.config_key)

    def track_duration(self, duration: int) -> None:
        """
        Manually track the duration of an AI operation.

        :param duration: Duration in milliseconds.
        """
        self.ld_client.track(
            '$ld:ai:duration:total', self.context, self.__get_track_data(), duration
        )

    def track_duration_of(self, func):
        """
        Automatically track the duration of an AI operation.

        :param func: Function to track.
        :return: Result of the tracked function.
        """
        started = time.time()
        outcome = func()
        # Report wall-clock time in milliseconds.
        elapsed_ms = int((time.time() - started) * 1000)
        self.track_duration(elapsed_ms)
        return outcome

    def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
        """
        Track user feedback for an AI operation.

        :param feedback: Dictionary containing feedback kind under the 'kind' key.
        """
        event_names = {
            FeedbackKind.Positive: '$ld:ai:feedback:user:positive',
            FeedbackKind.Negative: '$ld:ai:feedback:user:negative',
        }
        event = event_names.get(feedback['kind'])
        # Unknown kinds are silently ignored, matching the original behavior.
        if event is not None:
            self.ld_client.track(event, self.context, self.__get_track_data(), 1)

    def track_success(self) -> None:
        """
        Track a successful AI generation.
        """
        self.ld_client.track(
            '$ld:ai:generation', self.context, self.__get_track_data(), 1
        )

    def track_openai(self, func):
        """
        Track OpenAI-specific operations.

        :param func: Function to track.
        :return: Result of the tracked function.
        """
        response = self.track_duration_of(func)
        if response.usage:
            self.track_tokens(OpenAITokenUsage(response.usage))
        return response

    def track_bedrock_converse(self, res: dict) -> dict:
        """
        Track AWS Bedrock conversation operations.

        :param res: Response dictionary from Bedrock.
        :return: The original response dictionary.
        """
        status_code = res.get('$metadata', {}).get('httpStatusCode', 0)
        if status_code == 200:
            self.track_success()
        elif status_code >= 400:
            # Potentially add error tracking in the future.
            pass
        latency = res.get('metrics', {}).get('latencyMs')
        if latency:
            self.track_duration(latency)
        usage = res.get('usage')
        if usage:
            self.track_tokens(BedrockTokenUsage(usage))
        return res

    def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None:
        """
        Track token usage metrics.

        :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources.
        """
        metrics = tokens.to_metrics()
        # Emit one event per non-zero counter, in total/input/output order.
        for event_name, count in (
            ('$ld:ai:tokens:total', metrics.total),
            ('$ld:ai:tokens:input', metrics.input),
            ('$ld:ai:tokens:output', metrics.output),
        ):
            if count > 0:
                self.ld_client.track(
                    event_name, self.context, self.__get_track_data(), count
                )
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "launchdarkly-server-sdk-ai"
|
|
3
|
+
version = "0.0.0"
|
|
4
|
+
description = "LaunchDarkly SDK for AI"
|
|
5
|
+
authors = ["LaunchDarkly <dev@launchdarkly.com>"]
|
|
6
|
+
license = "Apache-2.0"
|
|
7
|
+
readme = "README.md"
|
|
8
|
+
homepage = "https://docs.launchdarkly.com/sdk/ai/python"
|
|
9
|
+
repository = "https://github.com/launchdarkly/python-server-sdk-ai"
|
|
10
|
+
documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/"
|
|
11
|
+
classifiers = [
|
|
12
|
+
"Intended Audience :: Developers",
|
|
13
|
+
"License :: OSI Approved :: Apache Software License",
|
|
14
|
+
"Operating System :: OS Independent",
|
|
15
|
+
"Programming Language :: Python :: 3",
|
|
16
|
+
"Programming Language :: Python :: 3.8",
|
|
17
|
+
"Programming Language :: Python :: 3.9",
|
|
18
|
+
"Programming Language :: Python :: 3.10",
|
|
19
|
+
"Programming Language :: Python :: 3.11",
|
|
20
|
+
"Programming Language :: Python :: 3.12",
|
|
21
|
+
"Topic :: Software Development",
|
|
22
|
+
"Topic :: Software Development :: Libraries",
|
|
23
|
+
]
|
|
24
|
+
packages = [ { include = "ldai" } ]
|
|
25
|
+
exclude = [
|
|
26
|
+
{ path = "ldai/testing", format = "wheel" }
|
|
27
|
+
]
|
|
28
|
+
|
|
29
|
+
[tool.poetry.dependencies]
|
|
30
|
+
python = ">=3.8"
|
|
31
|
+
launchdarkly-server-sdk = ">=9.4.0"
|
|
32
|
+
chevron = "=0.14.0"
|
|
33
|
+
pycodestyle = "^2.12.1"
|
|
34
|
+
isort = "^5.13.2"
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
[tool.poetry.group.dev.dependencies]
|
|
38
|
+
pytest = ">=2.8"
|
|
39
|
+
pytest-cov = ">=2.4.0"
|
|
40
|
+
pytest-mypy = "==0.10.3"
|
|
41
|
+
mypy = "==1.13.0"
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
[tool.poetry.group.docs]
|
|
45
|
+
optional = true
|
|
46
|
+
|
|
47
|
+
[tool.poetry.group.docs.dependencies]
|
|
48
|
+
sphinx = ">=6,<8"
|
|
49
|
+
sphinx-rtd-theme = ">=1.3,<4.0"
|
|
50
|
+
certifi = ">=2018.4.16"
|
|
51
|
+
expiringdict = ">=1.1.4"
|
|
52
|
+
pyrfc3339 = ">=1.0"
|
|
53
|
+
jsonpickle = ">1.4.1"
|
|
54
|
+
semver = ">=2.7.9"
|
|
55
|
+
urllib3 = ">=1.26.0"
|
|
56
|
+
jinja2 = "3.1.4"
|
|
57
|
+
|
|
58
|
+
[tool.mypy]
|
|
59
|
+
python_version = "3.8"
|
|
60
|
+
ignore_missing_imports = true
|
|
61
|
+
install_types = true
|
|
62
|
+
non_interactive = true
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
[tool.pytest.ini_options]
|
|
66
|
+
addopts = ["-ra"]
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
[build-system]
|
|
70
|
+
requires = ["poetry-core"]
|
|
71
|
+
build-backend = "poetry.core.masonry.api"
|