getstacklens 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,87 @@
1
+ name: ci
2
+
3
+ on:
4
+ pull_request:
5
+ branches: [main]
6
+ push:
7
+ branches: [main]
8
+ workflow_dispatch:
9
+
10
+ permissions:
11
+ contents: read
12
+
13
+ concurrency:
14
+ group: ci-${{ github.ref }}
15
+ cancel-in-progress: true
16
+
17
+ jobs:
18
+ lint:
19
+ name: lint (ruff)
20
+ runs-on: ubuntu-latest
21
+ steps:
22
+ - uses: actions/checkout@v4
23
+ - uses: actions/setup-python@v5
24
+ with:
25
+ python-version: "3.12"
26
+ cache: pip
27
+ - run: python -m pip install -U pip
28
+ - run: pip install ruff
29
+ - run: ruff format --check .
30
+ - run: ruff check .
31
+
32
+ typecheck:
33
+ name: typecheck (mypy)
34
+ runs-on: ubuntu-latest
35
+ steps:
36
+ - uses: actions/checkout@v4
37
+ - uses: actions/setup-python@v5
38
+ with:
39
+ python-version: "3.12"
40
+ cache: pip
41
+ - run: python -m pip install -U pip
42
+ - run: pip install ".[dev]" mypy
43
+ - run: mypy --ignore-missing-imports --show-error-codes getstacklens
44
+
45
+ test:
46
+ name: test (pytest, py${{ matrix.python-version }})
47
+ runs-on: ubuntu-latest
48
+ strategy:
49
+ fail-fast: false
50
+ matrix:
51
+ python-version: ["3.9", "3.10", "3.11", "3.12"]
52
+ steps:
53
+ - uses: actions/checkout@v4
54
+ - uses: actions/setup-python@v5
55
+ with:
56
+ python-version: ${{ matrix.python-version }}
57
+ cache: pip
58
+ - run: python -m pip install -U pip
59
+ - run: pip install ".[dev]"
60
+ - run: pytest -q
61
+
62
+ build:
63
+ name: build (sdist+wheel)
64
+ runs-on: ubuntu-latest
65
+ steps:
66
+ - uses: actions/checkout@v4
67
+ - uses: actions/setup-python@v5
68
+ with:
69
+ python-version: "3.12"
70
+ cache: pip
71
+ - run: python -m pip install -U pip
72
+ - run: pip install build twine
73
+ - run: python -m build
74
+ - run: python -m twine check dist/*
75
+
76
+ security:
77
+ name: security (pip-audit)
78
+ runs-on: ubuntu-latest
79
+ steps:
80
+ - uses: actions/checkout@v4
81
+ - uses: actions/setup-python@v5
82
+ with:
83
+ python-version: "3.12"
84
+ cache: pip
85
+ - run: python -m pip install -U pip
86
+ - run: pip install ".[dev]" pip-audit
87
+ - run: pip-audit
@@ -0,0 +1,25 @@
1
+ name: codeql
2
+
3
+ on:
4
+ pull_request:
5
+ branches: [main]
6
+ push:
7
+ branches: [main]
8
+ schedule:
9
+ - cron: "0 6 * * 1"
10
+ workflow_dispatch:
11
+
12
+ permissions:
13
+ contents: read
14
+ security-events: write
15
+
16
+ jobs:
17
+ analyze:
18
+ name: analyze (codeql)
19
+ runs-on: ubuntu-latest
20
+ steps:
21
+ - uses: actions/checkout@v4
22
+ - uses: github/codeql-action/init@v3
23
+ with:
24
+ languages: python
25
+ - uses: github/codeql-action/analyze@v3
@@ -0,0 +1,37 @@
1
+ name: publish
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - "v*"
7
+
8
+ permissions:
9
+ contents: write # required to create GitHub releases
10
+ id-token: write # required for PyPI trusted publishing
11
+
12
+ jobs:
13
+ publish:
14
+ name: build, publish to PyPI, and create release
15
+ runs-on: ubuntu-latest
16
+ environment: pypi
17
+ steps:
18
+ - uses: actions/checkout@v4
19
+
20
+ - uses: actions/setup-python@v5
21
+ with:
22
+ python-version: "3.12"
23
+ cache: pip
24
+
25
+ - run: python -m pip install -U pip build
26
+
27
+ - name: build sdist and wheel
28
+ run: python -m build
29
+
30
+ - name: publish to PyPI
31
+ uses: pypa/gh-action-pypi-publish@release/v1
32
+
33
+ - name: create GitHub release
34
+ uses: softprops/action-gh-release@v2
35
+ with:
36
+ generate_release_notes: true
37
+ files: dist/*
@@ -0,0 +1,27 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.egg-info/
5
+ *.egg
6
+ .eggs/
7
+
8
+ # Build
9
+ dist/
10
+ build/
11
+
12
+ # Virtual environments
13
+ .venv/
14
+ venv/
15
+ env/
16
+
17
+ # Testing
18
+ .pytest_cache/
19
+ .coverage
20
+ htmlcov/
21
+
22
+ # Tooling caches
23
+ .ruff_cache/
24
+ .mypy_cache/
25
+
26
+ # OS
27
+ .DS_Store
@@ -0,0 +1,31 @@
1
+ StackLens Source-Available License
2
+
3
+ Copyright (c) 2026 StackLens Private Limited. All rights reserved.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated files (the "Software"), to use and run it
7
+ for personal or commercial purposes, subject to the following conditions:
8
+
9
+ PERMITTED:
10
+ - Using and running the Software as-is
11
+ - Integrating the Software into your own applications
12
+ - Reading and referencing the source code
13
+ - Submitting corrections or improvements via pull request to the official
14
+ repository at https://github.com/getstacklens-ai/stacklens-sdk-python
15
+
16
+ NOT PERMITTED without prior written permission from StackLens Private Limited:
17
+ - Modifying or creating derivative works based on the Software
18
+ - Redistributing the Software, in whole or in part
19
+ - Sublicensing the Software
20
+ - Selling the Software or incorporating it into a competing product or service
21
+ - Publishing a fork or modified copy of the Software
22
+
23
+ CONTRIBUTIONS:
24
+ All contributions submitted via pull request are made under the terms of this
25
+ license. By submitting a pull request, you agree to transfer copyright of your
26
+ contribution to StackLens Private Limited and grant StackLens Private Limited the right to use,
27
+ modify, and publish your contribution under any license.
28
+
29
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30
+ IMPLIED. IN NO EVENT SHALL STACKLENS PRIVATE LIMITED BE LIABLE FOR ANY CLAIM, DAMAGES,
31
+ OR OTHER LIABILITY ARISING FROM USE OF THE SOFTWARE.
@@ -0,0 +1,196 @@
1
+ Metadata-Version: 2.4
2
+ Name: getstacklens
3
+ Version: 0.0.1
4
+ Summary: Python SDK for StackLens — observability and governance for your AI stack
5
+ Project-URL: Homepage, https://getstacklens.ai
6
+ Project-URL: Documentation, https://getstacklens.ai/docs
7
+ Project-URL: Repository, https://github.com/getstacklens-ai/stacklens-sdk-python
8
+ Project-URL: Bug Tracker, https://github.com/getstacklens-ai/stacklens-sdk-python/issues
9
+ License: StackLens Source-Available License
10
+
11
+ Copyright (c) 2026 StackLens Private Limited. All rights reserved.
12
+
13
+ Permission is hereby granted, free of charge, to any person obtaining a copy
14
+ of this software and associated files (the "Software"), to use and run it
15
+ for personal or commercial purposes, subject to the following conditions:
16
+
17
+ PERMITTED:
18
+ - Using and running the Software as-is
19
+ - Integrating the Software into your own applications
20
+ - Reading and referencing the source code
21
+ - Submitting corrections or improvements via pull request to the official
22
+ repository at https://github.com/getstacklens-ai/stacklens-sdk-python
23
+
24
+ NOT PERMITTED without prior written permission from StackLens Private Limited:
25
+ - Modifying or creating derivative works based on the Software
26
+ - Redistributing the Software, in whole or in part
27
+ - Sublicensing the Software
28
+ - Selling the Software or incorporating it into a competing product or service
29
+ - Publishing a fork or modified copy of the Software
30
+
31
+ CONTRIBUTIONS:
32
+ All contributions submitted via pull request are made under the terms of this
33
+ license. By submitting a pull request, you agree to transfer copyright of your
34
+ contribution to StackLens Private Limited and grant StackLens Private Limited the right to use,
35
+ modify, and publish your contribution under any license.
36
+
37
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
38
+ IMPLIED. IN NO EVENT SHALL STACKLENS PRIVATE LIMITED BE LIABLE FOR ANY CLAIM, DAMAGES,
39
+ OR OTHER LIABILITY ARISING FROM USE OF THE SOFTWARE.
40
+ License-File: LICENSE
41
+ Keywords: ai,governance,llm,observability,prompt-management,tracing
42
+ Classifier: Development Status :: 3 - Alpha
43
+ Classifier: Intended Audience :: Developers
44
+ Classifier: License :: Other/Proprietary License
45
+ Classifier: Programming Language :: Python :: 3
46
+ Classifier: Programming Language :: Python :: 3.9
47
+ Classifier: Programming Language :: Python :: 3.10
48
+ Classifier: Programming Language :: Python :: 3.11
49
+ Classifier: Programming Language :: Python :: 3.12
50
+ Classifier: Programming Language :: Python :: 3.13
51
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
52
+ Classifier: Typing :: Typed
53
+ Requires-Python: >=3.9
54
+ Requires-Dist: httpx>=0.27
55
+ Provides-Extra: dev
56
+ Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
57
+ Requires-Dist: pytest>=8; extra == 'dev'
58
+ Requires-Dist: respx>=0.21; extra == 'dev'
59
+ Description-Content-Type: text/markdown
60
+
61
+ # stacklens-sdk-python
62
+
63
+ Python SDK for [StackLens](https://getstacklens.ai) — observability and governance for your AI stack.
64
+
65
+ Trace LLM calls, fetch versioned prompts, and enforce AI governance policies — in three lines of Python.
66
+
67
+ ## Installation
68
+
69
+ ```bash
70
+ pip install getstacklens
71
+ ```
72
+
73
+ Requires Python 3.9+.
74
+
75
+ ## Quickstart
76
+
77
+ ```python
78
+ import getstacklens
79
+
80
+ getstacklens.configure(api_key="sl-xxxx")
81
+ getstacklens.trace("my-llm-call", model="gpt-4o", provider="openai", input_tokens=150, output_tokens=200)
82
+ ```
83
+
84
+ Get your API key from the [StackLens dashboard](https://app.getstacklens.ai) under **Settings → API Keys**.
85
+
86
+ ## Tracing LLM calls
87
+
88
+ ### Simple trace (one line)
89
+
90
+ For accurate latency, record `start_time` before the call and pass it in:
91
+
92
+ ```python
93
+ from datetime import datetime, timezone
94
+ import getstacklens
95
+
96
+ getstacklens.configure(api_key="sl-xxxx")
97
+
98
+ start = datetime.now(timezone.utc)
99
+ response = openai_client.chat.completions.create(
100
+ model="gpt-4o",
101
+ messages=[{"role": "user", "content": "Summarise this document."}],
102
+ )
103
+ getstacklens.trace(
104
+ "chat-completion",
105
+ model="gpt-4o",
106
+ provider="openai",
107
+ input_tokens=response.usage.prompt_tokens,
108
+ output_tokens=response.usage.completion_tokens,
109
+ start_time=start,
110
+ )
111
+ ```
112
+
113
+ ### Context manager (recommended for agent workflows)
114
+
115
+ ```python
116
+ import openai
117
+ import getstacklens
118
+
119
+ getstacklens.configure(api_key="sl-xxxx")
120
+ client = openai.OpenAI()
121
+
122
+ with getstacklens.start_trace("customer-support-agent") as span:
123
+ response = client.chat.completions.create(
124
+ model="gpt-4o",
125
+ messages=[{"role": "user", "content": "How do I reset my password?"}],
126
+ )
127
+ span.record_llm(
128
+ model="gpt-4o",
129
+ provider="openai",
130
+ input_tokens=response.usage.prompt_tokens,
131
+ output_tokens=response.usage.completion_tokens,
132
+ completion=response.choices[0].message.content,
133
+ )
134
+ span.set_attribute("user_id", "u_123")
135
+ span.add_tag("support", "production")
136
+ ```
137
+
138
+ If an exception is raised inside the context, the span status is automatically set to `error`.
139
+
140
+ ## Fetching versioned prompts (FlowOps)
141
+
142
+ Manage prompts in the StackLens dashboard, then fetch them at runtime — no deploys needed.
143
+
144
+ ```python
145
+ import getstacklens
146
+
147
+ getstacklens.configure(api_key="sl-xxxx")
148
+
149
+ # Fetch the active prompt for the production environment
150
+ system_prompt = getstacklens.prompts.get("support-system-prompt", env="production")
151
+
152
+ # Use in an LLM call
153
+ response = client.chat.completions.create(
154
+ model="gpt-4o",
155
+ messages=[
156
+ {"role": "system", "content": system_prompt},
157
+ {"role": "user", "content": user_message},
158
+ ],
159
+ )
160
+ ```
161
+
162
+ Available environments: `"dev"`, `"staging"`, `"production"` (default).
163
+
164
+ ## Self-hosted deployments
165
+
166
+ Point the SDK at your own StackLens instance:
167
+
168
+ ```python
169
+ getstacklens.configure(
170
+ api_key="sl-xxxx",
171
+ endpoint="https://api.your-domain.com",
172
+ )
173
+ ```
174
+
175
+ See the [self-hosting guide](https://getstacklens.ai/docs/self-hosting) for setup instructions.
176
+
177
+ ## Supported providers
178
+
179
+ Works with any LLM provider — pass the model and provider name you use:
180
+
181
+ | Provider | `provider` value |
182
+ |---|---|
183
+ | OpenAI | `"openai"` |
184
+ | Anthropic | `"anthropic"` |
185
+ | Google Gemini | `"gemini"` |
186
+ | Azure OpenAI | `"azure-openai"` |
187
+ | AWS Bedrock | `"bedrock"` |
188
+ | Any other | any string |
189
+
190
+ ## Links
191
+
192
+ - [Documentation](https://getstacklens.ai/docs)
193
+ - [StackLens Platform](https://getstacklens.ai)
194
+ - [Dashboard](https://app.getstacklens.ai)
195
+ - [GitHub](https://github.com/getstacklens-ai/stacklens-sdk-python)
196
+ - [Report an issue](https://github.com/getstacklens-ai/stacklens-sdk-python/issues)
@@ -0,0 +1,136 @@
1
+ # stacklens-sdk-python
2
+
3
+ Python SDK for [StackLens](https://getstacklens.ai) — observability and governance for your AI stack.
4
+
5
+ Trace LLM calls, fetch versioned prompts, and enforce AI governance policies — in three lines of Python.
6
+
7
+ ## Installation
8
+
9
+ ```bash
10
+ pip install getstacklens
11
+ ```
12
+
13
+ Requires Python 3.9+.
14
+
15
+ ## Quickstart
16
+
17
+ ```python
18
+ import getstacklens
19
+
20
+ getstacklens.configure(api_key="sl-xxxx")
21
+ getstacklens.trace("my-llm-call", model="gpt-4o", provider="openai", input_tokens=150, output_tokens=200)
22
+ ```
23
+
24
+ Get your API key from the [StackLens dashboard](https://app.getstacklens.ai) under **Settings → API Keys**.
25
+
26
+ ## Tracing LLM calls
27
+
28
+ ### Simple trace (one line)
29
+
30
+ For accurate latency, record `start_time` before the call and pass it in:
31
+
32
+ ```python
33
+ from datetime import datetime, timezone
34
+ import getstacklens
35
+
36
+ getstacklens.configure(api_key="sl-xxxx")
37
+
38
+ start = datetime.now(timezone.utc)
39
+ response = openai_client.chat.completions.create(
40
+ model="gpt-4o",
41
+ messages=[{"role": "user", "content": "Summarise this document."}],
42
+ )
43
+ getstacklens.trace(
44
+ "chat-completion",
45
+ model="gpt-4o",
46
+ provider="openai",
47
+ input_tokens=response.usage.prompt_tokens,
48
+ output_tokens=response.usage.completion_tokens,
49
+ start_time=start,
50
+ )
51
+ ```
52
+
53
+ ### Context manager (recommended for agent workflows)
54
+
55
+ ```python
56
+ import openai
57
+ import getstacklens
58
+
59
+ getstacklens.configure(api_key="sl-xxxx")
60
+ client = openai.OpenAI()
61
+
62
+ with getstacklens.start_trace("customer-support-agent") as span:
63
+ response = client.chat.completions.create(
64
+ model="gpt-4o",
65
+ messages=[{"role": "user", "content": "How do I reset my password?"}],
66
+ )
67
+ span.record_llm(
68
+ model="gpt-4o",
69
+ provider="openai",
70
+ input_tokens=response.usage.prompt_tokens,
71
+ output_tokens=response.usage.completion_tokens,
72
+ completion=response.choices[0].message.content,
73
+ )
74
+ span.set_attribute("user_id", "u_123")
75
+ span.add_tag("support", "production")
76
+ ```
77
+
78
+ If an exception is raised inside the context, the span status is automatically set to `error`.
79
+
80
+ ## Fetching versioned prompts (FlowOps)
81
+
82
+ Manage prompts in the StackLens dashboard, then fetch them at runtime — no deploys needed.
83
+
84
+ ```python
85
+ import getstacklens
86
+
87
+ getstacklens.configure(api_key="sl-xxxx")
88
+
89
+ # Fetch the active prompt for the production environment
90
+ system_prompt = getstacklens.prompts.get("support-system-prompt", env="production")
91
+
92
+ # Use in an LLM call
93
+ response = client.chat.completions.create(
94
+ model="gpt-4o",
95
+ messages=[
96
+ {"role": "system", "content": system_prompt},
97
+ {"role": "user", "content": user_message},
98
+ ],
99
+ )
100
+ ```
101
+
102
+ Available environments: `"dev"`, `"staging"`, `"production"` (default).
103
+
104
+ ## Self-hosted deployments
105
+
106
+ Point the SDK at your own StackLens instance:
107
+
108
+ ```python
109
+ getstacklens.configure(
110
+ api_key="sl-xxxx",
111
+ endpoint="https://api.your-domain.com",
112
+ )
113
+ ```
114
+
115
+ See the [self-hosting guide](https://getstacklens.ai/docs/self-hosting) for setup instructions.
116
+
117
+ ## Supported providers
118
+
119
+ Works with any LLM provider — pass the model and provider name you use:
120
+
121
+ | Provider | `provider` value |
122
+ |---|---|
123
+ | OpenAI | `"openai"` |
124
+ | Anthropic | `"anthropic"` |
125
+ | Google Gemini | `"gemini"` |
126
+ | Azure OpenAI | `"azure-openai"` |
127
+ | AWS Bedrock | `"bedrock"` |
128
+ | Any other | any string |
129
+
130
+ ## Links
131
+
132
+ - [Documentation](https://getstacklens.ai/docs)
133
+ - [StackLens Platform](https://getstacklens.ai)
134
+ - [Dashboard](https://app.getstacklens.ai)
135
+ - [GitHub](https://github.com/getstacklens-ai/stacklens-sdk-python)
136
+ - [Report an issue](https://github.com/getstacklens-ai/stacklens-sdk-python/issues)