turingpulse-sdk-semantic-kernel 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,42 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # Virtual environments
7
+ .venv/
8
+ venv/
9
+ ENV/
10
+
11
+ # Distribution / packaging
12
+ dist/
13
+ build/
14
+ *.egg-info/
15
+
16
+ # Database files
17
+ *.db
18
+ *.sqlite3
19
+
20
+ # Environment variables
21
+ .env
22
+ .env.local
23
+
24
+ # IDE
25
+ .idea/
26
+ .vscode/
27
+ *.swp
28
+ *.swo
29
+
30
+ # Testing
31
+ .pytest_cache/
32
+ .coverage
33
+ htmlcov/
34
+ .tox/
35
+
36
+ # Logs
37
+ *.log
38
+ logs/
39
+
40
+ # OS files
41
+ .DS_Store
42
+ Thumbs.db
@@ -0,0 +1,11 @@
1
+ Metadata-Version: 2.4
2
+ Name: turingpulse-sdk-semantic-kernel
3
+ Version: 1.0.0
4
+ Summary: TuringPulse SDK integration for Microsoft Semantic Kernel
5
+ License-Expression: Apache-2.0
6
+ Requires-Python: >=3.11
7
+ Requires-Dist: semantic-kernel>=1.1.1
8
+ Requires-Dist: turingpulse-sdk>=1.0.0
9
+ Provides-Extra: dev
10
+ Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
11
+ Requires-Dist: pytest>=8.0; extra == 'dev'
@@ -0,0 +1,17 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "turingpulse-sdk-semantic-kernel"
7
+ version = "1.0.0"
8
+ description = "TuringPulse SDK integration for Microsoft Semantic Kernel"
9
+ requires-python = ">=3.11"
10
+ license = "Apache-2.0"
11
+ dependencies = [
12
+ "turingpulse-sdk>=1.0.0",
13
+ "semantic-kernel>=1.1.1",
14
+ ]
15
+
16
+ [project.optional-dependencies]
17
+ dev = ["pytest>=8.0", "pytest-asyncio>=0.23"]
@@ -0,0 +1,6 @@
1
+ """TuringPulse SDK integration for Microsoft Semantic Kernel."""
2
+
3
+ from ._wrapper import instrument_semantic_kernel
4
+
5
+ __version__ = "0.1.0"
6
+ __all__ = ["instrument_semantic_kernel"]
@@ -0,0 +1,73 @@
1
+ """Semantic Kernel instrumentation for TuringPulse.
2
+
3
+ Wraps ``semantic_kernel.Kernel.invoke()`` / ``invoke_stream()``
4
+ to capture plugin/function calls, model usage, and planner steps.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import logging
10
+ from contextvars import ContextVar
11
+ from typing import Any, Dict, Optional, Sequence
12
+
13
+ from turingpulse_sdk import instrument, GovernanceDirective
14
+ from turingpulse_sdk.config import MAX_FIELD_SIZE
15
+ from turingpulse_sdk.context import current_context
16
+
17
# Module-level logger; handlers and levels are configured by the host application.
logger = logging.getLogger("turingpulse.sdk.semantic_kernel")

# Re-entrancy flag: set for the duration of a wrapped `kernel.invoke()` call so
# nested instrumentation (within the same async context) can detect it is
# already inside an instrumented invocation. ContextVar keeps the flag isolated
# per task/coroutine rather than global.
_INSTRUMENTING: ContextVar[bool] = ContextVar("_tp_sk_instrumenting", default=False)
20
+
21
+
22
def instrument_semantic_kernel(
    kernel,
    function,
    *,
    name: str,
    governance: Optional[GovernanceDirective] = None,
    model: Optional[str] = None,
    provider: str = "openai",
    # NOTE(review): "KPIConfig" is a forward reference with no visible import in
    # this module — presumably exported by turingpulse_sdk; confirm and add a
    # TYPE_CHECKING import so static checkers can resolve it.
    kpis: Optional[Sequence["KPIConfig"]] = None,
    metadata: Optional[Dict[str, str]] = None,
):
    """Wrap a Semantic Kernel function invocation for TuringPulse observability.

    Returns an async callable that instruments ``kernel.invoke(function, ...)``.

    Args:
        kernel: A ``semantic_kernel.Kernel`` instance.
        function: The KernelFunction to invoke.
        name: Workflow display name.
        governance: Optional governance directive.
        model: LLM model name override.
        provider: LLM provider name.
        kpis: Optional KPI configurations forwarded to ``@instrument``.
        metadata: Optional string metadata forwarded to ``@instrument``.

    Returns:
        An async callable wrapping ``kernel.invoke()``.
    """

    @instrument(name=name, governance=governance, kpis=kpis, metadata=metadata or {})
    async def _run(**kwargs: Any) -> Any:
        # Mark this async context as instrumented for the duration of the call.
        token = _INSTRUMENTING.set(True)
        try:
            result = await kernel.invoke(function, **kwargs)

            ctx = current_context()
            if ctx:
                ctx.framework = "semantic-kernel"
                ctx.node_type = "workflow"

                # Use `is not None`, not truthiness: a falsy-but-valid result
                # (e.g. an empty FunctionResult) should still be recorded.
                result_str = str(result) if result is not None else ""
                # Truncate both sides to the SDK's field-size limit.
                ctx.set_io(
                    input_data=str(kwargs)[:MAX_FIELD_SIZE],
                    output_data=result_str[:MAX_FIELD_SIZE],
                )

                if model:
                    ctx.set_model(model, provider)

            return result
        finally:
            # Always clear the re-entrancy flag, even if invoke() raised.
            _INSTRUMENTING.reset(token)

    return _run