specops-ai 0.2.0__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- specops_ai/__init__.py +124 -0
- specops_ai/_constants.py +33 -0
- specops_ai/_context.py +21 -0
- specops_ai/adapters/__init__.py +102 -0
- specops_ai/adapters/autogen.py +57 -0
- specops_ai/adapters/crewai.py +80 -0
- specops_ai/adapters/langgraph.py +88 -0
- specops_ai/config.py +76 -0
- specops_ai/coordinate.py +239 -0
- specops_ai/eval.py +237 -0
- specops_ai/heal.py +447 -0
- specops_ai/rca.py +202 -0
- specops_ai/replay.py +286 -0
- specops_ai/simulate.py +369 -0
- specops_ai/trace.py +219 -0
- specops_ai/viz.py +70 -0
- specops_ai-0.2.0.dist-info/METADATA +306 -0
- specops_ai-0.2.0.dist-info/RECORD +20 -0
- specops_ai-0.2.0.dist-info/WHEEL +4 -0
- specops_ai-0.2.0.dist-info/licenses/LICENSE +21 -0
specops_ai/__init__.py
ADDED
@@ -0,0 +1,124 @@
+"""SpecOps — Agent Reliability Kit."""
+
+__version__ = "0.2.0"
+
+from specops_ai.adapters import BaseAdapter, PlainAdapter, get_adapter, register_adapter
+from specops_ai.config import configure, get_tracer, reset
+from specops_ai.coordinate import (
+    AgentOutput,
+    BehaviorTrace,
+    CoordinationIssue,
+    CoordinationResult,
+    MemorySnapshot,
+    check_consensus,
+    check_divergence,
+    check_memory_integrity,
+)
+from specops_ai.eval import (
+    EvalCase,
+    EvalResult,
+    JudgeVerdict,
+    eval_golden_set,
+    eval_golden_set_async,
+    llm_judge,
+    llm_judge_async,
+)
+from specops_ai.heal import (
+    EscalatePolicy,
+    FallbackPolicy,
+    HealingChain,
+    PolicyAction,
+    PolicyResult,
+    PruneMemoryPolicy,
+    RetryPolicy,
+    self_healing,
+)
+from specops_ai.rca import RCAEdge, RCAGraph, RCANode, build_rca_graph
+from specops_ai.replay import (
+    RecordedCall,
+    ReplayMismatchError,
+    ReplaySession,
+    ReplayStore,
+    recording,
+    replayable,
+    replaying,
+)
+from specops_ai.simulate import (
+    AnomalyType,
+    SimEvent,
+    SimResult,
+    SimulationBudgetExceeded,
+    SimulationEnvironment,
+    get_current_simulation,
+    simulate,
+    simulation,
+)
+from specops_ai.trace import trace_agent, trace_llm, trace_tool
+from specops_ai.viz import save_dot, to_dot
+
+__all__ = [
+    # Adapters
+    "BaseAdapter",
+    "PlainAdapter",
+    "get_adapter",
+    "register_adapter",
+    # Config
+    "configure",
+    "get_tracer",
+    "reset",
+    # Tracing
+    "trace_agent",
+    "trace_llm",
+    "trace_tool",
+    # Replay
+    "RecordedCall",
+    "ReplayMismatchError",
+    "ReplaySession",
+    "ReplayStore",
+    "recording",
+    "replayable",
+    "replaying",
+    # Eval
+    "EvalCase",
+    "EvalResult",
+    "JudgeVerdict",
+    "eval_golden_set",
+    "eval_golden_set_async",
+    "llm_judge",
+    "llm_judge_async",
+    # Heal
+    "EscalatePolicy",
+    "FallbackPolicy",
+    "HealingChain",
+    "PolicyAction",
+    "PolicyResult",
+    "PruneMemoryPolicy",
+    "RetryPolicy",
+    "self_healing",
+    # RCA
+    "RCAEdge",
+    "RCAGraph",
+    "RCANode",
+    "build_rca_graph",
+    # Viz
+    "save_dot",
+    "to_dot",
+    # Simulation
+    "AnomalyType",
+    "SimEvent",
+    "SimResult",
+    "SimulationBudgetExceeded",
+    "SimulationEnvironment",
+    "get_current_simulation",
+    "simulate",
+    "simulation",
+    # Coordination
+    "AgentOutput",
+    "BehaviorTrace",
+    "CoordinationIssue",
+    "CoordinationResult",
+    "MemorySnapshot",
+    "check_consensus",
+    "check_divergence",
+    "check_memory_integrity",
+]
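As a quick orientation, the top-level module only pins the version and re-exports the subsystem APIs; a minimal smoke test of that surface (assuming nothing beyond what this file shows):

    import specops_ai

    print(specops_ai.__version__)  # "0.2.0"
    # __all__ mirrors the imports above, grouped by subsystem.
    assert "trace_agent" in specops_ai.__all__
    assert "check_consensus" in specops_ai.__all__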
specops_ai/_constants.py
ADDED
@@ -0,0 +1,33 @@
+"""Semantic attribute keys for SpecOps OTel spans."""
+
+# Agent
+AGENT_NAME = "specops.agent.name"
+AGENT_TASK = "specops.agent.task"
+AGENT_FRAMEWORK = "specops.agent.framework"
+AGENT_STEP = "specops.agent.step"
+AGENT_DECISION = "specops.agent.decision"
+
+# Tool
+TOOL_NAME = "specops.tool.name"
+TOOL_ARGS = "specops.tool.args"
+TOOL_RESULT = "specops.tool.result"
+
+# LLM
+LLM_MODEL = "specops.llm.model"
+LLM_PROVIDER = "specops.llm.provider"
+LLM_TOKENS_INPUT = "specops.llm.tokens.input"
+LLM_TOKENS_OUTPUT = "specops.llm.tokens.output"
+LLM_TEMPERATURE = "specops.llm.temperature"
+LLM_SEED = "specops.llm.seed"
+LLM_RESULT = "specops.llm.result"
+
+# Coordination / multi-agent
+COORDINATION_EVENT = "specops.coordination.event"
+MEMORY_ACCESS = "specops.memory.access"
+
+# Replay
+REPLAY_SEED = "specops.replay.seed"
+REPLAY_SESSION_ID = "specops.replay.session_id"
+
+# Limits
+MAX_ATTR_LENGTH = 1024
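A sketch of how these attribute keys would presumably be attached to spans (hypothetical usage; only the key constants and MAX_ATTR_LENGTH come from this module, the tracer setup is stock OpenTelemetry):

    from opentelemetry import trace

    from specops_ai import _constants

    tracer = trace.get_tracer("example")
    with tracer.start_as_current_span("agent.step") as span:
        span.set_attribute(_constants.AGENT_NAME, "researcher")
        # Presumably long values get clipped to the shared limit.
        span.set_attribute(_constants.TOOL_RESULT, ("x" * 2000)[: _constants.MAX_ATTR_LENGTH])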
specops_ai/_context.py
ADDED
@@ -0,0 +1,21 @@
+"""Context propagation utilities for SpecOps tracing."""
+
+from __future__ import annotations
+
+from contextvars import ContextVar, Token
+
+from opentelemetry.context import Context
+
+_current_agent_ctx: ContextVar[Context | None] = ContextVar(
+    "specops_agent_ctx", default=None
+)
+
+
+def get_current_context() -> Context | None:
+    """Return the active SpecOps trace context."""
+    return _current_agent_ctx.get()
+
+
+def set_current_context(ctx: Context) -> Token[Context | None]:
+    """Set the active context. Returns a token for reset."""
+    return _current_agent_ctx.set(ctx)
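Usage sketch: the returned Token is a plain contextvars.Token, and the module exposes no public reset helper, so restoring the prior context would have to go through the private ContextVar:

    from opentelemetry.context import Context

    from specops_ai._context import get_current_context, set_current_context

    token = set_current_context(Context())  # Context is a dict subclass, so this is constructible
    assert get_current_context() is not None
    # Hypothetical restore — would need the module's private var:
    # _current_agent_ctx.reset(token)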
specops_ai/adapters/__init__.py
ADDED
@@ -0,0 +1,102 @@
+"""Framework adapters for SpecOps tracing."""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import Any
+
+
+class BaseAdapter(ABC):
+    """Base class for framework adapters.
+
+    Adapters normalize framework-specific metadata into SpecOps semantic attributes.
+    """
+
+    @abstractmethod
+    def extract_task(self, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
+        """Extract the agent task from function arguments."""
+
+    @abstractmethod
+    def extract_llm_metadata(self, result: Any) -> dict[str, Any]:
+        """Extract LLM metadata (tokens, model) from a call result."""
+
+    @abstractmethod
+    def extract_tool_metadata(
+        self, args: tuple[Any, ...], kwargs: dict[str, Any], result: Any
+    ) -> dict[str, Any]:
+        """Extract tool metadata from a call."""
+
+
+class PlainAdapter(BaseAdapter):
+    """Default adapter for plain Python agent code.
+
+    Assumes:
+    - First positional arg is the task string.
+    - LLM results are dicts with `model`, `input_tokens`, `output_tokens`.
+    - Tool results are returned directly.
+    """
+
+    def extract_task(self, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
+        """First positional arg or 'task' kwarg."""
+        if args:
+            return str(args[0])
+        return str(kwargs.get("task", ""))
+
+    def extract_llm_metadata(self, result: Any) -> dict[str, Any]:
+        """Extract from dict result."""
+        if isinstance(result, dict):
+            return {
+                k: v
+                for k, v in result.items()
+                if k in ("model", "input_tokens", "output_tokens")
+            }
+        return {}
+
+    def extract_tool_metadata(
+        self, args: tuple[Any, ...], kwargs: dict[str, Any], result: Any
+    ) -> dict[str, Any]:
+        """Return args and result as metadata."""
+        return {"args": args, "kwargs": kwargs, "result": result}
+
+
+_ADAPTERS: dict[str, type[BaseAdapter]] = {
+    "plain": PlainAdapter,
+}
+
+
+def get_adapter(framework: str) -> BaseAdapter:
+    """Get an adapter instance by framework name."""
+    cls = _ADAPTERS.get(framework, PlainAdapter)
+    return cls()
+
+
+def register_adapter(framework: str, adapter_cls: type[BaseAdapter]) -> None:
+    """Register a custom adapter for a framework."""
+    _ADAPTERS[framework] = adapter_cls
+
+
+def _auto_register() -> None:
+    """Auto-register framework adapters if their libraries are available."""
+    try:
+        from specops_ai.adapters.langgraph import LangGraphAdapter
+
+        _ADAPTERS.setdefault("langgraph", LangGraphAdapter)
+    except ImportError:
+        pass
+
+    try:
+        from specops_ai.adapters.crewai import CrewAIAdapter
+
+        _ADAPTERS.setdefault("crewai", CrewAIAdapter)
+    except ImportError:
+        pass
+
+    try:
+        from specops_ai.adapters.autogen import AutoGenAdapter
+
+        _ADAPTERS.setdefault("autogen", AutoGenAdapter)
+    except ImportError:
+        pass
+
+
+_auto_register()
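Extending the registry means subclassing BaseAdapter and filling in the three hooks; a minimal sketch for a hypothetical framework name:

    from typing import Any

    from specops_ai.adapters import BaseAdapter, PlainAdapter, get_adapter, register_adapter

    class MyFrameworkAdapter(BaseAdapter):
        def extract_task(self, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
            return str(kwargs.get("goal", args[0] if args else ""))

        def extract_llm_metadata(self, result: Any) -> dict[str, Any]:
            return {}

        def extract_tool_metadata(
            self, args: tuple[Any, ...], kwargs: dict[str, Any], result: Any
        ) -> dict[str, Any]:
            return {"args": args, "kwargs": kwargs, "result": result}

    register_adapter("myframework", MyFrameworkAdapter)
    assert isinstance(get_adapter("myframework"), MyFrameworkAdapter)
    # Unknown names silently fall back to PlainAdapter.
    assert isinstance(get_adapter("nonexistent"), PlainAdapter)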
specops_ai/adapters/autogen.py
ADDED
@@ -0,0 +1,57 @@
+"""AutoGen adapter for SpecOps tracing.
+
+Stub implementation for Microsoft AutoGen's multi-agent chat patterns.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from specops_ai.adapters import BaseAdapter
+
+
+class AutoGenAdapter(BaseAdapter):
+    """Adapter for Microsoft AutoGen framework (stub).
+
+    Handles basic AutoGen chat patterns:
+    - Task extracted from message content or initiate_chat args.
+    - LLM metadata extracted from response dicts.
+    - Tool metadata from function call results.
+
+    Note: This is a minimal stub. Full support for GroupChat, nested chats,
+    and custom speaker selection will be added in a future release.
+    """
+
+    def extract_task(self, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
+        """Extract task from AutoGen message or initiate_chat call."""
+        message = kwargs.get("message", "")
+        if message:
+            return str(message)
+        if args:
+            first = args[0]
+            if isinstance(first, dict):
+                return str(first.get("content", first.get("message", first)))
+            return str(first)
+        return ""
+
+    def extract_llm_metadata(self, result: Any) -> dict[str, Any]:
+        """Extract LLM metadata from AutoGen response."""
+        meta: dict[str, Any] = {}
+        if isinstance(result, dict):
+            meta["model"] = result.get("model", "")
+            usage = result.get("usage", {})
+            if isinstance(usage, dict):
+                meta["input_tokens"] = usage.get("prompt_tokens", 0)
+                meta["output_tokens"] = usage.get("completion_tokens", 0)
+        return meta
+
+    def extract_tool_metadata(
+        self, args: tuple[Any, ...], kwargs: dict[str, Any], result: Any
+    ) -> dict[str, Any]:
+        """Extract tool metadata from AutoGen function calls."""
+        meta: dict[str, Any] = {"args": args, "kwargs": kwargs}
+        if isinstance(result, dict):
+            meta["result"] = result.get("content", result)
+        else:
+            meta["result"] = result
+        return meta
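The stub's extraction can be exercised with an OpenAI-style response dict (hypothetical payload; the field names match the usage keys the adapter reads):

    from specops_ai.adapters.autogen import AutoGenAdapter

    adapter = AutoGenAdapter()
    meta = adapter.extract_llm_metadata(
        {"model": "gpt-4o", "usage": {"prompt_tokens": 120, "completion_tokens": 35}}
    )
    assert meta == {"model": "gpt-4o", "input_tokens": 120, "output_tokens": 35}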
specops_ai/adapters/crewai.py
ADDED
@@ -0,0 +1,80 @@
+"""CrewAI adapter for SpecOps tracing.
+
+Extracts metadata from CrewAI's Crew, Agent, Task, and Process patterns.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from specops_ai.adapters import BaseAdapter
+
+
+class CrewAIAdapter(BaseAdapter):
+    """Adapter for CrewAI framework.
+
+    Handles CrewAI conventions:
+    - Task extracted from CrewAI Task objects or description strings.
+    - LLM metadata extracted from CrewAI's LLM response patterns.
+    - Tool metadata extracted from CrewAI tool execution results.
+    """
+
+    def extract_task(self, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
+        """Extract task from CrewAI Task object or kickoff inputs.
+
+        CrewAI patterns:
+        - crew.kickoff(inputs={"topic": "..."})
+        - Task(description="...")
+        - Plain string as first arg
+        """
+        # Check kwargs for CrewAI kickoff inputs
+        inputs = kwargs.get("inputs", {})
+        if isinstance(inputs, dict) and inputs:
+            return str(next(iter(inputs.values())))
+
+        if not args:
+            return str(kwargs.get("task", kwargs.get("description", "")))
+
+        first = args[0]
+        # CrewAI Task object
+        if hasattr(first, "description"):
+            return str(first.description)
+        # Dict with task info
+        if isinstance(first, dict):
+            return str(first.get("description", first.get("task", first)))
+        return str(first)
+
+    def extract_llm_metadata(self, result: Any) -> dict[str, Any]:
+        """Extract LLM metadata from CrewAI results.
+
+        Supports CrewAI's output objects and plain dicts.
+        """
+        meta: dict[str, Any] = {}
+        # CrewAI TaskOutput / CrewOutput with token_usage
+        if hasattr(result, "token_usage"):
+            usage = result.token_usage
+            if isinstance(usage, dict):
+                meta["input_tokens"] = usage.get("prompt_tokens", 0)
+                meta["output_tokens"] = usage.get("completion_tokens", 0)
+                meta["model"] = usage.get("model", "")
+        # Dict fallback
+        if isinstance(result, dict):
+            meta.setdefault("model", result.get("model", ""))
+            usage = result.get("token_usage", result.get("usage", {}))
+            if isinstance(usage, dict):
+                meta.setdefault("input_tokens", usage.get("prompt_tokens", 0))
+                meta.setdefault("output_tokens", usage.get("completion_tokens", 0))
+        return meta
+
+    def extract_tool_metadata(
+        self, args: tuple[Any, ...], kwargs: dict[str, Any], result: Any
+    ) -> dict[str, Any]:
+        """Extract tool metadata from CrewAI tool executions."""
+        meta: dict[str, Any] = {"args": args, "kwargs": kwargs}
+        if hasattr(result, "output"):
+            meta["result"] = result.output
+        elif isinstance(result, dict):
+            meta["result"] = result.get("output", result.get("result", result))
+        else:
+            meta["result"] = result
+        return meta
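Task extraction precedence, illustrated with kickoff-style inputs (with a multi-key inputs dict the first inserted value wins, since dicts iterate in insertion order):

    from specops_ai.adapters.crewai import CrewAIAdapter

    adapter = CrewAIAdapter()
    # kwargs["inputs"] takes priority over positional args.
    assert adapter.extract_task((), {"inputs": {"topic": "LLM reliability"}}) == "LLM reliability"

    # Anything with a .description attribute (e.g. a CrewAI Task) is unwrapped.
    class FakeTask:
        description = "summarize the report"

    assert adapter.extract_task((FakeTask(),), {}) == "summarize the report"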
specops_ai/adapters/langgraph.py
ADDED
@@ -0,0 +1,88 @@
+"""LangGraph adapter for SpecOps tracing.
+
+Extracts metadata from LangGraph's StateGraph/Pregel execution patterns.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from specops_ai.adapters import BaseAdapter
+
+
+class LangGraphAdapter(BaseAdapter):
+    """Adapter for LangGraph (StateGraph, Pregel, MessageGraph).
+
+    Handles LangGraph conventions:
+    - Task extracted from the state dict's "input", "task", or "question" key, or the messages list.
+    - LLM metadata extracted from AIMessage-like objects or dicts.
+    - Tool metadata extracted from ToolMessage-like objects or dicts.
+    """
+
+    def extract_task(self, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
+        """Extract task from LangGraph state input.
+
+        LangGraph invocations typically pass a state dict as the first arg
+        with keys like 'input', 'messages', or 'task'.
+        """
+        state = args[0] if args else kwargs.get("state", kwargs.get("input", {}))
+        if isinstance(state, dict):
+            for key in ("input", "task", "question"):
+                if key in state:
+                    return str(state[key])
+            # Check messages list — fall back to the last message's content
+            messages = state.get("messages", [])
+            if messages:
+                last = messages[-1] if isinstance(messages, list) else messages
+                if hasattr(last, "content"):
+                    return str(last.content)
+                if isinstance(last, dict):
+                    return str(last.get("content", ""))
+        if isinstance(state, str):
+            return state
+        return str(state)[:256] if state else ""
+
+    def extract_llm_metadata(self, result: Any) -> dict[str, Any]:
+        """Extract LLM metadata from LangGraph results.
+
+        Supports AIMessage objects (with usage_metadata) and plain dicts.
+        """
+        meta: dict[str, Any] = {}
+        # AIMessage with usage_metadata (langchain-core pattern)
+        if hasattr(result, "usage_metadata"):
+            usage = result.usage_metadata
+            if isinstance(usage, dict):
+                meta["input_tokens"] = usage.get("input_tokens", 0)
+                meta["output_tokens"] = usage.get("output_tokens", 0)
+            elif hasattr(usage, "input_tokens"):
+                meta["input_tokens"] = usage.input_tokens
+                meta["output_tokens"] = usage.output_tokens
+        if hasattr(result, "response_metadata"):
+            rm = result.response_metadata
+            if isinstance(rm, dict):
+                meta.setdefault("model", rm.get("model_name", rm.get("model", "")))
+        # Dict fallback
+        if isinstance(result, dict):
+            if "model" in result:
+                meta["model"] = result["model"]
+            usage = result.get("usage_metadata", result.get("usage", {}))
+            if isinstance(usage, dict):
+                meta.setdefault("input_tokens", usage.get("input_tokens", 0))
+                meta.setdefault("output_tokens", usage.get("output_tokens", 0))
+        return meta
+
+    def extract_tool_metadata(
+        self, args: tuple[Any, ...], kwargs: dict[str, Any], result: Any
+    ) -> dict[str, Any]:
+        """Extract tool metadata from LangGraph tool invocations.
+
+        Supports ToolMessage objects and plain dicts.
+        """
+        meta: dict[str, Any] = {"args": args, "kwargs": kwargs}
+        if hasattr(result, "content"):
+            meta["result"] = result.content
+        elif isinstance(result, dict):
+            meta["result"] = result.get("content", result.get("output", result))
+        else:
+            meta["result"] = result
+        return meta
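A quick check of the dict fallback path (the AIMessage path behaves the same via usage_metadata; the payload below is hypothetical):

    from specops_ai.adapters.langgraph import LangGraphAdapter

    adapter = LangGraphAdapter()
    meta = adapter.extract_llm_metadata(
        {"model": "claude-3-5-sonnet", "usage_metadata": {"input_tokens": 88, "output_tokens": 17}}
    )
    assert meta == {"model": "claude-3-5-sonnet", "input_tokens": 88, "output_tokens": 17}
    # Task extraction prefers explicit state keys over the messages list.
    assert adapter.extract_task(({"task": "triage", "messages": []},), {}) == "triage"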
specops_ai/config.py
ADDED
@@ -0,0 +1,76 @@
+"""Auto-configuration for SpecOps tracing."""
+
+from __future__ import annotations
+
+import os
+
+from opentelemetry import trace
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import (
+    ConsoleSpanExporter,
+    SimpleSpanProcessor,
+)
+
+_tracer: trace.Tracer | None = None
+_configured = False
+
+
+def configure(
+    service_name: str | None = None,
+    endpoint: str | None = None,
+    *,
+    enabled: bool = True,
+) -> None:
+    """Configure the SpecOps tracer.
+
+    Falls back to environment variables:
+    - OTEL_SERVICE_NAME (default: "specops")
+    - OTEL_EXPORTER_OTLP_ENDPOINT (default: None → console exporter)
+    - SPECOPS_ENABLED (default: "true")
+    """
+    global _tracer, _configured  # noqa: PLW0603
+
+    if not enabled or os.environ.get("SPECOPS_ENABLED", "true").lower() == "false":
+        _tracer = trace.NoOpTracer()
+        _configured = True
+        return
+
+    svc = service_name or os.environ.get("OTEL_SERVICE_NAME", "specops")
+    resource = Resource.create({"service.name": svc})
+    provider = TracerProvider(resource=resource)
+
+    otlp_endpoint = endpoint or os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT")
+    if otlp_endpoint:
+        try:
+            from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (  # type: ignore[import-not-found]
+                OTLPSpanExporter,
+            )
+
+            exporter = OTLPSpanExporter(endpoint=otlp_endpoint)
+        except ImportError:
+            # Fall back to console if OTLP exporter not installed
+            exporter = ConsoleSpanExporter()
+    else:
+        exporter = ConsoleSpanExporter()
+
+    provider.add_span_processor(SimpleSpanProcessor(exporter))
+    trace.set_tracer_provider(provider)
+    _tracer = provider.get_tracer("specops", "0.1.0")
+    _configured = True
+
+
+def get_tracer() -> trace.Tracer:
+    """Get the configured tracer, initializing with defaults if needed."""
+    global _tracer  # noqa: PLW0603
+    if not _configured:
+        configure()
+    assert _tracer is not None
+    return _tracer
+
+
+def reset() -> None:
+    """Reset configuration (for testing)."""
+    global _tracer, _configured  # noqa: PLW0603
+    _tracer = None
+    _configured = False
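Configuration resolution in practice — a sketch assuming the OTLP exporter package is not installed, so spans fall back to the console exporter:

    import os

    from specops_ai import configure, get_tracer

    os.environ["OTEL_SERVICE_NAME"] = "demo-agent"
    os.environ.pop("OTEL_EXPORTER_OTLP_ENDPOINT", None)  # no endpoint → ConsoleSpanExporter

    configure()  # same as configure(service_name="demo-agent")
    with get_tracer().start_as_current_span("demo"):
        pass  # the finished span is printed to stdout

    # SPECOPS_ENABLED=false (or configure(enabled=False)) swaps in a NoOpTracer.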