strobe 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- strobe-0.0.1/.github/workflows/ci.yml +68 -0
- strobe-0.0.1/.github/workflows/publish.yml +30 -0
- strobe-0.0.1/.gitignore +5 -0
- strobe-0.0.1/.pre-commit-config.yaml +24 -0
- strobe-0.0.1/CLAUDE.md +140 -0
- strobe-0.0.1/PKG-INFO +10 -0
- strobe-0.0.1/README.md +261 -0
- strobe-0.0.1/pyproject.toml +41 -0
- strobe-0.0.1/strobe/__init__.py +26 -0
- strobe-0.0.1/strobe/_version.py +34 -0
- strobe-0.0.1/strobe/analysis/__init__.py +11 -0
- strobe-0.0.1/strobe/analysis/conformance.py +38 -0
- strobe-0.0.1/strobe/analysis/discovery.py +48 -0
- strobe-0.0.1/strobe/analysis/performance.py +52 -0
- strobe-0.0.1/strobe/instrumentation/__init__.py +4 -0
- strobe-0.0.1/strobe/instrumentation/event_log.py +76 -0
- strobe-0.0.1/strobe/instrumentation/plugin.py +131 -0
- strobe-0.0.1/strobe/visualization/__init__.py +17 -0
- strobe-0.0.1/strobe/visualization/app.py +174 -0
- strobe-0.0.1/strobe/visualization/plots.py +339 -0
- strobe-0.0.1/tests/__init__.py +0 -0
- strobe-0.0.1/tests/analysis/__init__.py +0 -0
- strobe-0.0.1/tests/analysis/test_conformance.py +69 -0
- strobe-0.0.1/tests/analysis/test_discovery.py +81 -0
- strobe-0.0.1/tests/analysis/test_performance.py +127 -0
- strobe-0.0.1/tests/instrumentation/__init__.py +0 -0
- strobe-0.0.1/tests/instrumentation/test_event_log.py +98 -0
- strobe-0.0.1/tests/instrumentation/test_plugin.py +221 -0
- strobe-0.0.1/tests/visualization/__init__.py +0 -0
- strobe-0.0.1/tests/visualization/test_plots.py +199 -0
- strobe-0.0.1/uv.lock +3022 -0
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
name: CI
|
|
2
|
+
|
|
3
|
+
on:
|
|
4
|
+
push:
|
|
5
|
+
branches: [main]
|
|
6
|
+
pull_request:
|
|
7
|
+
branches: [main]
|
|
8
|
+
|
|
9
|
+
jobs:
|
|
10
|
+
lint:
|
|
11
|
+
name: Lint and Format
|
|
12
|
+
runs-on: ubuntu-latest
|
|
13
|
+
steps:
|
|
14
|
+
- uses: actions/checkout@v4
|
|
15
|
+
|
|
16
|
+
- name: Install uv
|
|
17
|
+
uses: astral-sh/setup-uv@v2
|
|
18
|
+
|
|
19
|
+
- name: Set up Python
|
|
20
|
+
uses: actions/setup-python@v5
|
|
21
|
+
with:
|
|
22
|
+
python-version: "3.13"
|
|
23
|
+
|
|
24
|
+
- name: Sync dependencies
|
|
25
|
+
run: uv sync --group test --group dev
|
|
26
|
+
|
|
27
|
+
- name: Run ruff check
|
|
28
|
+
run: uv run ruff check strobe tests
|
|
29
|
+
|
|
30
|
+
- name: Run ruff format check
|
|
31
|
+
run: uv run ruff format --check strobe tests
|
|
32
|
+
|
|
33
|
+
- name: Run mypy type check
|
|
34
|
+
run: uv run mypy strobe --ignore-missing-imports --no-error-summary
|
|
35
|
+
|
|
36
|
+
test:
|
|
37
|
+
name: Tests
|
|
38
|
+
runs-on: ubuntu-latest
|
|
39
|
+
steps:
|
|
40
|
+
- uses: actions/checkout@v4
|
|
41
|
+
|
|
42
|
+
- name: Install uv
|
|
43
|
+
uses: astral-sh/setup-uv@v2
|
|
44
|
+
|
|
45
|
+
- name: Set up Python
|
|
46
|
+
uses: actions/setup-python@v5
|
|
47
|
+
with:
|
|
48
|
+
python-version: "3.13"
|
|
49
|
+
|
|
50
|
+
- name: Sync dependencies
|
|
51
|
+
run: uv sync --group test
|
|
52
|
+
|
|
53
|
+
- name: Run tests
|
|
54
|
+
run: uv run pytest
|
|
55
|
+
|
|
56
|
+
pre-commit:
|
|
57
|
+
name: Pre-commit hooks
|
|
58
|
+
runs-on: ubuntu-latest
|
|
59
|
+
steps:
|
|
60
|
+
- uses: actions/checkout@v4
|
|
61
|
+
|
|
62
|
+
- name: Set up Python
|
|
63
|
+
uses: actions/setup-python@v5
|
|
64
|
+
with:
|
|
65
|
+
python-version: "3.13"
|
|
66
|
+
|
|
67
|
+
- name: Run pre-commit
|
|
68
|
+
uses: pre-commit/action@v3.0.0
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
name: Publish
|
|
2
|
+
|
|
3
|
+
on:
|
|
4
|
+
push:
|
|
5
|
+
tags:
|
|
6
|
+
- "v*"
|
|
7
|
+
|
|
8
|
+
jobs:
|
|
9
|
+
publish:
|
|
10
|
+
name: Publish to PyPI
|
|
11
|
+
runs-on: ubuntu-latest
|
|
12
|
+
environment: publish
|
|
13
|
+
permissions:
|
|
14
|
+
id-token: write
|
|
15
|
+
steps:
|
|
16
|
+
- uses: actions/checkout@v4
|
|
17
|
+
|
|
18
|
+
- name: Install uv
|
|
19
|
+
uses: astral-sh/setup-uv@v2
|
|
20
|
+
|
|
21
|
+
- name: Set up Python
|
|
22
|
+
uses: actions/setup-python@v5
|
|
23
|
+
with:
|
|
24
|
+
python-version: "3.13"
|
|
25
|
+
|
|
26
|
+
- name: Build package
|
|
27
|
+
run: uv build
|
|
28
|
+
|
|
29
|
+
- name: Publish to PyPI
|
|
30
|
+
uses: pypa/gh-action-pypi-publish@release/v1
|
strobe-0.0.1/.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
repos:
|
|
2
|
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
3
|
+
rev: v4.5.0
|
|
4
|
+
hooks:
|
|
5
|
+
- id: trailing-whitespace
|
|
6
|
+
- id: end-of-file-fixer
|
|
7
|
+
- id: check-yaml
|
|
8
|
+
- id: check-json
|
|
9
|
+
- id: check-merge-conflict
|
|
10
|
+
- id: debug-statements
|
|
11
|
+
- id: mixed-line-ending
|
|
12
|
+
|
|
13
|
+
- repo: https://github.com/astral-sh/ruff-pre-commit
|
|
14
|
+
rev: v0.15.4
|
|
15
|
+
hooks:
|
|
16
|
+
- id: ruff
|
|
17
|
+
args: [--fix]
|
|
18
|
+
- id: ruff-format
|
|
19
|
+
|
|
20
|
+
- repo: https://github.com/pre-commit/mirrors-mypy
|
|
21
|
+
rev: v1.19.1
|
|
22
|
+
hooks:
|
|
23
|
+
- id: mypy
|
|
24
|
+
args: [--ignore-missing-imports, --no-error-summary]
|
strobe-0.0.1/CLAUDE.md
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
# CLAUDE.md
|
|
2
|
+
|
|
3
|
+
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
|
4
|
+
|
|
5
|
+
## Project Overview
|
|
6
|
+
|
|
7
|
+
`strobe` is a Python package (requires Python >=3.13) managed with `uv`. It provides instrumentation methods
|
|
8
|
+
for recording execution events on different kinds of agent frameworks. Then it provides a set of tools to analyze
|
|
9
|
+
the recorded events and study the behavior of the agents. For that it makes use of process mining techniques such as
|
|
10
|
+
process discovery, process performance analysis, compliance and conformance analysis, and other process mining techniques.
|
|
11
|
+
|
|
12
|
+
## Commands
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
# Install dependencies (including test group)
|
|
16
|
+
uv sync --group test
|
|
17
|
+
|
|
18
|
+
# Install dev dependencies (includes linting, formatting, pre-commit)
|
|
19
|
+
uv sync --group dev
|
|
20
|
+
|
|
21
|
+
# Run all tests
|
|
22
|
+
uv run pytest
|
|
23
|
+
|
|
24
|
+
# Run a single test file
|
|
25
|
+
uv run pytest tests/path/to/test_file.py
|
|
26
|
+
|
|
27
|
+
# Run a single test by name
|
|
28
|
+
uv run pytest tests/path/to/test_file.py::test_name
|
|
29
|
+
|
|
30
|
+
# Run tests matching a keyword
|
|
31
|
+
uv run pytest -k "keyword"
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Pre-commit Hooks
|
|
35
|
+
|
|
36
|
+
Pre-commit hooks are configured to run automatically before each commit. Install them with:
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
pre-commit install
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
Hooks run:
|
|
43
|
+
- **ruff check**: Linting with Ruff (auto-fixes when possible)
|
|
44
|
+
- **ruff format**: Code formatting with Ruff
|
|
45
|
+
- **mypy**: Static type checking
|
|
46
|
+
- **Basic checks**: Trailing whitespace, end-of-file fixers, YAML/JSON validation, merge conflict detection
|
|
47
|
+
|
|
48
|
+
To run hooks manually on all files:
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
pre-commit run --all-files
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
## CI/CD
|
|
55
|
+
|
|
56
|
+
GitHub Actions workflows run the same checks on pull requests and pushes to `main`:
|
|
57
|
+
- `.github/workflows/ci.yml`: Runs linting, formatting, type checking, and tests
|
|
58
|
+
|
|
59
|
+
## Architecture
|
|
60
|
+
|
|
61
|
+
The library has three main layers:
|
|
62
|
+
|
|
63
|
+
- **Instrumentation** (`strobe.instrumentation`): decorators/hooks/callbacks that integrate with agent frameworks to capture execution events (e.g. tool calls, LLM invocations, agent steps) and record them as event logs.
|
|
64
|
+
- **Analysis** (`strobe.analysis`): process mining tools that operate on the recorded event logs — process discovery, performance analysis, conformance checking, and other process mining techniques.
|
|
65
|
+
- **Visualization** (`strobe.visualization`): interactive Plotly charts and a Streamlit dashboard for exploring analysis results.
|
|
66
|
+
|
|
67
|
+
### Module layout
|
|
68
|
+
|
|
69
|
+
```
|
|
70
|
+
strobe/
|
|
71
|
+
__init__.py # re-exports StrobePlugin + key analysis/viz functions
|
|
72
|
+
instrumentation/
|
|
73
|
+
__init__.py # exports StrobePlugin, EventLog
|
|
74
|
+
event_log.py # EventLog: internal buffer → DataFrame / XES
|
|
75
|
+
plugin.py # StrobePlugin: ADK BasePlugin implementation
|
|
76
|
+
analysis/
|
|
77
|
+
__init__.py # exports discover_dfg, discover_process_model,
|
|
78
|
+
# check_conformance, throughput_times, activity_statistics
|
|
79
|
+
discovery.py # DFG and Petri net discovery wrappers
|
|
80
|
+
conformance.py # token-based replay conformance checking
|
|
81
|
+
performance.py # throughput times, per-activity stats
|
|
82
|
+
visualization/
|
|
83
|
+
__init__.py # exports plot_* functions + launch_dashboard
|
|
84
|
+
plots.py # pure Plotly figure factories (no Streamlit import)
|
|
85
|
+
app.py # Streamlit dashboard + launch_dashboard()
|
|
86
|
+
|
|
87
|
+
tests/
|
|
88
|
+
instrumentation/
|
|
89
|
+
test_event_log.py
|
|
90
|
+
test_plugin.py
|
|
91
|
+
analysis/
|
|
92
|
+
test_discovery.py
|
|
93
|
+
test_conformance.py
|
|
94
|
+
test_performance.py
|
|
95
|
+
visualization/
|
|
96
|
+
test_plots.py
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### Key types
|
|
100
|
+
|
|
101
|
+
| Symbol | Module | Description |
|
|
102
|
+
|---|---|---|
|
|
103
|
+
| `EventLog` | `strobe.instrumentation.event_log` | Accumulates events; exports XES / DataFrame |
|
|
104
|
+
| `StrobePlugin` | `strobe.instrumentation.plugin` | ADK `BasePlugin`; records tool/LLM/agent callbacks |
|
|
105
|
+
| `discover_dfg` | `strobe.analysis.discovery` | Returns `(dfg, start_acts, end_acts)` |
|
|
106
|
+
| `discover_process_model` | `strobe.analysis.discovery` | Returns `(net, im, fm)`; supports inductive & alpha |
|
|
107
|
+
| `check_conformance` | `strobe.analysis.conformance` | Returns fitness/precision/generalization/simplicity |
|
|
108
|
+
| `throughput_times` | `strobe.analysis.performance` | Per-case duration `Series` |
|
|
109
|
+
| `activity_statistics` | `strobe.analysis.performance` | Per-activity count + duration stats `DataFrame` |
|
|
110
|
+
| `plot_dfg` | `strobe.visualization.plots` | Plotly DFG figure (nodes + weighted edges) |
|
|
111
|
+
| `plot_petri_net` | `strobe.visualization.plots` | Plotly Petri net figure (places/transitions) |
|
|
112
|
+
| `plot_throughput_times` | `strobe.visualization.plots` | Violin plot of case durations |
|
|
113
|
+
| `plot_activity_statistics` | `strobe.visualization.plots` | Dual-axis bar chart (count + mean duration) |
|
|
114
|
+
| `plot_conformance` | `strobe.visualization.plots` | Horizontal bar chart of 4 conformance metrics |
|
|
115
|
+
| `launch_dashboard` | `strobe.visualization.app` | Starts Streamlit dashboard via subprocess |
|
|
116
|
+
|
|
117
|
+
### Running the dashboard
|
|
118
|
+
|
|
119
|
+
```bash
|
|
120
|
+
# Direct
|
|
121
|
+
streamlit run strobe/visualization/app.py
|
|
122
|
+
|
|
123
|
+
# From Python (programmatic, e.g. after saving a XES file)
|
|
124
|
+
from strobe import launch_dashboard
|
|
125
|
+
proc = launch_dashboard(xes_path="path/to/log.xes")
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
### XES event attribute mapping
|
|
129
|
+
|
|
130
|
+
| ADK concept | XES attribute | Example |
|
|
131
|
+
|---|---|---|
|
|
132
|
+
| `invocation_id` | `case:concept:name` | `"inv-abc123"` |
|
|
133
|
+
| activity | `concept:name` | `"tool:search"`, `"llm:gemini-2.0"`, `"agent:root_agent"` |
|
|
134
|
+
| completion time | `time:timestamp` | `datetime(...)` |
|
|
135
|
+
| start time | `strobe:start_time` | ISO string |
|
|
136
|
+
| wall-clock duration | `strobe:duration_s` | `1.23` |
|
|
137
|
+
| tool args | `strobe:tool_args` | JSON string |
|
|
138
|
+
| tool result | `strobe:tool_result` | JSON string |
|
|
139
|
+
| model name | `strobe:model_name` | `"gemini-2.0-flash"` |
|
|
140
|
+
| tokens in/out | `strobe:input_tokens`, `strobe:output_tokens` | `123`, `456` |
|
strobe-0.0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: strobe
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Process Mining & Agent Instrumentation for AI Agent Frameworks
|
|
5
|
+
Requires-Python: >=3.13
|
|
6
|
+
Requires-Dist: google-adk>=1.0.0
|
|
7
|
+
Requires-Dist: pandas>=2.0.0
|
|
8
|
+
Requires-Dist: plotly>=5.18.0
|
|
9
|
+
Requires-Dist: pm4py>=2.7.0
|
|
10
|
+
Requires-Dist: streamlit>=1.32.0
|
strobe-0.0.1/README.md
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
1
|
+
# strobe
|
|
2
|
+
|
|
3
|
+
**Process Mining & Agent Instrumentation for AI Agent Frameworks**
|
|
4
|
+
|
|
5
|
+
`strobe` is a Python package that instruments AI agent frameworks to capture execution events and analyze agent behavior using process mining techniques. It helps you understand, visualize, and optimize how your agents execute.
|
|
6
|
+
|
|
7
|
+
## Features
|
|
8
|
+
|
|
9
|
+
- 🎯 **Event Instrumentation**: Decorators and plugins to capture agent execution events (tool calls, LLM invocations, agent steps)
|
|
10
|
+
- 🔬 **Process Discovery**: Automatically discover process models from execution traces (Directly-Follows Graphs, Petri nets)
|
|
11
|
+
- 📊 **Performance Analysis**: Throughput times, activity statistics, and bottleneck detection
|
|
12
|
+
- ✅ **Conformance Checking**: Verify if agent behavior conforms to expected process specifications
|
|
13
|
+
- 📈 **Interactive Visualization**: Beautiful Plotly charts and Streamlit dashboard for exploring results
|
|
14
|
+
- 📁 **Standard Formats**: Export/import event logs in XES (eXtensible Event Stream) format
|
|
15
|
+
|
|
16
|
+
## Installation
|
|
17
|
+
|
|
18
|
+
### Requirements
|
|
19
|
+
- Python >= 3.13
|
|
20
|
+
- `uv` package manager (see [https://docs.astral.sh/uv/](https://docs.astral.sh/uv/))
|
|
21
|
+
|
|
22
|
+
### Setup
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
# Clone the repository
|
|
26
|
+
git clone <repository-url>
|
|
27
|
+
cd strobe
|
|
28
|
+
|
|
29
|
+
# Install dependencies (including test dependencies)
|
|
30
|
+
uv sync --group test
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Quick Start
|
|
34
|
+
|
|
35
|
+
### 1. Instrument Your Agent
|
|
36
|
+
|
|
37
|
+
Use `StrobePlugin` to capture events from your agent framework:
|
|
38
|
+
|
|
39
|
+
```python
|
|
40
|
+
from strobe import StrobePlugin, EventLog
|
|
41
|
+
|
|
42
|
+
# Create a plugin and event log
|
|
43
|
+
plugin = StrobePlugin(event_log=EventLog())
|
|
44
|
+
|
|
45
|
+
# Register with your agent framework (e.g., Google ADK)
|
|
46
|
+
# The plugin automatically captures:
|
|
47
|
+
# - Tool invocations
|
|
48
|
+
# - LLM calls
|
|
49
|
+
# - Agent steps
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### 2. Discover Process Models
|
|
53
|
+
|
|
54
|
+
Analyze the captured events to discover how your agent actually behaves:
|
|
55
|
+
|
|
56
|
+
```python
|
|
57
|
+
from strobe import discover_dfg, discover_process_model
|
|
58
|
+
|
|
59
|
+
# Extract a Directly-Follows Graph (DFG)
|
|
60
|
+
dfg, start_acts, end_acts = discover_dfg(event_log)
|
|
61
|
+
|
|
62
|
+
# Or discover a Petri net process model
|
|
63
|
+
net, initial_marking, final_marking = discover_process_model(event_log, method='inductive')
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
### 3. Visualize Results
|
|
67
|
+
|
|
68
|
+
Create interactive visualizations of discovered processes:
|
|
69
|
+
|
|
70
|
+
```python
|
|
71
|
+
from strobe import plot_dfg, plot_petri_net
|
|
72
|
+
|
|
73
|
+
# Plot the DFG with hierarchical flowchart layout
|
|
74
|
+
fig = plot_dfg(dfg, start_acts, end_acts)
|
|
75
|
+
fig.show()
|
|
76
|
+
|
|
77
|
+
# Plot the Petri net
|
|
78
|
+
fig = plot_petri_net(net, initial_marking, final_marking)
|
|
79
|
+
fig.show()
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
### 4. Analyze Performance
|
|
83
|
+
|
|
84
|
+
Get detailed performance metrics:
|
|
85
|
+
|
|
86
|
+
```python
|
|
87
|
+
from strobe import throughput_times, activity_statistics, plot_activity_statistics
|
|
88
|
+
|
|
89
|
+
# Per-case throughput times
|
|
90
|
+
times = throughput_times(event_log)
|
|
91
|
+
print(f"Mean execution time: {times.mean()}")
|
|
92
|
+
|
|
93
|
+
# Per-activity statistics
|
|
94
|
+
stats = activity_statistics(event_log)
|
|
95
|
+
fig = plot_activity_statistics(stats)
|
|
96
|
+
fig.show()
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### 5. Check Conformance
|
|
100
|
+
|
|
101
|
+
Verify if execution traces conform to a process model:
|
|
102
|
+
|
|
103
|
+
```python
|
|
104
|
+
from strobe import check_conformance
|
|
105
|
+
|
|
106
|
+
# Token-based replay conformance checking
|
|
107
|
+
scores = check_conformance(event_log, net, initial_marking, final_marking)
|
|
108
|
+
print(f"Fitness: {scores['fitness']:.3f}")
|
|
109
|
+
print(f"Precision: {scores['precision']:.3f}")
|
|
110
|
+
print(f"Generalization: {scores['generalization']:.3f}")
|
|
111
|
+
print(f"Simplicity: {scores['simplicity']:.3f}")
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
### 6. Launch Interactive Dashboard
|
|
115
|
+
|
|
116
|
+
View and explore your analysis results in an interactive Streamlit dashboard:
|
|
117
|
+
|
|
118
|
+
```python
|
|
119
|
+
from strobe import launch_dashboard
|
|
120
|
+
|
|
121
|
+
# Launch dashboard and serve a saved XES log
|
|
122
|
+
proc = launch_dashboard(xes_path="path/to/execution_log.xes")
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
Or run it directly:
|
|
126
|
+
```bash
|
|
127
|
+
streamlit run strobe/visualization/app.py
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
## Architecture
|
|
131
|
+
|
|
132
|
+
strobe has three main layers:
|
|
133
|
+
|
|
134
|
+
### 1. Instrumentation (`strobe.instrumentation`)
|
|
135
|
+
Captures execution events from agent frameworks via callbacks/plugins:
|
|
136
|
+
- `StrobePlugin`: Integrates with agent framework (ADK BasePlugin)
|
|
137
|
+
- `EventLog`: Accumulates events and exports to DataFrame/XES
|
|
138
|
+
|
|
139
|
+
### 2. Analysis (`strobe.analysis`)
|
|
140
|
+
Process mining algorithms operating on event logs:
|
|
141
|
+
- **Discovery**: Extract Directly-Follows Graphs (DFG) and Petri nets
|
|
142
|
+
- **Performance**: Throughput times and activity statistics
|
|
143
|
+
- **Conformance**: Token-based replay conformance checking
|
|
144
|
+
|
|
145
|
+
### 3. Visualization (`strobe.visualization`)
|
|
146
|
+
Interactive charts and dashboards:
|
|
147
|
+
- **Plots**: Plotly figure factories (hierarchical flowcharts, Petri nets, statistics)
|
|
148
|
+
- **Dashboard**: Streamlit app for exploring results
|
|
149
|
+
|
|
150
|
+
### Module Structure
|
|
151
|
+
|
|
152
|
+
```
|
|
153
|
+
strobe/
|
|
154
|
+
├── __init__.py # Main API exports
|
|
155
|
+
├── instrumentation/
|
|
156
|
+
│ ├── event_log.py # EventLog class
|
|
157
|
+
│ └── plugin.py # StrobePlugin (ADK integration)
|
|
158
|
+
├── analysis/
|
|
159
|
+
│ ├── discovery.py # Process discovery
|
|
160
|
+
│ ├── conformance.py # Conformance checking
|
|
161
|
+
│ └── performance.py # Performance analysis
|
|
162
|
+
└── visualization/
|
|
163
|
+
├── plots.py # Plotly figure factories
|
|
164
|
+
└── app.py # Streamlit dashboard
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
## API Reference
|
|
168
|
+
|
|
169
|
+
### Key Classes
|
|
170
|
+
|
|
171
|
+
| Name | Module | Purpose |
|
|
172
|
+
|------|--------|---------|
|
|
173
|
+
| `EventLog` | `strobe.instrumentation` | Buffer and export execution events |
|
|
174
|
+
| `StrobePlugin` | `strobe.instrumentation` | ADK plugin for event capture |
|
|
175
|
+
|
|
176
|
+
### Key Functions
|
|
177
|
+
|
|
178
|
+
| Name | Module | Returns |
|
|
179
|
+
|------|--------|---------|
|
|
180
|
+
| `discover_dfg` | `strobe.analysis` | `(dfg, start_acts, end_acts)` |
|
|
181
|
+
| `discover_process_model` | `strobe.analysis` | `(net, im, fm)` |
|
|
182
|
+
| `check_conformance` | `strobe.analysis` | `dict[str, float]` (fitness, precision, generalization, simplicity) |
|
|
183
|
+
| `throughput_times` | `strobe.analysis` | `pd.Series` |
|
|
184
|
+
| `activity_statistics` | `strobe.analysis` | `pd.DataFrame` |
|
|
185
|
+
| `plot_dfg` | `strobe.visualization` | `plotly.graph_objects.Figure` |
|
|
186
|
+
| `plot_petri_net` | `strobe.visualization` | `plotly.graph_objects.Figure` |
|
|
187
|
+
| `plot_activity_statistics` | `strobe.visualization` | `plotly.graph_objects.Figure` |
|
|
188
|
+
| `plot_conformance` | `strobe.visualization` | `plotly.graph_objects.Figure` |
|
|
189
|
+
| `launch_dashboard` | `strobe.visualization` | Process handle |
|
|
190
|
+
|
|
191
|
+
## Running Tests
|
|
192
|
+
|
|
193
|
+
```bash
|
|
194
|
+
# Run all tests
|
|
195
|
+
uv run pytest
|
|
196
|
+
|
|
197
|
+
# Run tests for a specific module
|
|
198
|
+
uv run pytest tests/analysis/
|
|
199
|
+
uv run pytest tests/instrumentation/
|
|
200
|
+
uv run pytest tests/visualization/
|
|
201
|
+
|
|
202
|
+
# Run a specific test file
|
|
203
|
+
uv run pytest tests/analysis/test_discovery.py
|
|
204
|
+
|
|
205
|
+
# Run tests matching a keyword
|
|
206
|
+
uv run pytest -k "discovery"
|
|
207
|
+
|
|
208
|
+
# Run with verbose output
|
|
209
|
+
uv run pytest -v
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
All tests pass (44 tests across instrumentation, analysis, and visualization layers).
|
|
213
|
+
|
|
214
|
+
## XES Event Log Format
|
|
215
|
+
|
|
216
|
+
Events are stored in XES (eXtensible Event Stream) format with the following attribute mapping:
|
|
217
|
+
|
|
218
|
+
| Concept | XES Attribute | Example |
|
|
219
|
+
|---------|---------------|---------|
|
|
220
|
+
| Invocation ID | `case:concept:name` | `"inv-abc123"` |
|
|
221
|
+
| Activity | `concept:name` | `"tool:search"`, `"llm:gemini-2.0"` |
|
|
222
|
+
| Completion time | `time:timestamp` | ISO datetime |
|
|
223
|
+
| Start time | `strobe:start_time` | ISO datetime |
|
|
224
|
+
| Duration | `strobe:duration_s` | `1.23` |
|
|
225
|
+
| Tool arguments | `strobe:tool_args` | JSON string |
|
|
226
|
+
| Tool result | `strobe:tool_result` | JSON string |
|
|
227
|
+
| Model name | `strobe:model_name` | `"gemini-2.0-flash"` |
|
|
228
|
+
| Input tokens | `strobe:input_tokens` | Integer count |
|
|
229
|
+
| Output tokens | `strobe:output_tokens` | Integer count |
|
|
230
|
+
|
|
231
|
+
## Supported Frameworks
|
|
232
|
+
|
|
233
|
+
Currently supports:
|
|
234
|
+
- Google AI Agent Development Kit (ADK)
|
|
235
|
+
|
|
236
|
+
## Dependencies
|
|
237
|
+
|
|
238
|
+
- **pm4py** >= 2.7.0 — Process mining algorithms
|
|
239
|
+
- **pandas** >= 2.0.0 — Data manipulation
|
|
240
|
+
- **networkx** — Graph algorithms (pulled in transitively via pm4py; not a direct dependency in `pyproject.toml`)
|
|
241
|
+
- **plotly** >= 5.18.0 — Interactive visualizations
|
|
242
|
+
- **streamlit** >= 1.32.0 — Dashboard UI
|
|
243
|
+
- **google-adk** >= 1.0.0 — Agent framework
|
|
244
|
+
|
|
245
|
+
## Contributing
|
|
246
|
+
|
|
247
|
+
Contributions are welcome! Please:
|
|
248
|
+
1. Write tests for any new functionality
|
|
249
|
+
2. Run `uv run pytest` to ensure tests pass
|
|
250
|
+
3. Follow existing code style and patterns
|
|
251
|
+
|
|
252
|
+
## License
|
|
253
|
+
|
|
254
|
+
(Add your license here)
|
|
255
|
+
|
|
256
|
+
## Resources
|
|
257
|
+
|
|
258
|
+
- [Process Mining Overview](https://en.wikipedia.org/wiki/Process_mining)
|
|
259
|
+
- [Petri Nets](https://en.wikipedia.org/wiki/Petri_net)
|
|
260
|
+
- [XES Standard](http://www.xes-standard.org/)
|
|
261
|
+
- [pm4py Documentation](https://pm4py.fit.fraunhofer.de/)
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "strobe"
|
|
3
|
+
dynamic = ["version"]
|
|
4
|
+
description = "Process Mining & Agent Instrumentation for AI Agent Frameworks"
|
|
5
|
+
requires-python = ">=3.13"
|
|
6
|
+
dependencies = [
|
|
7
|
+
"google-adk>=1.0.0",
|
|
8
|
+
"pm4py>=2.7.0",
|
|
9
|
+
"pandas>=2.0.0",
|
|
10
|
+
"streamlit>=1.32.0",
|
|
11
|
+
"plotly>=5.18.0",
|
|
12
|
+
]
|
|
13
|
+
|
|
14
|
+
[dependency-groups]
|
|
15
|
+
test = [
|
|
16
|
+
"pytest>=9.0.2",
|
|
17
|
+
"pytest-asyncio>=0.23.0",
|
|
18
|
+
]
|
|
19
|
+
dev = [
|
|
20
|
+
"ruff==0.15.4",
|
|
21
|
+
"mypy==1.19.1",
|
|
22
|
+
"pre-commit==4.5.1",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
[build-system]
|
|
26
|
+
requires = ["hatchling", "hatch-vcs"]
|
|
27
|
+
build-backend = "hatchling.build"
|
|
28
|
+
|
|
29
|
+
[tool.hatch.version]
|
|
30
|
+
source = "vcs"
|
|
31
|
+
|
|
32
|
+
[tool.hatch.build.hooks.vcs]
|
|
33
|
+
version-file = "strobe/_version.py"
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
[tool.pytest.ini_options]
|
|
37
|
+
asyncio_mode = "auto"
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
[tool.ruff]
|
|
41
|
+
extend-exclude = ["strobe/_version.py"]
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
try:
|
|
2
|
+
from strobe._version import __version__
|
|
3
|
+
except ImportError:
|
|
4
|
+
__version__ = "0.0.1.dev0"
|
|
5
|
+
|
|
6
|
+
from strobe.analysis import (
|
|
7
|
+
activity_statistics,
|
|
8
|
+
check_conformance,
|
|
9
|
+
discover_dfg,
|
|
10
|
+
discover_process_model,
|
|
11
|
+
throughput_times,
|
|
12
|
+
)
|
|
13
|
+
from strobe.instrumentation import EventLog, StrobePlugin
|
|
14
|
+
from strobe.visualization import launch_dashboard
|
|
15
|
+
|
|
16
|
+
__all__ = [
|
|
17
|
+
"__version__",
|
|
18
|
+
"StrobePlugin",
|
|
19
|
+
"EventLog",
|
|
20
|
+
"discover_dfg",
|
|
21
|
+
"discover_process_model",
|
|
22
|
+
"check_conformance",
|
|
23
|
+
"throughput_times",
|
|
24
|
+
"activity_statistics",
|
|
25
|
+
"launch_dashboard",
|
|
26
|
+
]
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# file generated by setuptools-scm
# don't change, don't track in version control

__all__ = [
    "__version__",
    "__version_tuple__",
    "version",
    "version_tuple",
    "__commit_id__",
    "commit_id",
]

# Plain False at runtime, so the typing imports below are skipped when the
# module is executed; static type checkers still see the aliases.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple
    from typing import Union

    VERSION_TUPLE = Tuple[Union[int, str], ...]
    COMMIT_ID = Union[str, None]
else:
    VERSION_TUPLE = object
    COMMIT_ID = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE
commit_id: COMMIT_ID
__commit_id__: COMMIT_ID

__version__ = version = '0.0.1'
__version_tuple__ = version_tuple = (0, 0, 1)

__commit_id__ = commit_id = None
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from .conformance import check_conformance
|
|
2
|
+
from .discovery import discover_dfg, discover_process_model
|
|
3
|
+
from .performance import activity_statistics, throughput_times
|
|
4
|
+
|
|
5
|
+
__all__ = [
|
|
6
|
+
"discover_dfg",
|
|
7
|
+
"discover_process_model",
|
|
8
|
+
"check_conformance",
|
|
9
|
+
"throughput_times",
|
|
10
|
+
"activity_statistics",
|
|
11
|
+
]
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import pandas as pd
|
|
4
|
+
import pm4py
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def check_conformance(
    df: pd.DataFrame,
    net,
    initial_marking,
    final_marking,
) -> dict[str, float]:
    """Run token-based replay conformance checking.

    Parameters
    ----------
    df:
        pm4py-formatted event log DataFrame.
    net, initial_marking, final_marking:
        Petri net model (e.g. from :func:`~strobe.analysis.discover_process_model`).

    Returns
    -------
    dict with keys ``fitness``, ``precision``, ``generalization``, ``simplicity``.
    """
    model = (net, initial_marking, final_marking)

    # fitness_token_based_replay returns a dict of fitness measures; guard
    # with NaN in case the expected key is missing.
    replay_fitness = pm4py.fitness_token_based_replay(df, *model)
    fitness_score = replay_fitness.get("average_trace_fitness", float("nan"))

    return {
        "fitness": fitness_score,
        "precision": float(pm4py.precision_token_based_replay(df, *model)),
        "generalization": float(pm4py.generalization_tbr(df, *model)),
        "simplicity": float(pm4py.simplicity_petri_net(*model)),
    }
|