splank 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- splank-0.1.0/PKG-INFO +89 -0
- splank-0.1.0/README.md +65 -0
- splank-0.1.0/pyproject.toml +35 -0
- splank-0.1.0/setup.cfg +4 -0
- splank-0.1.0/splank/__init__.py +32 -0
- splank-0.1.0/splank/__main__.py +6 -0
- splank-0.1.0/splank/cli.py +439 -0
- splank-0.1.0/splank/client.py +193 -0
- splank-0.1.0/splank/config.py +129 -0
- splank-0.1.0/splank.egg-info/PKG-INFO +89 -0
- splank-0.1.0/splank.egg-info/SOURCES.txt +13 -0
- splank-0.1.0/splank.egg-info/dependency_links.txt +1 -0
- splank-0.1.0/splank.egg-info/entry_points.txt +2 -0
- splank-0.1.0/splank.egg-info/requires.txt +1 -0
- splank-0.1.0/splank.egg-info/top_level.txt +1 -0
splank-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: splank
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: CLI tool for querying Splunk logs. Search indexes, discover fields, and manage search jobs.
|
|
5
|
+
License-Expression: MIT
|
|
6
|
+
Project-URL: Homepage, https://github.com/vivainio/splank
|
|
7
|
+
Project-URL: Repository, https://github.com/vivainio/splank
|
|
8
|
+
Project-URL: Issues, https://github.com/vivainio/splank/issues
|
|
9
|
+
Keywords: splunk,cli,logs,search,observability,siem
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Intended Audience :: System Administrators
|
|
14
|
+
Classifier: Operating System :: OS Independent
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: System :: Logging
|
|
20
|
+
Classifier: Topic :: Utilities
|
|
21
|
+
Requires-Python: >=3.11
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: platformdirs>=4.0.0
|
|
24
|
+
|
|
25
|
+
# Splank
|
|
26
|
+
|
|
27
|
+
CLI tool for querying Splunk logs.
|
|
28
|
+
|
|
29
|
+
## Setup
|
|
30
|
+
|
|
31
|
+
```bash
|
|
32
|
+
splank init
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
This creates `~/.config/splank/credentials.toml` with your Splunk credentials.
|
|
36
|
+
|
|
37
|
+
### Configuration
|
|
38
|
+
|
|
39
|
+
The credentials file supports multiple profiles:
|
|
40
|
+
|
|
41
|
+
```toml
|
|
42
|
+
default_profile = "prod"
|
|
43
|
+
|
|
44
|
+
[profiles.prod]
|
|
45
|
+
host = "splunk.example.com"
|
|
46
|
+
port = 8089
|
|
47
|
+
token = "your-token-here"
|
|
48
|
+
verify_ssl = true
|
|
49
|
+
|
|
50
|
+
[profiles.qa]
|
|
51
|
+
host = "splunk-qa.example.com"
|
|
52
|
+
port = 8089
|
|
53
|
+
username = "admin"
|
|
54
|
+
password = "changeme"
|
|
55
|
+
verify_ssl = true
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
## Usage
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
# Search (uses default profile)
|
|
62
|
+
splank search 'index=main Level=ERROR' -m 10
|
|
63
|
+
|
|
64
|
+
# Search using specific profile
|
|
65
|
+
splank -p qa search 'index=main Level=ERROR'
|
|
66
|
+
|
|
67
|
+
# Discover indexes
|
|
68
|
+
splank discover 'web*'
|
|
69
|
+
|
|
70
|
+
# Discover with field info
|
|
71
|
+
splank discover 'app-*' --fields -o DISCOVERY.md
|
|
72
|
+
|
|
73
|
+
# Manage jobs
|
|
74
|
+
splank jobs
|
|
75
|
+
splank clear
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
## Commands
|
|
79
|
+
|
|
80
|
+
- `init` - Create credentials file
|
|
81
|
+
- `search` - Execute SPL query
|
|
82
|
+
- `discover` - Discover available indexes
|
|
83
|
+
- `jobs` - List search jobs
|
|
84
|
+
- `clear` - Clear my search jobs
|
|
85
|
+
|
|
86
|
+
## Options
|
|
87
|
+
|
|
88
|
+
- `-p, --profile` - Splunk profile to use (e.g., 'qa', 'prod')
|
|
89
|
+
- `-V, --version` - Show version
|
splank-0.1.0/README.md
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# Splank
|
|
2
|
+
|
|
3
|
+
CLI tool for querying Splunk logs.
|
|
4
|
+
|
|
5
|
+
## Setup
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
splank init
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
This creates `~/.config/splank/credentials.toml` with your Splunk credentials.
|
|
12
|
+
|
|
13
|
+
### Configuration
|
|
14
|
+
|
|
15
|
+
The credentials file supports multiple profiles:
|
|
16
|
+
|
|
17
|
+
```toml
|
|
18
|
+
default_profile = "prod"
|
|
19
|
+
|
|
20
|
+
[profiles.prod]
|
|
21
|
+
host = "splunk.example.com"
|
|
22
|
+
port = 8089
|
|
23
|
+
token = "your-token-here"
|
|
24
|
+
verify_ssl = true
|
|
25
|
+
|
|
26
|
+
[profiles.qa]
|
|
27
|
+
host = "splunk-qa.example.com"
|
|
28
|
+
port = 8089
|
|
29
|
+
username = "admin"
|
|
30
|
+
password = "changeme"
|
|
31
|
+
verify_ssl = true
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Usage
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
# Search (uses default profile)
|
|
38
|
+
splank search 'index=main Level=ERROR' -m 10
|
|
39
|
+
|
|
40
|
+
# Search using specific profile
|
|
41
|
+
splank -p qa search 'index=main Level=ERROR'
|
|
42
|
+
|
|
43
|
+
# Discover indexes
|
|
44
|
+
splank discover 'web*'
|
|
45
|
+
|
|
46
|
+
# Discover with field info
|
|
47
|
+
splank discover 'app-*' --fields -o DISCOVERY.md
|
|
48
|
+
|
|
49
|
+
# Manage jobs
|
|
50
|
+
splank jobs
|
|
51
|
+
splank clear
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
## Commands
|
|
55
|
+
|
|
56
|
+
- `init` - Create credentials file
|
|
57
|
+
- `search` - Execute SPL query
|
|
58
|
+
- `discover` - Discover available indexes
|
|
59
|
+
- `jobs` - List search jobs
|
|
60
|
+
- `clear` - Clear my search jobs
|
|
61
|
+
|
|
62
|
+
## Options
|
|
63
|
+
|
|
64
|
+
- `-p, --profile` - Splunk profile to use (e.g., 'qa', 'prod')
|
|
65
|
+
- `-V, --version` - Show version
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "splank"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "CLI tool for querying Splunk logs. Search indexes, discover fields, and manage search jobs."
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
license = "MIT"
|
|
7
|
+
requires-python = ">=3.11"
|
|
8
|
+
keywords = ["splunk", "cli", "logs", "search", "observability", "siem"]
|
|
9
|
+
classifiers = [
|
|
10
|
+
"Development Status :: 4 - Beta",
|
|
11
|
+
"Environment :: Console",
|
|
12
|
+
"Intended Audience :: Developers",
|
|
13
|
+
"Intended Audience :: System Administrators",
|
|
14
|
+
"Operating System :: OS Independent",
|
|
15
|
+
"Programming Language :: Python :: 3",
|
|
16
|
+
"Programming Language :: Python :: 3.11",
|
|
17
|
+
"Programming Language :: Python :: 3.12",
|
|
18
|
+
"Programming Language :: Python :: 3.13",
|
|
19
|
+
"Topic :: System :: Logging",
|
|
20
|
+
"Topic :: Utilities",
|
|
21
|
+
]
|
|
22
|
+
dependencies = [
|
|
23
|
+
"platformdirs>=4.0.0",
|
|
24
|
+
]
|
|
25
|
+
|
|
26
|
+
[project.urls]
|
|
27
|
+
Homepage = "https://github.com/vivainio/splank"
|
|
28
|
+
Repository = "https://github.com/vivainio/splank"
|
|
29
|
+
Issues = "https://github.com/vivainio/splank/issues"
|
|
30
|
+
|
|
31
|
+
[project.scripts]
|
|
32
|
+
splank = "splank.cli:main"
|
|
33
|
+
|
|
34
|
+
[tool.uv]
|
|
35
|
+
package = true
|
splank-0.1.0/setup.cfg
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""Splank - CLI tool for querying Splunk logs."""
|
|
2
|
+
|
|
3
|
+
from importlib.metadata import version
|
|
4
|
+
from typing import TYPE_CHECKING
|
|
5
|
+
|
|
6
|
+
if TYPE_CHECKING:
|
|
7
|
+
from splank.client import SplunkClient
|
|
8
|
+
|
|
9
|
+
__version__ = version("splank")
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def client(profile: str | None = None) -> "SplunkClient":
    """Return an authenticated Splunk client for *profile*.

    Credentials are read from ~/.config/splank/credentials.toml.

    Args:
        profile: Profile name (e.g., 'qa', 'prod'); None selects the
            configured default profile.

    Usage:
        import splank
        c = splank.client()               # default profile
        c = splank.client("qa")           # named profile
        rows = list(c.search("index=main | head 10"))

    Returns:
        SplunkClient: Authenticated Splunk client.
    """
    # Imported lazily so that `import splank` stays cheap and free of
    # config-file side effects.
    from splank.config import get_client

    return get_client(profile)
|
|
@@ -0,0 +1,439 @@
|
|
|
1
|
+
"""Splank CLI - Main entry point."""
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import csv
|
|
5
|
+
import fnmatch
|
|
6
|
+
import json
|
|
7
|
+
import sys
|
|
8
|
+
from typing import Iterator
|
|
9
|
+
|
|
10
|
+
from splank import __version__
|
|
11
|
+
from splank.config import get_client, init_config
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def output_json(results: list[dict], file: str | None = None) -> None:
|
|
15
|
+
"""Output results as JSON."""
|
|
16
|
+
output = json.dumps(results, indent=2)
|
|
17
|
+
if file:
|
|
18
|
+
with open(file, "w") as f:
|
|
19
|
+
f.write(output)
|
|
20
|
+
else:
|
|
21
|
+
print(output)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def output_csv(results: list[dict], file: str | None = None) -> None:
|
|
25
|
+
"""Output results as CSV."""
|
|
26
|
+
if not results:
|
|
27
|
+
return
|
|
28
|
+
|
|
29
|
+
# Get all unique fields
|
|
30
|
+
fields: set[str] = set()
|
|
31
|
+
for row in results:
|
|
32
|
+
fields.update(row.keys())
|
|
33
|
+
sorted_fields = sorted(fields)
|
|
34
|
+
|
|
35
|
+
if file:
|
|
36
|
+
f = open(file, "w", newline="")
|
|
37
|
+
else:
|
|
38
|
+
f = sys.stdout
|
|
39
|
+
|
|
40
|
+
writer = csv.DictWriter(f, fieldnames=sorted_fields)
|
|
41
|
+
writer.writeheader()
|
|
42
|
+
writer.writerows(results)
|
|
43
|
+
|
|
44
|
+
if file:
|
|
45
|
+
f.close()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def output_table_streaming(results_iter: Iterator[dict]) -> None:
    """Render rows as a fixed-width text table, streaming as results arrive.

    The first few rows (up to 5) are buffered so column widths can be
    estimated before the header is printed; afterwards rows are printed
    immediately with the widths frozen. The column set is taken from the
    first row; each width starts at max(len(name), 10) and is widened by
    observed values, capped at 50 characters. Prints "No results" when the
    iterator is empty.
    """
    fields: list[str] | None = None
    widths: dict[str, int] = {}
    buffer: list[dict] = []
    header_printed = False

    def _print_header() -> None:
        # Header + separator line sized to the final column widths.
        header = " | ".join(f.ljust(widths[f])[: widths[f]] for f in fields)
        print(header)
        print("-" * len(header))

    def _print_row(row: dict) -> None:
        # Pad/truncate every cell to its column width.
        print(
            " | ".join(
                str(row.get(f, "")).ljust(widths[f])[: widths[f]] for f in fields
            )
        )

    for row in results_iter:
        if fields is None:
            # First row decides the column set.
            fields = sorted(row.keys())
            widths = {f: max(len(f), 10) for f in fields}

        if header_printed:
            # Widths are frozen once the header is out; stream directly.
            _print_row(row)
            continue

        # Still buffering: widen columns based on observed values (cap 50).
        buffer.append(row)
        for f in fields:
            widths[f] = max(widths[f], min(len(str(row.get(f, ""))), 50))

        if len(buffer) >= 5:
            _print_header()
            header_printed = True
            for buffered_row in buffer:
                _print_row(buffered_row)
            buffer = []

    # Fewer than 5 rows total: header was never printed; flush everything.
    if buffer:
        if fields and widths:
            _print_header()
            for row in buffer:
                _print_row(row)
    elif fields is None:
        print("No results")
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def cmd_init(args: argparse.Namespace) -> None:
    """Create the credentials file with example content (``splank init``)."""
    init_config()
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def cmd_search(args: argparse.Namespace) -> None:
|
|
113
|
+
"""Execute a search query."""
|
|
114
|
+
client = get_client(args.profile)
|
|
115
|
+
|
|
116
|
+
use_streaming = args.format == "table" and not args.output
|
|
117
|
+
|
|
118
|
+
results = client.search(
|
|
119
|
+
query=args.query,
|
|
120
|
+
earliest=args.earliest,
|
|
121
|
+
latest=args.latest,
|
|
122
|
+
max_results=args.max_results,
|
|
123
|
+
stream=use_streaming,
|
|
124
|
+
)
|
|
125
|
+
|
|
126
|
+
if args.format == "json":
|
|
127
|
+
output_json(list(results), args.output)
|
|
128
|
+
elif args.format == "csv":
|
|
129
|
+
output_csv(list(results), args.output)
|
|
130
|
+
else:
|
|
131
|
+
output_table_streaming(results)
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def cmd_clear(args: argparse.Namespace) -> None:
    """Delete the user's own search jobs to free the search-job quota.

    Scheduler-owned jobs (sids prefixed ``scheduler_``) are left alone.
    Deletion is best-effort: individual failures no longer vanish silently —
    they are counted and reported to stderr, and the sweep continues.
    """
    client = get_client(args.profile)

    jobs = client.list_jobs(count=100)

    # Keep only ad-hoc (user) jobs; scheduler jobs belong to saved searches.
    my_jobs = [
        j for j in jobs if not j["content"].get("sid", "").startswith("scheduler_")
    ]

    if not my_jobs:
        print("No jobs to clear.")
        return

    deleted = 0
    failed = 0
    for job in my_jobs:
        sid = job["content"]["sid"]
        try:
            client.delete_job(sid)
            deleted += 1
        except Exception:
            # Best-effort sweep: record the failure and keep going.
            failed += 1

    print(f"Cleared {deleted} job(s).")
    if failed:
        print(f"Failed to delete {failed} job(s).", file=sys.stderr)
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def cmd_discover(args: argparse.Namespace) -> None:
    """Discover available indexes via search.

    Without --fields, prints one line per index (name + 24h event count).
    With --fields, runs extra per-index searches (sourcetypes, fieldsummary,
    sample events) and emits a markdown report to stdout or --output.
    """
    client = get_client(args.profile)

    # Use eventcount to discover all searchable indexes (including federated)
    print("Discovering indexes...", file=sys.stderr)
    results = list(
        client.search(
            "| eventcount summarize=false index=*", earliest="-24h", max_results=1000
        )
    )

    # Dedupe and sum counts across indexers.
    # NOTE(review): max() keeps the largest per-indexer count rather than
    # summing — presumably the eventcount rows repeat per indexer; confirm.
    index_counts: dict[str, int] = {}
    for row in results:
        name = row.get("index", "")
        count = int(row.get("count", 0))
        index_counts[name] = max(index_counts.get(name, 0), count)

    # Filter indexes: drop internal/history/summary unless --all, then apply
    # any user-supplied glob patterns.
    indexes: list[tuple[str, int]] = []
    for name in sorted(index_counts.keys()):
        if not args.all and name.startswith(("_", "history", "summary")):
            continue
        if args.patterns:
            if not any(fnmatch.fnmatch(name, p) for p in args.patterns):
                continue
        indexes.append((name, index_counts[name]))

    if not args.fields:
        # Simple list mode: one aligned line per index.
        for name, count in indexes:
            print(f"{name:40} {count:>12} events")
        return

    # Detailed mode with fields - output markdown
    output = ["# Splunk Index Discovery", ""]

    for name, count in indexes:
        print(f"Discovering fields for {name}...", file=sys.stderr)
        output.append(f"## {name}")
        output.append(f"- **Events (24h):** {count:,}")

        # Get sourcetypes (best-effort: skip section on any search failure)
        try:
            st_results = list(
                client.search(
                    f"index={name} | stats count by sourcetype | sort -count",
                    earliest="-24h",
                    max_results=20,
                )
            )
            if st_results:
                sourcetypes = [r.get("sourcetype", "") for r in st_results]
                output.append(f"- **Sourcetypes:** {', '.join(sourcetypes)}")
        except Exception:
            pass

        # Get fields using fieldsummary over a 1000-event sample; only keep
        # fields present in >100 of those events.
        try:
            field_results = list(
                client.search(
                    f"index={name} | head 1000 | fieldsummary | where count > 100 | sort -count | head 30",
                    earliest="-24h",
                    max_results=50,
                )
            )
            if field_results:
                output.append("")
                output.append("### Fields")
                output.append("")
                # Skip internal fields (Splunk-generated date_*/positional noise)
                skip_fields = {
                    "date_hour",
                    "date_mday",
                    "date_minute",
                    "date_month",
                    "date_second",
                    "date_wday",
                    "date_year",
                    "date_zone",
                    "punct",
                    "timestartpos",
                    "timeendpos",
                    "linecount",
                    "index",
                    "splunk_server",
                }
                fields = [
                    f.get("field", "")
                    for f in field_results
                    if f.get("field", "") not in skip_fields
                ]
                output.append(", ".join(f"`{f}`" for f in fields))

                # Get sample values for interesting fields (common dimension
                # names worth showing example values for).
                interesting = {
                    "Level",
                    "level",
                    "severity",
                    "status",
                    "sourcetype",
                    "host",
                    "TenantType",
                    "Environment",
                    "environment",
                    "env",
                    "Region",
                    "region",
                    "cluster",
                    "Cluster",
                    "cluster_name",
                    "ClusterName",
                }
                found_interesting = [f for f in fields if f in interesting]
                if found_interesting:
                    output.append("")
                    output.append("### Sample Values")
                    output.append("")
                    # Get a few sample events; collect up to 5 distinct
                    # values per field, each truncated to 50 chars.
                    try:
                        sample_events = list(
                            client.search(
                                f"index={name} | head 100",
                                earliest="-1h",
                                max_results=100,
                            )
                        )
                        for field in found_interesting:
                            seen: set[str] = set()
                            for evt in sample_events:
                                val = str(evt.get(field, "")).strip()[:50]
                                if val:
                                    seen.add(val)
                                if len(seen) >= 5:
                                    break
                            if seen:
                                output.append(
                                    f"- **{field}:** {', '.join(f'`{s}`' for s in sorted(seen))}"
                                )
                    except Exception:
                        pass
        except Exception as e:
            # Surface the failure in the report instead of aborting the run.
            output.append(f"*Error getting fields: {e}*")

        output.append("")

    # Write output
    md_content = "\n".join(output)
    if args.output:
        with open(args.output, "w") as f:
            f.write(md_content)
        print(f"Written to {args.output}", file=sys.stderr)
    else:
        print(md_content)
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
def cmd_jobs(args: argparse.Namespace) -> None:
    """Print a one-line summary per search job, plus total disk usage."""
    client = get_client(args.profile)
    jobs = client.list_jobs(count=50)

    if args.mine:
        # Scheduler-owned sids start with "scheduler_"; keep ad-hoc jobs only.
        jobs = [
            j for j in jobs if not j["content"].get("sid", "").startswith("scheduler_")
        ]

    if not jobs:
        print("No jobs found.")
        return

    total_mb = 0.0
    for job in jobs:
        content = job["content"]
        mb = content.get("diskUsage", 0) / 1024 / 1024
        total_mb += mb
        state = content.get("dispatchState", "?")
        sid = content.get("sid", "?")
        query = content.get("search", "")[:50]
        print(f"{state:10} {mb:6.1f}MB {sid[:25]:25} {query}")

    print(f"\nTotal: {total_mb:.1f}MB")
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
def main() -> None:
    """Parse command-line arguments and dispatch to the chosen subcommand."""
    root = argparse.ArgumentParser(
        prog="splank",
        description="CLI tool for querying Splunk logs",
    )
    root.add_argument(
        "-V", "--version", action="version", version=f"%(prog)s {__version__}"
    )
    root.add_argument(
        "-p", "--profile", help="Splunk profile to use (e.g., 'qa', 'prod')"
    )

    commands = root.add_subparsers(dest="command", help="Commands")

    # init: write the example credentials file
    commands.add_parser("init", help="Initialize credentials").set_defaults(
        func=cmd_init
    )

    # search: run an SPL query
    search = commands.add_parser("search", help="Execute SPL query")
    search.add_argument("query", help="SPL query to execute")
    search.add_argument(
        "--earliest", "-e", default="-24h", help="Earliest time (default: -24h)"
    )
    search.add_argument(
        "--latest", "-l", default="now", help="Latest time (default: now)"
    )
    search.add_argument(
        "--max-results", "-m", type=int, default=100, help="Max results (default: 100)"
    )
    search.add_argument(
        "--format",
        "-f",
        choices=["json", "csv", "table"],
        default="table",
        help="Output format",
    )
    search.add_argument("--output", "-o", help="Output file (default: stdout)")
    search.set_defaults(func=cmd_search)

    # discover: list indexes, optionally with per-index field detail
    discover = commands.add_parser("discover", help="Discover available indexes")
    discover.add_argument(
        "patterns",
        nargs="*",
        help="Glob patterns to filter indexes (e.g. 'web*' 'app-*')",
    )
    discover.add_argument(
        "--all", "-a", action="store_true", help="Include internal indexes"
    )
    discover.add_argument(
        "--fields", "-f", action="store_true", help="Discover fields for each index"
    )
    discover.add_argument("--output", "-o", help="Output file (default: stdout)")
    discover.set_defaults(func=cmd_discover)

    # jobs: list search jobs
    jobs = commands.add_parser("jobs", help="List search jobs")
    jobs.add_argument("--mine", action="store_true", help="Show only my jobs")
    jobs.set_defaults(func=cmd_jobs)

    # clear: delete the user's own jobs
    commands.add_parser("clear", help="Clear my search jobs").set_defaults(
        func=cmd_clear
    )

    args = root.parse_args()

    if not args.command:
        root.print_help()
        sys.exit(1)

    try:
        args.func(args)
    except Exception as e:
        # Single top-level error boundary: short message, non-zero exit.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
if __name__ == "__main__":
|
|
439
|
+
main()
|
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
"""Splunk REST API client."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import ssl
|
|
5
|
+
import time
|
|
6
|
+
import urllib.error
|
|
7
|
+
import urllib.parse
|
|
8
|
+
import urllib.request
|
|
9
|
+
from typing import Iterator
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def get_ssl_context(verify: bool = True) -> ssl.SSLContext:
    """Build an SSL context, optionally with certificate checks disabled."""
    ctx = ssl.create_default_context()
    if not verify:
        # For self-signed / internal Splunk deployments: skip hostname and
        # certificate verification entirely.
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    return ctx
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SplunkClient:
    """Splunk REST API client built on the standard library's urllib.

    Authentication is either a bearer token (no login() round-trip needed)
    or username/password, which login() exchanges for a session key.
    """

    def __init__(
        self,
        host: str,
        port: int = 8089,
        username: str | None = None,
        password: str | None = None,
        token: str | None = None,
        verify_ssl: bool = True,
    ):
        self.base_url = f"https://{host}:{port}"
        self.username = username
        self.password = password
        self.token = token
        self.ssl_context = get_ssl_context(verify_ssl)
        # Populated by login() when using username/password auth.
        self.session_key: str | None = None

    def _auth_headers(self) -> dict[str, str]:
        """Return the Authorization header for the active auth scheme, if any.

        Shared by _request() and delete_job() (previously duplicated).
        """
        if self.token:
            return {"Authorization": f"Bearer {self.token}"}
        if self.session_key:
            return {"Authorization": f"Splunk {self.session_key}"}
        return {}

    def _request(
        self,
        method: str,
        endpoint: str,
        data: dict | None = None,
        params: dict | None = None,
    ) -> dict:
        """Make an HTTP request to the Splunk API and decode the JSON reply.

        Args:
            method: HTTP verb ("GET", "POST", ...).
            endpoint: Path under base_url, e.g. "/services/search/jobs".
            data: Form fields sent urlencoded in the request body.
            params: Query-string parameters.

        Raises:
            RuntimeError: on any HTTP error, with status and body included.
        """
        url = f"{self.base_url}{endpoint}"
        if params:
            url += "?" + urllib.parse.urlencode(params)

        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        headers.update(self._auth_headers())

        body = urllib.parse.urlencode(data).encode() if data else None

        req = urllib.request.Request(url, data=body, headers=headers, method=method)

        try:
            with urllib.request.urlopen(req, context=self.ssl_context) as resp:
                return json.loads(resp.read().decode())
        except urllib.error.HTTPError as e:
            error_body = e.read().decode()
            # Chain the original HTTPError so the network context isn't lost
            # (the original raised without `from e`).
            raise RuntimeError(f"HTTP {e.code}: {error_body}") from e

    def login(self) -> None:
        """Authenticate with username/password and store the session key.

        No-op for token auth. Raises ValueError when neither a token nor a
        username/password pair is configured.
        """
        if self.token:
            return  # Token auth doesn't need login

        if not self.username or not self.password:
            raise ValueError("Username and password required for authentication")

        data = {
            "username": self.username,
            "password": self.password,
            "output_mode": "json",
        }
        result = self._request("POST", "/services/auth/login", data=data)
        self.session_key = result["sessionKey"]

    def search(
        self,
        query: str,
        earliest: str = "-24h",
        latest: str = "now",
        max_results: int = 100,
        stream: bool = False,
    ) -> Iterator[dict]:
        """Execute a search query and yield result rows as dicts.

        A "search " prefix is added unless the query starts with "|", and a
        "| head" clause is appended (if absent) so the server never processes
        more than *max_results* events. With stream=True, preview results
        are yielded as they become available; otherwise the job is polled to
        completion first.

        Raises:
            RuntimeError: if the search job enters the FAILED state.
        """
        # Generating searches ("| ...") must not get the "search" prefix.
        if query.strip().startswith("|"):
            spl = query
        else:
            spl = f"search {query}"

        # Add head limit if not already present to reduce disk usage
        if "| head" not in spl.lower():
            spl = f"{spl} | head {max_results}"

        data = {
            "search": spl,
            "earliest_time": earliest,
            "latest_time": latest,
            "output_mode": "json",
        }
        result = self._request("POST", "/services/search/jobs", data=data)
        sid = result["sid"]

        if stream:
            yield from self._stream_results(sid, max_results)
        else:
            # Poll the job until it reaches a terminal state.
            while True:
                status = self._request(
                    "GET",
                    f"/services/search/jobs/{sid}",
                    params={"output_mode": "json"},
                )
                state = status["entry"][0]["content"]["dispatchState"]
                if state == "DONE":
                    break
                if state == "FAILED":
                    raise RuntimeError("Search job failed")
                time.sleep(0.5)

            # Fetch the finished result set in one request.
            results = self._request(
                "GET",
                f"/services/search/jobs/{sid}/results",
                params={"output_mode": "json", "count": max_results},
            )
            yield from results.get("results", [])

    def _stream_results(self, sid: str, max_results: int) -> Iterator[dict]:
        """Yield preview results as they become available, up to max_results.

        Polls job state and the results_preview endpoint, using *seen_count*
        as the offset so each row is yielded exactly once.
        """
        seen_count = 0
        while True:
            status = self._request(
                "GET",
                f"/services/search/jobs/{sid}",
                params={"output_mode": "json"},
            )
            state = status["entry"][0]["content"]["dispatchState"]

            # Get whatever preview rows have appeared since the last poll.
            preview = self._request(
                "GET",
                f"/services/search/jobs/{sid}/results_preview",
                params={
                    "output_mode": "json",
                    "count": max_results,
                    "offset": seen_count,
                },
            )
            new_results = preview.get("results", [])
            for result in new_results:
                yield result
                seen_count += 1
                if seen_count >= max_results:
                    return

            if state == "DONE":
                break
            if state == "FAILED":
                raise RuntimeError("Search job failed")
            time.sleep(0.3)

    def list_jobs(self, count: int = 50) -> list[dict]:
        """List search jobs; returns the raw "entry" list from the API."""
        result = self._request(
            "GET",
            "/services/search/jobs",
            params={"output_mode": "json", "count": count},
        )
        return result.get("entry", [])

    def delete_job(self, sid: str) -> None:
        """Delete a search job by sid.

        Uses a raw urllib request because the DELETE endpoint's reply is not
        needed; the response is now closed via `with` (the original leaked it).
        """
        url = f"{self.base_url}/services/search/jobs/{sid}"
        req = urllib.request.Request(
            url, headers=self._auth_headers(), method="DELETE"
        )
        with urllib.request.urlopen(req, context=self.ssl_context):
            pass
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
"""Configuration handling for Splank."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import shutil
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
import tomllib
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from platformdirs import user_config_dir
|
|
11
|
+
|
|
12
|
+
from splank.client import SplunkClient
|
|
13
|
+
|
|
14
|
+
# Per-user config location resolved via platformdirs: XDG config dir on
# Linux, ~/Library/Application Support on macOS, %APPDATA% on Windows.
CONFIG_DIR = Path(user_config_dir("splank"))
CREDENTIALS_FILE = CONFIG_DIR / "credentials.toml"
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def load_credentials() -> dict:
    """Load credentials from $XDG_CONFIG_HOME/splank/credentials.toml.

    Returns an empty dict when the file does not exist.
    """
    try:
        fh = CREDENTIALS_FILE.open("rb")
    except FileNotFoundError:
        return {}
    with fh:
        return tomllib.load(fh)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def get_profile(profile: str | None = None) -> dict:
    """Resolve credentials for one profile from the credentials file.

    Args:
        profile: Profile name (e.g., 'qa', 'prod'). When None, falls back to
            the file's ``default_profile`` key, then to the name 'default'.

    Returns:
        Profile configuration dict (host, port, username, password, token,
        verify_ssl). Exits the process with status 1 when no credentials are
        configured or the requested profile is missing.
    """
    creds = load_credentials()

    if not creds:
        print(
            f"Credentials not configured in {CREDENTIALS_FILE}",
            file=sys.stderr,
        )
        print("\nRun 'splank init' to set up credentials.", file=sys.stderr)
        sys.exit(1)

    # Explicit argument wins, then the file's default, then 'default'.
    wanted = profile or creds.get("default_profile", "default")

    profiles = creds.get("profiles", {})
    try:
        return profiles[wanted]
    except KeyError:
        pass

    # Fall back to top-level keys (simple single-profile setup).
    if "host" in creds:
        return creds

    print(f"Profile '{wanted}' not found in {CREDENTIALS_FILE}", file=sys.stderr)
    print(f"Available profiles: {', '.join(profiles.keys()) or '(none)'}", file=sys.stderr)
    sys.exit(1)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def get_client(profile: str | None = None) -> SplunkClient:
    """Load credentials and create an authenticated Splunk client.

    Args:
        profile: Profile name (e.g., 'qa', 'prod'). If None, uses the
            default profile.

    Returns:
        A logged-in SplunkClient ready for API calls.

    Exits the process with status 1 when the resolved profile has no 'host'.
    """
    creds = get_profile(profile)

    host = creds.get("host")
    if not host:
        # Note: was an f-string with no placeholders (ruff F541); plain literal.
        print("'host' is required in profile", file=sys.stderr)
        sys.exit(1)

    client = SplunkClient(
        host=host,
        port=creds.get("port", 8089),  # 8089 is Splunk's default management port
        username=creds.get("username"),
        password=creds.get("password"),
        token=creds.get("token"),
        verify_ssl=creds.get("verify_ssl", True),
    )
    # Authenticate eagerly so callers receive a ready-to-use session.
    client.login()
    return client
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def init_config() -> None:
    """Initialize credentials file with example values and open in editor."""
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)

    template = """\
# Splank configuration
# Credentials are stored in TOML format

# Default profile to use when --profile is not specified
default_profile = "prod"

[profiles.prod]
host = "splunk-prod.example.com"
port = 8089
# Use either username/password or token authentication
username = ""
password = ""
token = ""
verify_ssl = true

[profiles.qa]
host = "splunk-qa.example.com"
port = 8089
username = ""
password = ""
token = ""
verify_ssl = true
"""
    CREDENTIALS_FILE.write_text(template)
    # Restrict to owner read/write: the file will hold secrets once edited.
    CREDENTIALS_FILE.chmod(0o600)
    print(f"Credentials file created: {CREDENTIALS_FILE}")

    # Prefer the user's $EDITOR; otherwise probe common editors on PATH.
    editor = os.environ.get("EDITOR")
    if not editor:
        for candidate in ("nano", "vim", "vi"):
            editor = shutil.which(candidate)
            if editor:
                break

    if editor:
        subprocess.run([editor, str(CREDENTIALS_FILE)])
    else:
        print("No editor found. Please edit the credentials file manually.")
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: splank
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: CLI tool for querying Splunk logs. Search indexes, discover fields, and manage search jobs.
|
|
5
|
+
License-Expression: MIT
|
|
6
|
+
Project-URL: Homepage, https://github.com/vivainio/splank
|
|
7
|
+
Project-URL: Repository, https://github.com/vivainio/splank
|
|
8
|
+
Project-URL: Issues, https://github.com/vivainio/splank/issues
|
|
9
|
+
Keywords: splunk,cli,logs,search,observability,siem
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Intended Audience :: System Administrators
|
|
14
|
+
Classifier: Operating System :: OS Independent
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: System :: Logging
|
|
20
|
+
Classifier: Topic :: Utilities
|
|
21
|
+
Requires-Python: >=3.11
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: platformdirs>=4.0.0
|
|
24
|
+
|
|
25
|
+
# Splank
|
|
26
|
+
|
|
27
|
+
CLI tool for querying Splunk logs.
|
|
28
|
+
|
|
29
|
+
## Setup
|
|
30
|
+
|
|
31
|
+
```bash
|
|
32
|
+
splank init
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
This creates a `credentials.toml` file in your platform's user config directory (e.g. `~/.config/splank/credentials.toml` on Linux) holding your Splunk credentials.
|
|
36
|
+
|
|
37
|
+
### Configuration
|
|
38
|
+
|
|
39
|
+
The credentials file supports multiple profiles:
|
|
40
|
+
|
|
41
|
+
```toml
|
|
42
|
+
default_profile = "prod"
|
|
43
|
+
|
|
44
|
+
[profiles.prod]
|
|
45
|
+
host = "splunk.example.com"
|
|
46
|
+
port = 8089
|
|
47
|
+
token = "your-token-here"
|
|
48
|
+
verify_ssl = true
|
|
49
|
+
|
|
50
|
+
[profiles.qa]
|
|
51
|
+
host = "splunk-qa.example.com"
|
|
52
|
+
port = 8089
|
|
53
|
+
username = "admin"
|
|
54
|
+
password = "changeme"
|
|
55
|
+
verify_ssl = true
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
## Usage
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
# Search (uses default profile)
|
|
62
|
+
splank search 'index=main Level=ERROR' -m 10
|
|
63
|
+
|
|
64
|
+
# Search using specific profile
|
|
65
|
+
splank -p qa search 'index=main Level=ERROR'
|
|
66
|
+
|
|
67
|
+
# Discover indexes
|
|
68
|
+
splank discover 'web*'
|
|
69
|
+
|
|
70
|
+
# Discover with field info
|
|
71
|
+
splank discover 'app-*' --fields -o DISCOVERY.md
|
|
72
|
+
|
|
73
|
+
# Manage jobs
|
|
74
|
+
splank jobs
|
|
75
|
+
splank clear
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
## Commands
|
|
79
|
+
|
|
80
|
+
- `init` - Create credentials file
|
|
81
|
+
- `search` - Execute SPL query
|
|
82
|
+
- `discover` - Discover available indexes
|
|
83
|
+
- `jobs` - List search jobs
|
|
84
|
+
- `clear` - Clear your own search jobs
|
|
85
|
+
|
|
86
|
+
## Options
|
|
87
|
+
|
|
88
|
+
- `-p, --profile` - Splunk profile to use (e.g., 'qa', 'prod')
|
|
89
|
+
- `-V, --version` - Show version
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
splank/__init__.py
|
|
4
|
+
splank/__main__.py
|
|
5
|
+
splank/cli.py
|
|
6
|
+
splank/client.py
|
|
7
|
+
splank/config.py
|
|
8
|
+
splank.egg-info/PKG-INFO
|
|
9
|
+
splank.egg-info/SOURCES.txt
|
|
10
|
+
splank.egg-info/dependency_links.txt
|
|
11
|
+
splank.egg-info/entry_points.txt
|
|
12
|
+
splank.egg-info/requires.txt
|
|
13
|
+
splank.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
platformdirs>=4.0.0
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
splank
|