@pmoses-s1/sentinelone-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +199 -0
- package/index.js +352 -0
- package/lib/credentials.js +116 -0
- package/lib/s1.js +767 -0
- package/lib/sdl.js +165 -0
- package/lib/uam-ingest.js +438 -0
- package/package.json +44 -0
- package/tools/hyperautomation.js +250 -0
- package/tools/mgmt-console.js +307 -0
- package/tools/powerquery.js +124 -0
- package/tools/sdl-api.js +133 -0
- package/tools/uam-ingest.js +128 -0
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* PowerQuery tools — sentinelone-powerquery skill
|
|
3
|
+
*
|
|
4
|
+
* Tools:
|
|
5
|
+
* powerquery_run Run a PowerQuery via the LRQ API
|
|
6
|
+
* powerquery_schema_discover Discover field schema for a data source via V1 query
|
|
7
|
+
* powerquery_enumerate_sources List all data sources active in SDL (session init)
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import { lrqRun } from '../lib/s1.js';
|
|
11
|
+
import { v1Query } from '../lib/sdl.js';
|
|
12
|
+
|
|
13
|
+
export const tools = [
  // ─── powerquery_enumerate_sources ─────────────────────────────────────────
  {
    name: 'powerquery_enumerate_sources',
    description: `MANDATORY SESSION INIT: Run the standard data-source enumeration query to discover every dataSource.name, dataSource.vendor, and dataSource.category active in this SDL tenant. Always call this at the start of every session before writing any hunt queries. Results are environment-specific and can change between sessions as integrations are added or removed. Never assume sources from a prior session.`,
    inputSchema: {
      type: 'object',
      properties: {
        hours: {
          type: 'number',
          description: 'Lookback window in hours (default 24). Increase to 168 (7d) if the last 24h had low volume.',
          default: 24,
        },
      },
      required: [],
    },
    // Runs a fixed aggregation query over the lookback window and returns the
    // LRQ result pretty-printed as JSON.
    async handler({ hours = 24 } = {}) {
      // Single-pass distinct aggregation; `| limit 1000` bounds the response
      // even on very noisy tenants.
      const query = `| group UniqueDataSourceNames = array_agg_distinct(dataSource.name),
        UniqueVendors = array_agg_distinct(dataSource.vendor),
        UniqueCategories = array_agg_distinct(dataSource.category)
| limit 1000`;
      const result = await lrqRun(query, { hours });
      return JSON.stringify(result, null, 2);
    },
  },

  // ─── powerquery_run ────────────────────────────────────────────────────────
  {
    name: 'powerquery_run',
    description: `Run a SentinelOne PowerQuery against the Singularity Data Lake using the LRQ API. The LRQ API is async — this tool handles the full launch-poll-cancel lifecycle and returns results. Use for threat hunting, telemetry analysis, dashboard panel validation, and STAR rule testing. Auth: Bearer <jwt> (same token as mgmt API). Time range defaults to last 24 hours if startTime/endTime are omitted.`,
    inputSchema: {
      type: 'object',
      properties: {
        query: {
          type: 'string',
          description: 'The PowerQuery string. Use pipe-separated commands: | filter | group | sort | limit | columns. Three distinct wildcard idioms — use the right one: (1) FIELD PRESENCE / ATTRIBUTE WILDCARD: field=* means "field is present/non-null", e.g. dataSource.name=* | group count=count() by dataSource.name — use this as a query-opener or whenever you need "all events that have this field". (2) ALL-COLUMN TEXT SEARCH: * contains \'value\' or * matches \'regex\' in the initial filter (before the first |) searches ALL indexed fields — use when the user asks to find text anywhere in the event, e.g. dataSource.name=\'MySource\' * contains \'evil.com\'. Dramatically faster than message contains. (3) EMPTY FILTER (all events): start with | and no initial predicate, e.g. | group ct=count() by event.type. Do NOT use bare * alone as the initial filter — that causes HTTP 500 ("Don\'t understand [*]").',
        },
        startTime: {
          type: 'string',
          description: 'ISO-8601 UTC start time, e.g. "2026-04-20T00:00:00Z". If omitted, defaults to (now - hours) ago.',
        },
        endTime: {
          type: 'string',
          description: 'ISO-8601 UTC end time, e.g. "2026-04-21T00:00:00Z". If omitted, defaults to now.',
        },
        hours: {
          type: 'number',
          description: 'Lookback window in hours when startTime/endTime are not specified (default 24).',
          default: 24,
        },
        maxRows: {
          type: 'number',
          description: 'Maximum rows to return (default 1000, max 5000).',
          default: 1000,
        },
      },
      required: ['query'],
    },
    // Launches the query via lrqRun (which owns the async poll lifecycle) and
    // returns the result pretty-printed as JSON.
    async handler({ query, startTime, endTime, hours = 24, maxRows = 1000 }) {
      // Clamp to the documented API maximum ("max 5000" in the schema above),
      // mirroring the Math.min clamp powerquery_schema_discover applies to
      // maxEvents. Previously an oversized value was passed straight through.
      const result = await lrqRun(query, {
        startTime,
        endTime,
        hours,
        maxRows: Math.min(maxRows, 5000),
      });
      return JSON.stringify(result, null, 2);
    },
  },

  // ─── powerquery_schema_discover ────────────────────────────────────────────
  {
    name: 'powerquery_schema_discover',
    description: `Discover the field schema for a specific SDL data source by fetching raw event JSON via the V1 query endpoint. PowerQuery's default projection only returns timestamp+message; V1 query returns full event attributes so you can see what field names are actually present. Use this before authoring any hunt query or dashboard panel against a non-OCSF source. The V1 endpoint is deprecated (sunset Feb 2027) but is still the only way to get full event JSON per-source. Auth falls through to console JWT automatically.`,
    inputSchema: {
      type: 'object',
      properties: {
        dataSourceName: {
          type: 'string',
          description: 'Exact dataSource.name value (case-sensitive, as returned by powerquery_enumerate_sources).',
        },
        maxEvents: {
          type: 'number',
          description: 'Number of sample events to retrieve (default 5, max 50).',
          default: 5,
        },
        startTime: {
          type: 'string',
          description: 'Lookback string or ISO date, e.g. "24h", "7d", or "2026-04-20T00:00:00Z" (default "24h").',
          default: '24h',
        },
      },
      required: ['dataSourceName'],
    },
    // Pulls sample raw events for one data source and summarizes the union of
    // attribute names seen across the samples.
    async handler({ dataSourceName, maxEvents = 5, startTime = '24h' }) {
      // Escape backslashes and single quotes so a source name containing '
      // cannot break out of (or inject into) the filter expression.
      // NOTE(review): assumes the V1 filter grammar accepts backslash escapes
      // inside single-quoted strings — confirm against the endpoint docs.
      const safeName = String(dataSourceName)
        .replace(/\\/g, '\\\\')
        .replace(/'/g, "\\'");
      const filter = `dataSource.name=='${safeName}'`;
      const result = await v1Query(filter, { maxCount: Math.min(maxEvents, 50), startTime });

      const matches = result.matches || [];
      if (matches.length === 0) {
        return JSON.stringify({ dataSourceName, message: 'No events found in the specified time range. Try a longer startTime like "7d".', result }, null, 2);
      }

      // Union of attribute keys across all sampled events (not just the first
      // event, since sparse fields may be absent from any single sample).
      const firstAttrs = matches[0]?.attributes || {};
      const allFields = new Set();
      matches.forEach(m => Object.keys(m?.attributes || {}).forEach(k => allFields.add(k)));

      return JSON.stringify({
        dataSourceName,
        sampleEventCount: matches.length,
        confirmedFields: Array.from(allFields).sort(),
        firstEventAttributes: firstAttrs,
        allSampleAttributes: matches.map(m => m.attributes),
      }, null, 2);
    },
  },
];
|
package/tools/sdl-api.js
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SDL API tools — sentinelone-sdl-api, sentinelone-sdl-dashboard, sentinelone-sdl-log-parser skills
|
|
3
|
+
*
|
|
4
|
+
* Tools:
|
|
5
|
+
* sdl_list_files List all config files on the SDL tenant
|
|
6
|
+
* sdl_get_file Get file content and version (parsers, dashboards, alerts, lookups)
|
|
7
|
+
* sdl_put_file Deploy or update a config file (with optimistic locking)
|
|
8
|
+
* sdl_delete_file Delete a config file
|
|
9
|
+
* sdl_upload_logs Upload raw log events to SDL (requires Log Write key)
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { listFiles, getFile, putFile, deleteFile, uploadLogs } from '../lib/sdl.js';
|
|
13
|
+
|
|
14
|
+
export const tools = [
  // —— sdl_list_files: enumerate every config path on the tenant ————————————
  {
    name: 'sdl_list_files',
    description: `List all configuration files stored in the SDL tenant. Returns all paths organized by type: /logParsers/, /dashboards/, /alerts/, /lookups/, /datatables/. Use this to discover what parsers and dashboards are already deployed, or to find a file path before calling sdl_get_file or sdl_put_file.`,
    inputSchema: {
      type: 'object',
      properties: {},
      required: [],
    },
    // No parameters: simply forwards to the SDL client and pretty-prints.
    async handler() {
      return JSON.stringify(await listFiles(), null, 2);
    },
  },

  // —— sdl_get_file: read content + current version for one path ————————————
  {
    name: 'sdl_get_file',
    description: `Get the content and current version number of a SDL configuration file. Use before sdl_put_file to read the current version for optimistic locking (pass the returned version as expectedVersion). Supports any file type: parsers (/logParsers/<name>), dashboards (/dashboards/<name>), alerts (/alerts/<name>), lookups (/lookups/<name>), datatables (/datatables/<name>). Always read before overwriting — this prevents concurrent-edit conflicts.`,
    inputSchema: {
      type: 'object',
      properties: {
        path: {
          type: 'string',
          description: 'Full SDL config path, e.g. "/logParsers/FortiGate" or "/dashboards/SOC-Overview". Get the path from sdl_list_files.',
        },
      },
      required: ['path'],
    },
    // Thin pass-through to the SDL client's getFile.
    async handler({ path }) {
      return JSON.stringify(await getFile(path), null, 2);
    },
  },

  // —— sdl_put_file: create/update a file with optimistic locking ———————————
  {
    name: 'sdl_put_file',
    description: `Deploy or update a SDL configuration file. Always call sdl_get_file first to obtain the current expectedVersion — this prevents overwriting concurrent edits. If creating a new file, omit expectedVersion. File type conventions: parsers go to /logParsers/<name>, dashboards to /dashboards/<name>, alerts to /alerts/<name>, lookups to /lookups/<name>. Requires Configuration Write key (SDL_CONFIG_WRITE_KEY) or a console JWT that has config write permissions.`,
    inputSchema: {
      type: 'object',
      properties: {
        path: {
          type: 'string',
          description: 'Full SDL config path, e.g. "/logParsers/MyParser" or "/dashboards/SOC-Ops".',
        },
        content: {
          type: 'string',
          description: 'File content as a string. For dashboards: valid dashboard JSON. For parsers: augmented-JSON parser definition. For lookups: CSV or JSON.',
        },
        expectedVersion: {
          type: 'number',
          description: 'Current file version from sdl_get_file. Required for updates to enable optimistic locking. Omit only when creating a new file.',
        },
      },
      required: ['path', 'content'],
    },
    // expectedVersion stays undefined for brand-new files; the SDL client
    // decides create-vs-update from its presence.
    async handler({ path, content, expectedVersion }) {
      return JSON.stringify(await putFile(path, content, expectedVersion), null, 2);
    },
  },

  // —— sdl_delete_file: permanently remove a config file ————————————————————
  {
    name: 'sdl_delete_file',
    description: `Delete a SDL configuration file (parser, dashboard, alert, lookup, datatable). Use with caution — deletion is permanent. Always read the file with sdl_get_file first to confirm you have the right path and version.`,
    inputSchema: {
      type: 'object',
      properties: {
        path: {
          type: 'string',
          description: 'Full SDL config path to delete.',
        },
        expectedVersion: {
          type: 'number',
          description: 'Current file version for optimistic locking (from sdl_get_file). Strongly recommended.',
        },
      },
      required: ['path'],
    },
    // Forwards the optional version so the client can enforce locking.
    async handler({ path, expectedVersion }) {
      return JSON.stringify(await deleteFile(path, expectedVersion), null, 2);
    },
  },

  // —— sdl_upload_logs: push raw newline-separated events into SDL ——————————
  {
    name: 'sdl_upload_logs',
    description: `Upload raw log events to SDL via the uploadLogs endpoint (plain text, newline-separated). Used for ingesting custom telemetry, testing parsers, and one-off log imports. Requires an SDL Log Write Access key (SDL_LOG_WRITE_KEY) — the console JWT is NOT accepted for this endpoint. Max 6 MB per request, 10 GB per day. Pair with a parser at logfile= to apply field extraction.`,
    inputSchema: {
      type: 'object',
      properties: {
        logContent: {
          type: 'string',
          description: 'Raw log text, newline-separated. Each line becomes a separate SDL event.',
        },
        parser: {
          type: 'string',
          description: 'Parser name to apply to the uploaded events (matches the "parser" header). Omit to use the default parser.',
        },
        logfile: {
          type: 'string',
          description: 'Logical logfile identifier sent as the "logfile" header, e.g. "myapp/access.log". Used by parsers to route events.',
        },
        serverHost: {
          type: 'string',
          description: 'Source host name, sent as the "server-host" header.',
        },
      },
      required: ['logContent'],
    },
    // Optional header fields travel as one options bag to the SDL client.
    async handler({ logContent, parser, logfile, serverHost }) {
      return JSON.stringify(await uploadLogs(logContent, { parser, logfile, serverHost }), null, 2);
    },
  },
];
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* UAM Alert Interface tools — push OCSF indicators + alerts INTO UAM
|
|
3
|
+
* via the SentinelOne HEC ingest host (ingest.us1.sentinelone.net).
|
|
4
|
+
*
|
|
5
|
+
* Tools:
|
|
6
|
+
* uam_ingest_alert End-to-end: build + POST one FileSystem indicator + one SecurityAlert
|
|
7
|
+
* uam_post_indicators Low-level: POST raw OCSF indicators to /v1/indicators
|
|
8
|
+
* uam_post_alert Low-level: POST a single raw OCSF SecurityAlert to /v1/alerts
|
|
9
|
+
*
|
|
10
|
+
* These tools require S1_HEC_INGEST_URL in credentials.json in addition to
|
|
11
|
+
* S1_CONSOLE_API_TOKEN (same token, Bearer prefix instead of ApiToken).
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { ingestAlert, ingestAlertInline, postIndicators, postAlert } from '../lib/uam-ingest.js';
|
|
15
|
+
|
|
16
|
+
export const tools = [

  // —— uam_ingest_alert: end-to-end synthetic alert (two-call or inline) ————
  {
    name: 'uam_ingest_alert',
    description: `Create a synthetic test alert in Unified Alert Management (UAM) via the SentinelOne HEC ingest API. Supports two modes controlled by the "inline" parameter:

Two-call mode (inline=false, default): POST indicator to /v1/indicators, sleep 3s, POST SecurityAlert to /v1/alerts referencing the indicator uid. The stitcher resolves the full indicator into alert.rawIndicators. Best for testing deep indicator stitching and the Indicators tab in UAM.

Inline mode (inline=true): POST a single SecurityAlert to /v1/alerts with the indicator's file/device/actor fields embedded inside finding_info.related_events[]. No separate indicator POST, no sleep — one round-trip. Best for rapid alert creation or when a single call is preferred.

Both modes return indicator_uid and alert_uid. The alert surfaces in UAM within 30-60s. Requires S1_HEC_INGEST_URL in credentials.json.`,
    inputSchema: {
      type: 'object',
      properties: {
        scope: {
          type: 'string',
          description: 'Mandatory. accountId or "accountId:siteId" (colon-separated). Find the accountId via s1_api_get /web/api/v2.1/accounts?limit=1.',
        },
        title: {
          type: 'string',
          description: 'Alert name shown in UAM. Default: "MCP Test Alert".',
          default: 'MCP Test Alert',
        },
        description: {
          type: 'string',
          description: 'Alert description body. Default: generic synthetic alert text.',
        },
        hostname: {
          type: 'string',
          description: 'Hostname to use for the synthetic indicator device. Default: "mcp-test-host".',
          default: 'mcp-test-host',
        },
        filename: {
          type: 'string',
          description: 'Filename for the OCSF FileSystem Activity indicator. Default: "test-payload.exe".',
          default: 'test-payload.exe',
        },
        sha256: {
          type: 'string',
          description: 'SHA-256 hash (64 lowercase hex chars). If omitted, a zeroed placeholder hash is used.',
        },
        sleep_ms: {
          type: 'number',
          description: 'Two-call mode only. Milliseconds to sleep between the indicator POST and the alert POST. Default 3000. Do not go below 2000 on loaded tenants.',
          default: 3000,
        },
        inline: {
          type: 'boolean',
          description: 'When true, embed indicator data (file, device, actor, observables) directly inside the alert\'s finding_info.related_events[] and POST only to /v1/alerts — no separate /v1/indicators call, no sleep. When false (default), use the two-call flow: POST indicator first, sleep, then POST alert.',
          default: false,
        },
      },
      required: ['scope'],
    },
    // Dispatches to the inline or two-call ingest flow; sleep_ms only applies
    // to the two-call flow (it is the indicator→alert settle delay).
    async handler({ scope, title, description, hostname, filename, sha256, sleep_ms = 3000, inline = false }) {
      const base = { scope, title, description, hostname, filename, sha256 };
      let result;
      if (inline) {
        result = await ingestAlertInline(base);
      } else {
        result = await ingestAlert({ ...base, sleepMs: sleep_ms });
      }
      return JSON.stringify(result, null, 2);
    },
  },

  // —— uam_post_indicators: raw OCSF indicator batch → /v1/indicators ———————
  {
    name: 'uam_post_indicators',
    description: `POST one or more raw OCSF behavioral indicators to /v1/indicators on the SentinelOne HEC ingest host. Batching is supported — pass multiple indicators in the array and they are sent in a single gzip-compressed request. Each indicator must carry metadata.profiles=["s1/security_indicator"] and a unique metadata.uid (used as the join key when an alert references it). After posting, wait at least 3s before posting a SecurityAlert that references these indicator uids (use uam_post_alert or uam_ingest_alert which enforce the sleep). Requires S1_HEC_INGEST_URL in credentials.json.`,
    inputSchema: {
      type: 'object',
      properties: {
        scope: {
          type: 'string',
          description: 'accountId or "accountId:siteId". Mandatory.',
        },
        indicators: {
          type: 'array',
          description: 'Array of OCSF indicator objects. Each must have metadata.uid, metadata.profiles=["s1/security_indicator"], class_uid, and observables[]. file.hashes must be a Fingerprint array [{algorithm_id, algorithm, value}], not a plain dict.',
          items: { type: 'object', additionalProperties: true },
        },
      },
      required: ['scope', 'indicators'],
    },
    // Thin pass-through to the ingest client's postIndicators.
    async handler({ scope, indicators }) {
      return JSON.stringify(await postIndicators({ scope, indicators }), null, 2);
    },
  },

  // —— uam_post_alert: one raw OCSF SecurityAlert → /v1/alerts ——————————————
  {
    name: 'uam_post_alert',
    description: `POST a single raw OCSF SecurityAlert to /v1/alerts on the SentinelOne HEC ingest host. IMPORTANT: one alert per call. The HEC stitcher silently drops all but one alert in a multi-alert POST body (HTTP 202 still returned), so this tool rejects arrays. To send multiple alerts, loop this call. Always post indicator(s) first via uam_post_indicators and sleep at least 3s before calling this — posting an alert before its indicator uids are registered causes a silent drop. Requires S1_HEC_INGEST_URL in credentials.json.`,
    inputSchema: {
      type: 'object',
      properties: {
        scope: {
          type: 'string',
          description: 'accountId or "accountId:siteId". Mandatory.',
        },
        alert: {
          type: 'object',
          description: 'Single OCSF SecurityAlert object (class_uid 2002). Must have metadata.uid, finding_info.related_events[] each referencing a previously-posted indicator via uid. Each related_events entry needs class_uid, type_uid, category_uid, activity_id, severity_id, time, message, and observables[] with type+typeName.',
          additionalProperties: true,
        },
      },
      required: ['scope', 'alert'],
    },
    // Thin pass-through to the ingest client's postAlert (single alert only).
    async handler({ scope, alert }) {
      return JSON.stringify(await postAlert({ scope, alert }), null, 2);
    },
  },
];
|