@securityreviewai/securityreview-kit 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,205 @@
1
+ import { existsSync } from 'node:fs';
2
+ import { join } from 'node:path';
3
+ import chalk from 'chalk';
4
+ import { input, select } from '@inquirer/prompts';
5
+ import { SENTINEL_START, TARGET_NAMES, TARGETS } from '../utils/constants.js';
6
+ import { readJson, readText } from '../utils/fs-helpers.js';
7
+ import { fetchProjectNames, getStoredCredentials, normalizeApiUrl } from '../utils/srai.js';
8
+
9
// Lazy loaders for each per-target rule generator module. Using dynamic
// import() keeps CLI startup fast: a generator module is only loaded when
// its target is actually configured in the workspace. The import paths are
// kept as static string literals so bundlers can analyze them.
const ruleGenerators = {
  cursor: () => import('../generators/rules/cursor.js'),
  claude: () => import('../generators/rules/claude.js'),
  vscode: () => import('../generators/rules/vscode.js'),
  windsurf: () => import('../generators/rules/windsurf.js'),
  codex: () => import('../generators/rules/codex.js'),
  gemini: () => import('../generators/rules/gemini.js'),
  antigravity: () => import('../generators/rules/antigravity.js'),
};
18
+
19
/**
 * Coerce the value returned by a rule generator into a uniform list of
 * { filePath, action, kind } records.
 *
 * Generators may return a bare path string, a single result object, or an
 * array of either. Unknown `kind` values are coerced to 'rule'; a missing
 * `action` defaults to 'created'.
 *
 * @param {string|object|Array} rawResult - raw generator output
 * @returns {{filePath: string, action: string, kind: string}[]}
 * @throws {Error} when an entry is neither a string nor an object carrying
 *   a string `filePath`.
 */
function normalizeRuleResults(rawResult) {
  const validKinds = new Set(['rule', 'command', 'agent', 'skill']);
  const list = Array.isArray(rawResult) ? rawResult : [rawResult];
  const normalized = [];

  for (const item of list) {
    if (typeof item === 'string') {
      normalized.push({ filePath: item, action: 'created', kind: 'rule' });
      continue;
    }

    if (item && typeof item.filePath === 'string') {
      normalized.push({
        filePath: item.filePath,
        action: item.action || 'created',
        kind: validKinds.has(item.kind) ? item.kind : 'rule',
      });
      continue;
    }

    throw new Error('Rule generator returned an invalid result.');
  }

  return normalized;
}
36
+
37
/**
 * Report whether the SRAI MCP server is already registered in a target's
 * MCP config file under `cwd`.
 *
 * TOML configs (Codex) are checked textually for the server's table header;
 * JSON configs are checked for the server key under either `mcpServers` or
 * `servers` (VS Code uses the latter).
 */
function hasConfiguredMcpServer(cwd, target) {
  const configPath = join(cwd, target.mcpConfigPath);
  if (!existsSync(configPath)) {
    return false;
  }

  if (configPath.endsWith('.toml')) {
    const toml = readText(configPath);
    return toml.includes('[mcp_servers.security-review-mcp]');
  }

  const parsed = readJson(configPath);
  const serverMap = parsed?.mcpServers || parsed?.servers || {};
  return 'security-review-mcp' in serverMap;
}
50
+
51
/**
 * Report whether the SRAI workspace rule is already installed for a target.
 *
 * For append-mode targets (shared files like CLAUDE.md / AGENTS.md) the file
 * must actually contain our sentinel block or the legacy marker text; for
 * dedicated rule files, mere existence of the file is enough.
 */
function hasInstalledRule(cwd, target) {
  const rulePath = join(cwd, target.rulePath);
  if (!existsSync(rulePath)) {
    return false;
  }

  if (target.ruleMode !== 'append') {
    return true;
  }

  const body = readText(rulePath);
  return body.includes(SENTINEL_START) || body.includes('SRAI Security Review');
}
62
+
63
/**
 * List the target keys (subset of TARGET_NAMES) that already have either an
 * MCP server entry or an installed workspace rule in this workspace — i.e.
 * the targets a previous `init` run configured.
 */
function resolveConfiguredTargets(cwd) {
  const isConfigured = (key) => {
    const target = TARGETS[key];
    return hasConfiguredMcpServer(cwd, target) || hasInstalledRule(cwd, target);
  };

  return TARGET_NAMES.filter(isConfigured);
}
69
+
70
/**
 * Resolve the SRAI API URL and token, preferring (in order): CLI flags,
 * environment variables, credentials previously stored for this workspace,
 * and finally an interactive prompt.
 *
 * @param {{apiUrl?: string, apiKey?: string}} options - CLI flag values
 * @param {string} cwd - workspace root used to look up stored credentials
 * @returns {Promise<{apiUrl: string, apiToken: string}>}
 */
async function resolveCredentials(options, cwd) {
  // Flags/env first. Assumes normalizeApiUrl('') yields a falsy value so the
  // fallback chain below can kick in — TODO confirm against utils/srai.js.
  let apiUrl = normalizeApiUrl(options.apiUrl || process.env.SECURITY_REVIEW_API_URL || '');
  let apiToken = String(options.apiKey || process.env.SECURITY_REVIEW_API_TOKEN || '').trim();

  // Fall back to credentials saved by a previous run for this workspace.
  if (!apiUrl || !apiToken) {
    const stored = getStoredCredentials(cwd);
    if (!apiUrl && stored.apiUrl) {
      apiUrl = stored.apiUrl;
    }
    if (!apiToken && stored.apiToken) {
      apiToken = stored.apiToken;
    }
  }

  if (!apiUrl) {
    // Interactive prompt. NOTE(review): the default has no scheme —
    // presumably normalizeApiUrl prepends one before the `new URL` check
    // below; verify, otherwise the default would fail its own validation.
    apiUrl = await input({
      message: '🔗 SRAI API URL:',
      default: 'app.demo.securityreview.ai',
      validate: (v) => {
        const normalized = normalizeApiUrl(v);
        if (!normalized) return 'Must be a valid URL';

        try {
          new URL(normalized);
          return true;
        } catch {
          return 'Must be a valid URL';
        }
      },
    });
    apiUrl = normalizeApiUrl(apiUrl);
  } else {
    console.log(chalk.dim(`  API URL: ${apiUrl} (from saved config/env/flags)`));
  }

  if (!apiToken) {
    apiToken = await input({
      message: '🔑 SRAI API Token:',
      validate: (v) => (v.length > 0 ? true : 'Token is required'),
    });
  } else {
    // Mask the token in terminal output; only its presence is shown.
    console.log(chalk.dim(`  API Token: ${'•'.repeat(8)} (from saved config/env/flags)`));
  }

  return { apiUrl, apiToken };
}
116
+
117
/**
 * Prompt the user to pick an SRAI project from the server's project list.
 *
 * A project pinned via --project-name / SECURITY_REVIEW_PROJECT_NAME is
 * pre-selected when it exists on the server; otherwise no default is set.
 * NOTE(review): if the server returns an empty list, `select` is shown with
 * zero choices — confirm fetchProjectNames guarantees a non-empty array.
 */
async function resolveProjectName(options, apiUrl, apiToken) {
  const pinned = (options.projectName || process.env.SECURITY_REVIEW_PROJECT_NAME || '').trim();
  const names = await fetchProjectNames(apiUrl, apiToken);
  const choices = names.map((projectName) => ({ name: projectName, value: projectName }));

  return select({
    message: '🧩 Select SRAI project:',
    choices,
    default: names.includes(pinned) ? pinned : undefined,
    pageSize: 12,
  });
}
131
+
132
/**
 * `switch-project` command: re-points every IDE target already configured in
 * this workspace at a (possibly different) SRAI project by regenerating the
 * workspace rules with the newly selected project name.
 *
 * Per-target generator failures are caught, reported, and counted — the
 * command keeps going and prints a success/error summary at the end.
 *
 * @param {{apiUrl?: string, apiKey?: string, projectName?: string}} options
 * @returns {Promise<void>}
 */
export async function switchProjectCommand(options = {}) {
  const cwd = process.cwd();
  const targets = resolveConfiguredTargets(cwd);

  // Display label per generated artifact kind. Hoisted out of the
  // per-target/per-rule loops (previously this object literal was rebuilt on
  // every rule iteration) — now built once per invocation.
  const labelByKind = {
    rule: 'Workspace rule',
    command: 'Workspace command',
    agent: 'Workspace agent',
    skill: 'Workspace skill',
  };

  console.log('');
  console.log(chalk.bold.cyan(' ╔══════════════════════════════════════╗'));
  console.log(chalk.bold.cyan(' ║') + chalk.bold(' 🛡️ Security Review Kit — Switch ') + chalk.bold.cyan(' ║'));
  console.log(chalk.bold.cyan(' ╚══════════════════════════════════════╝'));
  console.log('');

  // Nothing to do if `init` was never run here.
  if (targets.length === 0) {
    console.log(chalk.yellow(' ⚠ No configured targets found in this workspace.'));
    console.log(chalk.dim(' Run `securityreview-kit init` first.'));
    console.log('');
    return;
  }

  console.log(chalk.dim(` Targets to update: ${targets.map((t) => TARGETS[t].name).join(', ')}`));
  console.log('');
  console.log(chalk.bold.white(' Step 1 of 2: SRAI Credentials'));
  console.log(chalk.dim(' ─────────────────────────────────'));
  const envVars = await resolveCredentials(options, cwd);
  console.log(chalk.green(' ✓ Credentials configured'));
  console.log('');

  console.log(chalk.bold.white(' Step 2 of 2: SRAI Project Mapping'));
  console.log(chalk.dim(' ─────────────────────────────────'));
  const projectName = await resolveProjectName(options, envVars.apiUrl, envVars.apiToken);
  console.log(chalk.green(` ✓ Project mapped: ${projectName}`));
  console.log('');

  console.log(chalk.bold.white(' Updating workspace rules...'));
  console.log(chalk.dim(' ─────────────────────────────────'));

  const results = [];

  for (const target of targets) {
    const targetInfo = TARGETS[target];
    console.log('');
    console.log(chalk.bold(` ${targetInfo.name}`));

    try {
      // Lazy-load the generator module for this target, then normalize its
      // output into uniform { filePath, action, kind } records.
      const gen = await ruleGenerators[target]();
      const generatedRules = normalizeRuleResults(gen.generate(cwd, { projectName }));

      for (const rule of generatedRules) {
        const label = labelByKind[rule.kind] || 'Workspace rule';
        console.log(chalk.green(` ✓ ${label} → ${rule.filePath} (${rule.action})`));
        results.push({ target, type: rule.kind, status: 'ok', path: rule.filePath, action: rule.action });
      }
    } catch (err) {
      // Report and continue with the next target instead of aborting the run.
      console.log(chalk.red(` ✗ Workspace rule failed: ${err.message}`));
      results.push({ target, type: 'rule', status: 'error', error: err.message });
    }
  }

  const ok = results.filter((r) => r.status === 'ok').length;
  const errors = results.filter((r) => r.status === 'error').length;

  console.log('');
  console.log(chalk.dim(' ─────────────────────────────────'));
  if (errors === 0) {
    console.log(chalk.bold.green(` ✅ Done! ${ok} rule update(s) applied successfully.`));
  } else {
    console.log(chalk.bold.yellow(` ⚠ Done with ${errors} error(s). ${ok} rule update(s) applied.`));
  }
  console.log('');
}
@@ -0,0 +1,27 @@
1
+ import { join } from 'node:path';
2
+ import { readJson, writeJson } from '../../utils/fs-helpers.js';
3
+ import { MCP_SERVER_NAME, MCP_SERVER_PACKAGE } from '../../utils/constants.js';
4
+
5
/**
 * Generate/refresh the Claude Code MCP config at .claude/settings.json.
 *
 * Merges into any existing settings object so unrelated keys and other MCP
 * servers are preserved.
 * NOTE(review): credentials are written in plain text into a workspace
 * file — confirm .claude/settings.json is gitignored in this layout.
 */
export function generate(cwd, envVars) {
  const filePath = join(cwd, '.claude', 'settings.json');
  const settings = readJson(filePath) || {};
  const servers = settings.mcpServers || {};

  servers[MCP_SERVER_NAME] = {
    command: 'npx',
    args: ['-y', `${MCP_SERVER_PACKAGE}@latest`],
    env: {
      SECURITY_REVIEW_API_URL: envVars.apiUrl,
      SECURITY_REVIEW_API_TOKEN: envVars.apiToken,
    },
  };
  settings.mcpServers = servers;

  writeJson(filePath, settings);
  return filePath;
}
@@ -0,0 +1,44 @@
1
+ import { join } from 'node:path';
2
+ import { readText, writeText, ensureDir } from '../../utils/fs-helpers.js';
3
+ import { MCP_SERVER_NAME, MCP_SERVER_PACKAGE } from '../../utils/constants.js';
4
+
5
+ /**
6
+ * Generate Codex MCP config at .codex/config.toml
7
+ * Codex uses TOML format. We use simple string templating since the structure
8
+ * is straightforward and avoids a TOML library dependency.
9
+ */
10
+ export function generate(cwd, envVars) {
11
+ const filePath = join(cwd, '.codex', 'config.toml');
12
+ const existing = readText(filePath);
13
+
14
+ const serverBlock = `
15
+ [mcp_servers.${MCP_SERVER_NAME}]
16
+ command = "npx"
17
+ args = ["-y", "${MCP_SERVER_PACKAGE}@latest"]
18
+
19
+ [mcp_servers.${MCP_SERVER_NAME}.env]
20
+ SECURITY_REVIEW_API_URL = "${envVars.apiUrl}"
21
+ SECURITY_REVIEW_API_TOKEN = "${envVars.apiToken}"
22
+ `.trim();
23
+
24
+ // Check if we already have this server configured
25
+ if (existing.includes(`[mcp_servers.${MCP_SERVER_NAME}]`)) {
26
+ // Replace the existing block — find from the server header to the next
27
+ // section header or end of file
28
+ const regex = new RegExp(
29
+ `\\[mcp_servers\\.${MCP_SERVER_NAME}\\][\\s\\S]*?(?=\\n\\[(?!mcp_servers\\.${MCP_SERVER_NAME})|$)`,
30
+ );
31
+ const updated = existing.replace(regex, serverBlock);
32
+ writeText(filePath, updated);
33
+ } else if (existing) {
34
+ // Append to existing file
35
+ const separator = existing.endsWith('\n') ? '\n' : '\n\n';
36
+ writeText(filePath, existing + separator + serverBlock + '\n');
37
+ } else {
38
+ // New file
39
+ ensureDir(join(cwd, '.codex'));
40
+ writeText(filePath, serverBlock + '\n');
41
+ }
42
+
43
+ return filePath;
44
+ }
@@ -0,0 +1,27 @@
1
+ import { join } from 'node:path';
2
+ import { readJson, writeJson } from '../../utils/fs-helpers.js';
3
+ import { MCP_SERVER_NAME, MCP_SERVER_PACKAGE } from '../../utils/constants.js';
4
+
5
/**
 * Generate/refresh the Cursor MCP config at .cursor/mcp.json.
 *
 * Merges into any existing config object so other registered MCP servers
 * are preserved.
 */
export function generate(cwd, envVars) {
  const filePath = join(cwd, '.cursor', 'mcp.json');
  const config = readJson(filePath) || {};
  const servers = config.mcpServers || {};

  servers[MCP_SERVER_NAME] = {
    command: 'npx',
    args: ['-y', `${MCP_SERVER_PACKAGE}@latest`],
    env: {
      SECURITY_REVIEW_API_URL: envVars.apiUrl,
      SECURITY_REVIEW_API_TOKEN: envVars.apiToken,
    },
  };
  config.mcpServers = servers;

  writeJson(filePath, config);
  return filePath;
}
@@ -0,0 +1,28 @@
1
+ import { join } from 'node:path';
2
+ import { readJson, writeJson } from '../../utils/fs-helpers.js';
3
+ import { MCP_SERVER_NAME, MCP_SERVER_PACKAGE } from '../../utils/constants.js';
4
+
5
+ /**
6
+ * Generate Gemini CLI / Antigravity MCP config at .gemini/settings.json
7
+ * Both Gemini CLI and Antigravity use the same config file path.
8
+ */
9
+ export function generate(cwd, envVars) {
10
+ const filePath = join(cwd, '.gemini', 'settings.json');
11
+ const existing = readJson(filePath) || {};
12
+
13
+ if (!existing.mcpServers) {
14
+ existing.mcpServers = {};
15
+ }
16
+
17
+ existing.mcpServers[MCP_SERVER_NAME] = {
18
+ command: 'npx',
19
+ args: ['-y', `${MCP_SERVER_PACKAGE}@latest`],
20
+ env: {
21
+ SECURITY_REVIEW_API_URL: envVars.apiUrl,
22
+ SECURITY_REVIEW_API_TOKEN: envVars.apiToken,
23
+ },
24
+ };
25
+
26
+ writeJson(filePath, existing);
27
+ return filePath;
28
+ }
@@ -0,0 +1,29 @@
1
+ import { join } from 'node:path';
2
+ import { readJson, writeJson } from '../../utils/fs-helpers.js';
3
+ import { MCP_SERVER_NAME, MCP_SERVER_PACKAGE } from '../../utils/constants.js';
4
+
5
/**
 * Generate the VS Code Copilot MCP config at .vscode/mcp.json.
 *
 * NOTE(review): contrary to the original description, this does NOT use
 * VS Code's `inputs` variable pattern for secure credential prompting — the
 * API URL and token are written in plain text into the `env` block of
 * .vscode/mcp.json. Consider switching to `${input:...}` placeholders so
 * the token is prompted for by VS Code instead of stored on disk; until
 * then, confirm .vscode/mcp.json is gitignored.
 */
export function generate(cwd, envVars) {
  const filePath = join(cwd, '.vscode', 'mcp.json');
  // Merge into any existing config rather than clobbering other servers.
  const existing = readJson(filePath) || {};

  if (!existing.servers) {
    existing.servers = {};
  }

  // VS Code uses `servers` (not `mcpServers`) and requires an explicit type.
  existing.servers[MCP_SERVER_NAME] = {
    type: 'stdio',
    command: 'npx',
    args: ['-y', `${MCP_SERVER_PACKAGE}@latest`],
    env: {
      SECURITY_REVIEW_API_URL: envVars.apiUrl,
      SECURITY_REVIEW_API_TOKEN: envVars.apiToken,
    },
  };

  writeJson(filePath, existing);
  return filePath;
}
@@ -0,0 +1,27 @@
1
+ import { join } from 'node:path';
2
+ import { readJson, writeJson } from '../../utils/fs-helpers.js';
3
+ import { MCP_SERVER_NAME, MCP_SERVER_PACKAGE } from '../../utils/constants.js';
4
+
5
+ /**
6
+ * Generate Windsurf MCP config at .windsurf/mcp_config.json
7
+ */
8
+ export function generate(cwd, envVars) {
9
+ const filePath = join(cwd, '.windsurf', 'mcp_config.json');
10
+ const existing = readJson(filePath) || {};
11
+
12
+ if (!existing.mcpServers) {
13
+ existing.mcpServers = {};
14
+ }
15
+
16
+ existing.mcpServers[MCP_SERVER_NAME] = {
17
+ command: 'npx',
18
+ args: ['-y', `${MCP_SERVER_PACKAGE}@latest`],
19
+ env: {
20
+ SECURITY_REVIEW_API_URL: envVars.apiUrl,
21
+ SECURITY_REVIEW_API_TOKEN: envVars.apiToken,
22
+ },
23
+ };
24
+
25
+ writeJson(filePath, existing);
26
+ return filePath;
27
+ }
@@ -0,0 +1,22 @@
1
+ import { join } from 'node:path';
2
+ import { writeText } from '../../utils/fs-helpers.js';
3
+ import { getRuleContent } from './content.js';
4
+
5
/**
 * Generate the Antigravity workspace rule at
 * .agent/rules/srai-security-review.md.
 *
 * Antigravity reads rules from .agent/rules/ and expects YAML frontmatter
 * with `trigger: always_on` so the rule applies to every session.
 * NOTE(review): no ensureDir call here, unlike the Codex generator —
 * presumably writeText creates parent directories; verify.
 */
export function generate(cwd, options = {}) {
  const filePath = join(cwd, '.agent', 'rules', 'srai-security-review.md');
  const body = getRuleContent(options);

  // Frontmatter + blank line + content, ending with a trailing newline.
  const rule = ['---', 'trigger: always_on', '---', '', body, ''].join('\n');

  writeText(filePath, rule);
  return filePath;
}
@@ -0,0 +1,13 @@
1
+ import { join } from 'node:path';
2
+ import { upsertSentinelBlock } from '../../utils/fs-helpers.js';
3
+ import { getRuleContent } from './content.js';
4
+
5
/**
 * Generate the Claude Code workspace rule by upserting our sentinel block
 * into the shared CLAUDE.md at the workspace root.
 *
 * @returns {{filePath: string, action: string}} the touched file and whether
 *   the sentinel block was created or updated.
 */
export function generate(cwd, options = {}) {
  const filePath = join(cwd, 'CLAUDE.md');
  return {
    filePath,
    action: upsertSentinelBlock(filePath, getRuleContent(options)),
  };
}
@@ -0,0 +1,13 @@
1
+ import { join } from 'node:path';
2
+ import { upsertSentinelBlock } from '../../utils/fs-helpers.js';
3
+ import { getRuleContent } from './content.js';
4
+
5
/**
 * Generate the Codex workspace rule by upserting our sentinel block into the
 * shared AGENTS.md at the workspace root.
 *
 * @returns {{filePath: string, action: string}} the touched file and whether
 *   the sentinel block was created or updated.
 */
export function generate(cwd, options = {}) {
  const filePath = join(cwd, 'AGENTS.md');
  return {
    filePath,
    action: upsertSentinelBlock(filePath, getRuleContent(options)),
  };
}
@@ -0,0 +1,72 @@
1
+ import { readFileSync } from 'node:fs';
2
+ import { dirname, join } from 'node:path';
3
+ import { fileURLToPath } from 'node:url';
4
+
5
+ const __dirname = dirname(fileURLToPath(import.meta.url));
6
+
7
/**
 * Strip characters that would break the markdown templates the project name
 * is injected into: CR, LF, and backticks each become a space, then the
 * result is trimmed.
 */
function sanitizeProjectName(value) {
  const cleaned = value.replace(/[\r\n`]/g, ' ');
  return cleaned.trim();
}
10
+
11
/**
 * Read a markdown template from this directory and substitute the configured
 * project name into both placeholder syntaxes ({{SRAI_PROJECT_NAME}} and
 * <SRAI_PROJECT_NAME>). Falls back to the literal <SRAI_PROJECT_NAME>
 * placeholder when no project name is configured.
 */
function readTemplate(templateFileName, options = {}) {
  const raw = readFileSync(join(__dirname, templateFileName), 'utf-8').trim();

  const candidate = typeof options.projectName === 'string'
    ? sanitizeProjectName(options.projectName)
    : '';
  const projectName = candidate || '<SRAI_PROJECT_NAME>';

  let result = raw;
  for (const placeholder of ['{{SRAI_PROJECT_NAME}}', '<SRAI_PROJECT_NAME>']) {
    result = result.replaceAll(placeholder, projectName);
  }
  return result;
}
24
+
25
/** Shared rule content markdown with the project name substituted in. */
export function getRuleContent(opts = {}) {
  return readTemplate('content.md', opts);
}
31
+
32
/** Cursor profile-uploader command markdown with the project name substituted in. */
export function getProfileCommandContent(opts = {}) {
  return readTemplate('srai-profile.md', opts);
}
38
+
39
/** CTM sync workflow markdown with the project name substituted in. */
export function getCtmSyncWorkflowContent(opts = {}) {
  return readTemplate('ctm_sync.md', opts);
}
45
+
46
/** Cursor CTM sync trigger-rule markdown with the project name substituted in. */
export function getCtmSyncTriggerRuleContent(opts = {}) {
  return readTemplate('ctm_sync_rule.md', opts);
}
52
+
53
/** Cursor create-ide-workflow command markdown with the project name substituted in. */
export function getCreateIdeWorkflowCommandContent(opts = {}) {
  return readTemplate('create-ide-workflow.md', opts);
}
59
+
60
/** Threat-modelling skill markdown with the project name substituted in. */
export function getThreatModellingSkillContent(opts = {}) {
  return readTemplate('skill.md', opts);
}
66
+
67
/** Raw hooks.json payload for Cursor session hooks, trimmed of surrounding whitespace. */
export function getHooksContent() {
  const hooksPath = join(__dirname, 'hooks.json');
  return readFileSync(hooksPath, 'utf-8').trim();
}
@@ -0,0 +1,93 @@
1
+ # Security-First Code Generation with PWNISMS Threat Modeling
2
+
3
+ **THIS RULE IS NON-NEGOTIABLE AND IS TRIGGERED BEFORE YOU EVEN START THINKING.** You MUST think security-first and perform on-the-go threat modeling using the PWNISMS framework BEFORE and DURING every task — whether it is a TODO list, a plan, or code generation. This is not optional. This is not deferred to a later review. Every line of code you write must be informed by threat awareness.
4
+
5
+ > **Skill Reference:** Read and apply `.cursor/skills/threat-modelling/SKILL.md` for the full PWNISMS framework — the 7 threat categories, code generation rules, and post-generation checklist. You MUST walk through all 7 categories for every code task that seems security relevant.
6
+
7
+ ### When NOT to apply
8
+
9
+ Skip PWNISMS and the MCP flow entirely for tasks that have **no security surface**:
10
+
11
+ - Documentation-only changes (README, comments, changelogs)
12
+ - Code formatting, linting, or style fixes
13
+ - Renaming variables/files with no logic change
14
+ - General Q&A, explanations, or research that produces no code
15
+ - UI copy, typo fixes, or purely cosmetic changes
16
+ - Adding or updating tests that don't touch auth, crypto, or input handling
17
+
18
+ If in doubt, do a quick mental scan: *does this change touch auth, input handling, secrets, network, data storage, or dependencies?* If **no**, skip. If **yes** or **maybe**, apply.
19
+
20
+ **MANDATORY POST THREAT MODELLING:** ALWAYS invoke `ctm_sync` immediately after threat modelling is generated or updated. This is required on every threat-model run.
+
21
+ ---
22
+
23
+ ## MCP Enhancement — `security-review-mcp`
24
+
25
+ Configured SRAI project name: `<SRAI_PROJECT_NAME>`
26
+
27
+ Assess Task Security Relevance
28
+
29
+ Before any MCP call, classify the task:
30
+
31
+ 1. Bug fix or refactor with no security surface change — Enrich your context with the profile exploration below, skip review tools
32
+ 2. Non-security task — Skip MCP entirely
33
+ 3. New feature — Start with full profile exploration, then full MCP flow
34
+
35
+ When the `security-review-mcp` MCP server is available, use it to **enrich and persist** your threat analysis. This is a force multiplier on top of PWNISMS, not a replacement.
36
+
37
+ ### Enriching Your Analysis — Step-by-Step Profile Exploration
38
+
39
+ Rather than fetching the entire profile at once, walk through each reasoning block below **in order**. Each step builds context for the next and directly informs your PWNISMS threat modeling.
40
+
41
+ 1. **Find the project** — Call `find_project_by_name` with `name="<SRAI_PROJECT_NAME>"`. If that project does not exist, call `list_projects`. If no project exists, call `create_project`.
42
+
43
+ 2. **Understand what the project does** — Call `get_project_profile_description`.
44
+ - Use this to frame the threat surface: what data flows exist, what the system's purpose is, and where trust boundaries lie.
45
+
46
+ 3. **Discover technology categories** — Call `list_profile_technology_categories`.
47
+ - This reveals the broad technology areas (e.g., frontend, backend, database, cloud). Use this to scope which PWNISMS categories are most relevant.
48
+
49
+ 4. **Deep-dive each technology category** — For each category returned above, call `get_project_profile_technology_category` with the category name.
50
+ - Understand specific frameworks, libraries, and versions. Flag known-vulnerable dependencies for the **Supply Chain (S)** PWNISMS category.
51
+
52
+ 5. **Review architecture notes** — Call `list_project_profile_architecture_notes`.
53
+ - Identify deployment topology, trust boundaries, and data flow patterns. Feed this into **Workload (W)** and **Network (N)** analysis.
54
+
55
+ 6. **Identify user groups** — Call `list_project_profile_user_groups`.
56
+ - Map user roles and privilege levels. This directly informs **IAM (I)** and **Product (P)** threat categories.
57
+
58
+ 7. **Get language stacks** — Call `list_project_profile_language_stacks`.
59
+ - Understand the language/runtime landscape for language-specific vulnerability patterns.
60
+
61
+ 8. **List security controls** — Call `list_project_profile_security_controls`.
62
+ - Get an overview of existing defenses. For each control that is relevant to the current task, call `get_project_profile_security_control` to get full details.
63
+ - Cross-reference with your PWNISMS findings: are there gaps in **Secrets (S)**, **Monitoring (M)**, or **IAM (I)**?
64
+
65
+ 9. **List compliance requirements** — Call `list_profile_compliance_requirements`.
66
+ - For each requirement relevant to the task, call `get_profile_compliance_requirement` for full details.
67
+ - Map these to security objectives and ensure your implementation meets them.
68
+
69
+ 10. **Check for existing reviews** — Call `list_reviews`. If a completed review exists, pull context:
70
+ - `get_threat_scenarios` — Cross-reference with your PWNISMS findings
71
+ - `get_countermeasures` — Apply recommended mitigations
72
+ - `get_components` — Understand the system architecture
73
+ - `get_data_dictionaries` — Identify sensitive data assets and stepping stones
74
+ - `get_security_objectives` — Understand compliance requirements
75
+ - `get_findings` — Review aggregated security insights
76
+
77
+ 11. **No review exists?** Create one:
78
+ - Upload context via `create_document_from_content`
79
+ - `create_review` → `start_workflow` → poll `get_workflow_status`
80
+
81
+ 12. **After threat modeling (ALWAYS / POST threat modelling)** — Invoke `ctm_sync` agent and upload the required CTM data.
82
+
83
+ ### Tool Reference
84
+
85
+ | Category | Tools |
86
+ |---|---|
87
+ | **Projects** | `list_projects`, `find_project_by_name`, `create_project`, `get_project` |
88
+ | **Profile Exploration** | `get_project_profile_description`, `list_profile_technology_categories`, `get_project_profile_technology_category`, `list_project_profile_architecture_notes`, `list_project_profile_user_groups`, `list_project_profile_language_stacks`, `list_project_profile_security_controls`, `get_project_profile_security_control`, `list_profile_compliance_requirements`, `get_profile_compliance_requirement` |
89
+ | **Documents** | `list_documents`, `create_document_from_content`, `upload_document`, `link_external_document` |
90
+ | **Reviews** | `create_review`, `list_reviews`, `get_review`, `get_review_overview` |
91
+ | **Workflow** | `start_workflow`, `get_workflow_status`, `start_next_workflow_job`, `start_workflow_job`, `retry_workflow_job` |
92
+ | **Analysis** | `get_threat_scenarios`, `get_countermeasures`, `get_components`, `get_data_dictionaries`, `get_security_objectives`, `get_findings`, `get_security_test_cases` |
93
+ | **Integrations** | `fetch_jira_issue`, `fetch_confluence_page`, `search_confluence_pages`, `fetch_and_link_to_srai` |
@@ -0,0 +1,34 @@
1
+ ---
2
+ name: create-ide-workflow
3
+ description: Create an AI IDE workflow in SRAI via security-review-mcp.
4
+ ---
5
+
6
+ # Create IDE Workflow
7
+
8
+ Use `security-review-mcp` to create a workflow by calling:
9
+
10
+ - `create_ai_ide_workflow`
11
+
12
+ Required payload fields:
13
+
14
+ - `project_id`
15
+ - `name`
16
+ - `description`
17
+
18
+ ## Steps
19
+
20
+ 1. Resolve `project_id`.
21
+ - Use configured project name `<SRAI_PROJECT_NAME>` by default.
22
+ - Call `find_project_by_name` with `name="<SRAI_PROJECT_NAME>"`.
23
+ - If not found, call `list_projects` and select the right project.
24
+ 2. Build `name` and `description` from the user request.
25
+ - Keep `name` short and action-oriented.
26
+ - Keep `description` specific about trigger and output.
27
+ 3. Call `create_ai_ide_workflow` with:
28
+ - `project_id`
29
+ - `name`
30
+ - `description`
31
+ 4. Return a concise confirmation including:
32
+ - project id
33
+ - workflow name
34
+ - workflow id from MCP response (if returned)