@reverse-craft/ai-tools 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +204 -77
  2. package/dist/__tests__/batchProcessing.property.test.d.ts +10 -0
  3. package/dist/__tests__/batchProcessing.property.test.d.ts.map +1 -0
  4. package/dist/__tests__/errorHandling.property.test.d.ts +11 -0
  5. package/dist/__tests__/errorHandling.property.test.d.ts.map +1 -0
  6. package/dist/__tests__/llmConfig.property.test.d.ts +48 -0
  7. package/dist/__tests__/llmConfig.property.test.d.ts.map +1 -0
  8. package/dist/__tests__/mergeResults.property.test.d.ts +12 -0
  9. package/dist/__tests__/mergeResults.property.test.d.ts.map +1 -0
  10. package/dist/__tests__/tokenizer.property.test.d.ts +20 -0
  11. package/dist/__tests__/tokenizer.property.test.d.ts.map +1 -0
  12. package/dist/index.d.ts +8 -3
  13. package/dist/index.d.ts.map +1 -1
  14. package/dist/jsvmpDetector.d.ts +70 -5
  15. package/dist/jsvmpDetector.d.ts.map +1 -1
  16. package/dist/llmConfig.d.ts +36 -1
  17. package/dist/llmConfig.d.ts.map +1 -1
  18. package/dist/server.d.ts +2 -0
  19. package/dist/server.d.ts.map +1 -0
  20. package/dist/server.js +614 -0
  21. package/dist/server.js.map +7 -0
  22. package/dist/tokenizer.d.ts +23 -0
  23. package/dist/tokenizer.d.ts.map +1 -0
  24. package/dist/tools/ToolDefinition.d.ts +24 -0
  25. package/dist/tools/ToolDefinition.d.ts.map +1 -0
  26. package/dist/tools/findJsvmpDispatcher.d.ts +41 -0
  27. package/dist/tools/findJsvmpDispatcher.d.ts.map +1 -0
  28. package/dist/tools/findJsvmpDispatcherTool.d.ts +29 -0
  29. package/dist/tools/findJsvmpDispatcherTool.d.ts.map +1 -0
  30. package/dist/tools/findJsvmpDispatcherTool.test.d.ts +7 -0
  31. package/dist/tools/findJsvmpDispatcherTool.test.d.ts.map +1 -0
  32. package/dist/tools/index.d.ts +20 -0
  33. package/dist/tools/index.d.ts.map +1 -0
  34. package/package.json +19 -11
package/README.md CHANGED
@@ -1,12 +1,14 @@
  # @reverse-craft/ai-tools

- AI-powered code analysis tools for smart-fs. Provides LLM-driven functionality including JSVMP (JavaScript Virtual Machine Protection) dispatcher detection.
+ MCP server for AI-powered JSVMP detection. Provides LLM-driven code analysis tools for identifying JavaScript Virtual Machine Protection patterns.

  ## Features

+ - **MCP Server** - Model Context Protocol server for AI assistant integration
  - **JSVMP Detection** - Detect VM protection patterns using LLM analysis
- - **LLM Configuration** - Flexible OpenAI-compatible API configuration
- - **Code Formatting** - Format code for LLM analysis with source map coordinates
+ - **Multiple LLM Providers** - Support for OpenAI, Anthropic Claude, and Google Gemini
+ - **Multiple Pattern Types** - Identifies dispatchers, instruction arrays, stack operations
+ - **Confidence Levels** - Results include ultra_high, high, medium, low confidence ratings

  ## Installation

@@ -16,114 +18,239 @@ npm install @reverse-craft/ai-tools

  ## Configuration

- Set environment variables for LLM access:
+ ### LLM Provider Selection
+
+ Set `LLM_PROVIDER` to choose your AI provider (defaults to `openai`):
+
+ ```bash
+ export LLM_PROVIDER=openai # or anthropic, google
+ ```
+
+ ### Universal Configuration (applies to any provider)
+
+ ```bash
+ export LLM_PROVIDER=openai # Provider selection
+ export LLM_MODEL=gpt-4o # Override model for any provider
+ export LLM_BASE_URL=https://custom.api.com # Override base URL for any provider
+ ```
+
+ ### OpenAI Configuration

  ```bash
- # Required
+ export LLM_PROVIDER=openai
  export OPENAI_API_KEY=your-api-key
+ export OPENAI_MODEL=gpt-4o-mini # Optional, default: gpt-4o-mini
+ export OPENAI_BASE_URL=https://api.openai.com/v1 # Optional, for custom endpoints
+ ```
+
+ ### Anthropic Claude Configuration
+
+ ```bash
+ export LLM_PROVIDER=anthropic
+ export ANTHROPIC_API_KEY=your-api-key
+ export ANTHROPIC_MODEL=claude-sonnet-4-20250514 # Optional, default: claude-sonnet-4-20250514
+ export ANTHROPIC_BASE_URL=https://api.anthropic.com # Optional, for custom endpoints
+ ```
+
+ ### Google Gemini Configuration
+
+ ```bash
+ export LLM_PROVIDER=google
+ export GOOGLE_API_KEY=your-api-key
+ export GOOGLE_MODEL=gemini-2.0-flash # Optional, default: gemini-2.0-flash
+ export GOOGLE_BASE_URL=https://generativelanguage.googleapis.com # Optional, for custom endpoints
+ ```
+
+ ### Environment Variables Summary
+
+ | Variable | Provider | Required | Default | Description |
+ |----------|----------|----------|---------|-------------|
+ | `LLM_PROVIDER` | All | No | `openai` | LLM provider selection |
+ | `LLM_MODEL` | All | No | - | Universal model override (highest priority) |
+ | `LLM_BASE_URL` | All | No | - | Universal base URL override (highest priority) |
+ | `OPENAI_API_KEY` | OpenAI | Yes* | - | OpenAI API key |
+ | `OPENAI_MODEL` | OpenAI | No | `gpt-4o-mini` | Model to use |
+ | `OPENAI_BASE_URL` | OpenAI | No | SDK default | Custom API endpoint |
+ | `ANTHROPIC_API_KEY` | Anthropic | Yes* | - | Anthropic API key |
+ | `ANTHROPIC_MODEL` | Anthropic | No | `claude-sonnet-4-20250514` | Model to use |
+ | `ANTHROPIC_BASE_URL` | Anthropic | No | SDK default | Custom API endpoint |
+ | `GOOGLE_API_KEY` | Google | Yes* | - | Google API key |
+ | `GOOGLE_MODEL` | Google | No | `gemini-2.0-flash` | Model to use |
+ | `GOOGLE_BASE_URL` | Google | No | SDK default | Custom API endpoint |
+
+ *Required only when using the corresponding provider
+
+ **Priority Order:**
+ - Model: `LLM_MODEL` > `{PROVIDER}_MODEL` > default
+ - Base URL: `LLM_BASE_URL` > `{PROVIDER}_BASE_URL` > SDK default
+
+ ## MCP Server Usage

- # Optional (defaults shown)
- export OPENAI_BASE_URL=https://api.openai.com/v1
- export OPENAI_MODEL=gpt-4
+ ### Running the Server
+
+ ```bash
+ # Via npx
+ npx @reverse-craft/ai-tools
+
+ # Or if installed globally
+ ai-tools-mcp
  ```

- ## Usage
+ ### MCP Configuration

- ### JSVMP Dispatcher Detection
+ Add to your MCP client configuration (e.g., Claude Desktop, Kiro):

- ```typescript
- import { findJsvmpDispatcher } from '@reverse-craft/ai-tools';
+ **Using OpenAI:**

- const result = await findJsvmpDispatcher(
- './obfuscated.js',
- 1, // startLine
- 500, // endLine
- { charLimit: 300 }
- );
+ ```json
+ {
+ "mcpServers": {
+ "ai-tools": {
+ "command": "npx",
+ "args": ["@reverse-craft/ai-tools"],
+ "env": {
+ "OPENAI_API_KEY": "your-api-key",
+ "OPENAI_MODEL": "gpt-4o-mini",
+ "OPENAI_BASE_URL": "https://api.openai.com/v1"
+ }
+ }
+ }
+ }
+ ```

- if (result.success) {
- console.log(result.formattedOutput);
- // Detected regions with confidence levels
- for (const region of result.result.regions) {
- console.log(`[${region.confidence}] Lines ${region.start}-${region.end}: ${region.type}`);
+ **Using Anthropic Claude:**
+
+ ```json
+ {
+ "mcpServers": {
+ "ai-tools": {
+ "command": "npx",
+ "args": ["@reverse-craft/ai-tools"],
+ "env": {
+ "LLM_PROVIDER": "anthropic",
+ "ANTHROPIC_API_KEY": "your-api-key",
+ "ANTHROPIC_MODEL": "claude-sonnet-4-20250514",
+ "ANTHROPIC_BASE_URL": "https://api.anthropic.com"
+ }
+ }
  }
  }
  ```

- ### LLM Configuration
+ **Using Google Gemini:**
+
+ ```json
+ {
+ "mcpServers": {
+ "ai-tools": {
+ "command": "npx",
+ "args": ["@reverse-craft/ai-tools"],
+ "env": {
+ "LLM_PROVIDER": "google",
+ "GOOGLE_API_KEY": "your-api-key",
+ "GOOGLE_MODEL": "gemini-2.0-flash",
+ "GOOGLE_BASE_URL": "https://generativelanguage.googleapis.com"
+ }
+ }
+ }
+ }
+ ```

- ```typescript
- import { getLLMConfig, isLLMConfigured, createLLMClient } from '@reverse-craft/ai-tools';
+ **Using Universal Configuration (works with any provider):**
+
+ ```json
+ {
+ "mcpServers": {
+ "ai-tools": {
+ "command": "npx",
+ "args": ["@reverse-craft/ai-tools"],
+ "env": {
+ "LLM_PROVIDER": "openai",
+ "OPENAI_API_KEY": "your-api-key",
+ "LLM_MODEL": "gpt-4o",
+ "LLM_BASE_URL": "https://your-custom-endpoint.com/v1"
+ }
+ }
+ }
+ }
+ ```

- // Check if LLM is configured
- if (isLLMConfigured()) {
- const config = getLLMConfig();
- const client = createLLMClient(config);
+ Or with a local installation:
+
+ ```json
+ {
+ "mcpServers": {
+ "ai-tools": {
+ "command": "node",
+ "args": ["/path/to/ai-tools/dist/server.js"],
+ "env": {
+ "OPENAI_API_KEY": "your-api-key"
+ }
+ }
+ }
  }
  ```

- ### Code Formatting for Analysis
+ ## MCP Tools

- ```typescript
- import { formatCodeForAnalysis } from '@reverse-craft/ai-tools';
+ ### find_jsvmp_dispatcher

- const formatted = await formatCodeForAnalysis(
- './app.min.js',
- 1, // startLine
- 100, // endLine
- 300 // charLimit
- );
+ Detect JSVMP (JavaScript Virtual Machine Protection) patterns in code using LLM analysis.

- console.log(formatted.content);
- // Output: "LineNo SourceLoc Code" format
- ```
+ **Parameters:**

- ## API
+ | Name | Type | Required | Description |
+ |------|------|----------|-------------|
+ | filePath | string | Yes | Path to the JavaScript file to analyze |
+ | startLine | number | Yes | Start line number (1-based) |
+ | endLine | number | Yes | End line number (1-based) |
+ | charLimit | number | No | Character limit for string truncation (default: 300) |

- ### Types
+ **Detection Types:**

- ```typescript
- // Detection types
- type DetectionType =
- | "If-Else Dispatcher"
- | "Switch Dispatcher"
- | "Instruction Array"
- | "Stack Operation";
+ - **If-Else Dispatcher** - Nested if-else chains for instruction dispatch
+ - **Switch Dispatcher** - Large switch statements (>20 cases) for opcode handling
+ - **Instruction Array** - Arrays storing bytecode instructions
+ - **Stack Operation** - Virtual stack push/pop patterns

- type ConfidenceLevel = "ultra_high" | "high" | "medium" | "low";
+ **Confidence Levels:**

- interface DetectionRegion {
- start: number;
- end: number;
- type: DetectionType;
- confidence: ConfidenceLevel;
- description: string;
- }
+ - `ultra_high` - Multiple JSVMP features present (loop + dispatcher + stack)
+ - `high` - Clear dispatcher structure (>20 cases or >10 nesting levels)
+ - `medium` - Partial JSVMP features
+ - `low` - Possible but uncertain patterns
+
+ **Example Response:**

- interface JsvmpDetectionResult {
- success: boolean;
- filePath: string;
- startLine: number;
- endLine: number;
- result?: DetectionResult;
- formattedOutput?: string;
- error?: string;
- }
  ```
+ === JSVMP Dispatcher Detection Result ===
+ File: ./obfuscated.js (1-500)
+
+ Summary: Typical JSVMP protection structure detected, including a main dispatcher and stack operations
+
+ Detected Regions:
+ [ultra_high] Lines 45-280: Switch Dispatcher
+ Large switch statement with 47 cases, a typical JSVMP instruction dispatcher
+
+ [high] Lines 12-44: Stack Operation
+ Virtual stack initialization and manipulation, using array indices for push/pop
+ ```
+
+ ## What is JSVMP?
+
+ JSVMP (JavaScript Virtual Machine Protection) is a code protection technique that:

- ### Functions
+ 1. Converts JavaScript source code to custom bytecode
+ 2. Implements a virtual machine to execute the bytecode
+ 3. Uses dispatchers (switch/if-else) to handle different opcodes
+ 4. Maintains a virtual stack for operand storage

- `findJsvmpDispatcher(filePath, startLine, endLine, options?)` - Detect JSVMP patterns
- `formatCodeForAnalysis(filePath, startLine, endLine, charLimit?)` - Format code for LLM
- `parseDetectionResult(jsonString)` - Parse LLM response
- `getLLMConfig()` - Get LLM configuration from environment
- `isLLMConfigured()` - Check if LLM is configured
- `createLLMClient(config)` - Create LLM client instance
+ This makes reverse engineering significantly harder as the original logic is hidden behind VM interpretation.

  ## Related Packages

- - **[@reverse-craft/smart-fs](https://github.com/reverse-craft/smart-fs)** - Core library
- - **[@reverse-craft/smart-fs-mcp](https://github.com/reverse-craft/smart-fs-mcp)** - MCP server
+ - **[@reverse-craft/smart-fs](https://github.com/reverse-craft/smart-fs)** - Core library for code processing
+ - **[@reverse-craft/smart-fs-mcp](https://github.com/reverse-craft/smart-fs-mcp)** - MCP server for smart-fs

  ## License

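The new README registers the server through MCP client JSON configuration. For reference, calling the `find_jsvmp_dispatcher` tool programmatically might look like the sketch below; it assumes the standard `@modelcontextprotocol/sdk` client API and the tool name and parameters documented above, not an API shipped by this package.

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function main() {
  // Spawn the published server over stdio, exactly as an MCP client config would.
  const transport = new StdioClientTransport({
    command: "npx",
    args: ["@reverse-craft/ai-tools"],
    env: { OPENAI_API_KEY: process.env.OPENAI_API_KEY ?? "" },
  });

  const client = new Client({ name: "ai-tools-example", version: "0.0.1" });
  await client.connect(transport);

  // Invoke the tool with the parameters listed in the README table.
  const result = await client.callTool({
    name: "find_jsvmp_dispatcher",
    arguments: { filePath: "./obfuscated.js", startLine: 1, endLine: 500, charLimit: 300 },
  });

  console.log(result.content);
  await client.close();
}

main().catch(console.error);
```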
package/dist/__tests__/batchProcessing.property.test.d.ts ADDED
@@ -0,0 +1,10 @@
+ /**
+ * Property-based tests for batch processing logic
+ *
+ * Tests the createBatches function and line number preservation.
+ *
+ * **Property 4: Original line numbers preserved in batches**
+ * **Validates: Requirements 4.4**
+ */
+ export {};
+ //# sourceMappingURL=batchProcessing.property.test.d.ts.map
package/dist/__tests__/batchProcessing.property.test.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"batchProcessing.property.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/batchProcessing.property.test.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG"}
package/dist/__tests__/errorHandling.property.test.d.ts ADDED
@@ -0,0 +1,11 @@
+ /**
+ * Property-based tests for error handling logic
+ *
+ * Tests the processBatchesWithErrorHandling function.
+ *
+ * **Property 8: Partial failure continues processing**
+ * **Property 9: Total failure reports all errors**
+ * **Validates: Requirements 6.1, 6.2, 6.3**
+ */
+ export {};
+ //# sourceMappingURL=errorHandling.property.test.d.ts.map
package/dist/__tests__/errorHandling.property.test.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"errorHandling.property.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/errorHandling.property.test.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG"}
package/dist/__tests__/llmConfig.property.test.d.ts ADDED
@@ -0,0 +1,48 @@
+ /**
+ * Property-Based Tests for LLM Configuration Module
+ *
+ * **Feature: multi-provider-llm**
+ *
+ * **Property 1: Valid Provider Selection**
+ * *For any* valid provider value ('openai', 'anthropic', 'google'), when `LLM_PROVIDER` is set
+ * to that value and the corresponding API key is present, `getLLMConfig()` SHALL return a config
+ * with that provider.
+ * **Validates: Requirements 1.1, 1.2, 1.3**
+ *
+ * **Property 2: Invalid Provider Returns Null**
+ * *For any* string that is not a valid provider value ('openai', 'anthropic', 'google'),
+ * when `LLM_PROVIDER` is set to that string, `getLLMConfig()` SHALL return null.
+ * **Validates: Requirements 1.5**
+ *
+ * **Property 3: Correct API Key Selection**
+ * *For any* provider, `getLLMConfig()` SHALL read the API key from the provider-specific
+ * environment variable.
+ * **Validates: Requirements 2.1, 2.2, 2.3**
+ *
+ * **Property 4: Missing API Key Returns Null**
+ * *For any* provider, if the corresponding API key environment variable is not set,
+ * `getLLMConfig()` SHALL return null.
+ * **Validates: Requirements 2.4**
+ *
+ * **Property 5: Custom Model Selection**
+ * *For any* provider, when the provider-specific model environment variable is set,
+ * `getLLMConfig()` SHALL return a config with that model value.
+ * **Validates: Requirements 3.1, 3.3, 3.5**
+ *
+ * **Property 6: Default Model Fallback**
+ * *For any* provider, when the provider-specific model environment variable is not set,
+ * `getLLMConfig()` SHALL return a config with the default model for that provider.
+ * **Validates: Requirements 3.2, 3.4, 3.6**
+ *
+ * **Property 7: Base URL Configuration**
+ * *For any* provider configuration, when the provider-specific BASE_URL is set, `getLLMConfig()` SHALL
+ * return a config with that baseUrl value.
+ * **Validates: Requirements 4.1, 4.2**
+ *
+ * **Property 8: Backward Compatibility**
+ * *For any* environment where only `OPENAI_API_KEY` is set (without `LLM_PROVIDER`),
+ * `getLLMConfig()` SHALL return a config with provider 'openai'.
+ * **Validates: Requirements 8.1, 8.2**
+ */
+ export {};
+ //# sourceMappingURL=llmConfig.property.test.d.ts.map
package/dist/__tests__/llmConfig.property.test.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"llmConfig.property.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/llmConfig.property.test.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6CG"}
package/dist/__tests__/mergeResults.property.test.d.ts ADDED
@@ -0,0 +1,12 @@
+ /**
+ * Property-based tests for result merging logic
+ *
+ * Tests the mergeDetectionResults function.
+ *
+ * **Property 5: Merge preserves all detection regions**
+ * **Property 6: Merged regions are sorted by start line**
+ * **Property 7: Overlapping regions deduplicated by confidence**
+ * **Validates: Requirements 5.1, 5.2, 5.3, 5.4**
+ */
+ export {};
+ //# sourceMappingURL=mergeResults.property.test.d.ts.map
package/dist/__tests__/mergeResults.property.test.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"mergeResults.property.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/mergeResults.property.test.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG"}
package/dist/__tests__/tokenizer.property.test.d.ts ADDED
@@ -0,0 +1,20 @@
+ /**
+ * Property-Based Tests for Tokenizer Module
+ *
+ * **Property 1: Token counting consistency**
+ * *For any* valid text string, calling `countTokens` multiple times with the same input
+ * SHALL return the same token count.
+ * **Validates: Requirements 3.1**
+ *
+ * **Property 2: Batch splitting preserves all content**
+ * *For any* array of code lines and any maxTokens limit, splitting into batches and
+ * concatenating all batches SHALL produce the original content (no lines lost or duplicated).
+ * **Validates: Requirements 4.1, 4.2**
+ *
+ * **Property 3: Batch splitting respects line boundaries**
+ * *For any* batch produced by `splitByTokenLimit`, the batch content SHALL contain
+ * only complete lines (no partial lines).
+ * **Validates: Requirements 4.2**
+ */
+ export {};
+ //# sourceMappingURL=tokenizer.property.test.d.ts.map
package/dist/__tests__/tokenizer.property.test.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"tokenizer.property.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/tokenizer.property.test.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;GAiBG"}
package/dist/index.d.ts CHANGED
@@ -1,11 +1,16 @@
  /**
- * AI Tools - AI-powered code analysis tools
+ * AI Tools - MCP server for AI-powered JSVMP detection
  *
- * This package provides LLM-driven functionality for code analysis,
+ * This package provides an MCP server with LLM-driven functionality for code analysis,
  * including JSVMP (JavaScript Virtual Machine Protection) detection.
  *
+ * Run as MCP server: npx @reverse-craft/ai-tools
+ *
  * @packageDocumentation
  */
- export { type LLMConfig, type LLMClient, getLLMConfig, isLLMConfigured, createLLMClient, } from './llmConfig.js';
+ export { type LLMConfig, type LLMClient, type LLMProvider, PROVIDER_DEFAULTS, PROVIDER_ENV_KEYS, getLLMConfig, isLLMConfigured, createLLMClient, } from './llmConfig.js';
  export { type FormattedCode, type DetectionType, type ConfidenceLevel, type DetectionRegion, type DetectionResult, type JsvmpDetectionOptions, type JsvmpDetectionResult, formatCodeForAnalysis, parseDetectionResult, findJsvmpDispatcher, } from './jsvmpDetector.js';
+ export { tools } from './tools/index.js';
+ export { findJsvmpDispatcherTool, FindJsvmpDispatcherInputSchema } from './tools/findJsvmpDispatcherTool.js';
+ export { ToolDefinition, defineTool } from './tools/ToolDefinition.js';
  //# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAGH,OAAO,EACL,KAAK,SAAS,EACd,KAAK,SAAS,EACd,YAAY,EACZ,eAAe,EACf,eAAe,GAChB,MAAM,gBAAgB,CAAC;AAGxB,OAAO,EACL,KAAK,aAAa,EAClB,KAAK,aAAa,EAClB,KAAK,eAAe,EACpB,KAAK,eAAe,EACpB,KAAK,eAAe,EACpB,KAAK,qBAAqB,EAC1B,KAAK,oBAAoB,EACzB,qBAAqB,EACrB,oBAAoB,EACpB,mBAAmB,GACpB,MAAM,oBAAoB,CAAC"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG;AAGH,OAAO,EACL,KAAK,SAAS,EACd,KAAK,SAAS,EACd,KAAK,WAAW,EAChB,iBAAiB,EACjB,iBAAiB,EACjB,YAAY,EACZ,eAAe,EACf,eAAe,GAChB,MAAM,gBAAgB,CAAC;AAGxB,OAAO,EACL,KAAK,aAAa,EAClB,KAAK,aAAa,EAClB,KAAK,eAAe,EACpB,KAAK,eAAe,EACpB,KAAK,eAAe,EACpB,KAAK,qBAAqB,EAC1B,KAAK,oBAAoB,EACzB,qBAAqB,EACrB,oBAAoB,EACpB,mBAAmB,GACpB,MAAM,oBAAoB,CAAC;AAG5B,OAAO,EAAE,KAAK,EAAE,MAAM,kBAAkB,CAAC;AACzC,OAAO,EAAE,uBAAuB,EAAE,8BAA8B,EAAE,MAAM,oCAAoC,CAAC;AAC7G,OAAO,EAAE,cAAc,EAAE,UAAU,EAAE,MAAM,2BAA2B,CAAC"}
package/dist/jsvmpDetector.d.ts CHANGED
@@ -2,6 +2,7 @@
  * JSVMP Detector Module
  * AI-powered detection of JSVMP (JavaScript Virtual Machine Protection) patterns
  */
+ import { LLMClient } from './llmConfig.js';
  /**
  * Formatted code result interface
  */
@@ -41,6 +42,7 @@ export interface DetectionResult {
  */
  export interface JsvmpDetectionOptions {
  charLimit?: number;
+ maxTokensPerBatch?: number;
  }
  /**
  * Result from findJsvmpDispatcher function
@@ -48,11 +50,28 @@ export interface JsvmpDetectionOptions {
  export interface JsvmpDetectionResult {
  success: boolean;
  filePath: string;
- startLine: number;
- endLine: number;
+ totalLines: number;
+ batchCount: number;
  result?: DetectionResult;
  formattedOutput?: string;
  error?: string;
+ partialErrors?: string[];
+ }
+ /**
+ * Batch information for processing
+ */
+ export interface BatchInfo {
+ startLine: number;
+ endLine: number;
+ content: string;
+ tokenCount: number;
+ }
+ /**
+ * Result from formatEntireFile function
+ */
+ export interface FormattedFileResult {
+ lines: string[];
+ totalLines: number;
  }
  /**
  * Format code for LLM analysis
@@ -71,6 +90,30 @@
  * @returns FormattedCode object with formatted content and metadata
  */
  export declare function formatCodeForAnalysis(filePath: string, startLine: number, endLine: number, charLimit?: number): Promise<FormattedCode>;
+ /**
+ * Format an entire file for LLM analysis
+ * Format: "LineNo SourceLoc Code"
+ *
+ * Processing flow:
+ * 1. Call ensureBeautified to beautify the code
+ * 2. Call truncateCodeHighPerf to truncate long strings
+ * 3. Use SourceMapConsumer to obtain the original coordinates
+ * 4. Return the array of formatted lines (original line numbers preserved)
+ *
+ * @param filePath - Path to the JavaScript file
+ * @param charLimit - Character limit for string truncation (default 300)
+ * @returns FormattedFileResult with formatted lines array and metadata
+ */
+ export declare function formatEntireFile(filePath: string, charLimit?: number): Promise<FormattedFileResult>;
+ /**
+ * Create batches for batched processing
+ * Splits the code with the tokenizer and records each batch's startLine/endLine
+ *
+ * @param formattedLines - Array of formatted code lines
+ * @param maxTokensPerBatch - Maximum number of tokens per batch
+ * @returns Array of BatchInfo
+ */
+ export declare function createBatches(formattedLines: string[], maxTokensPerBatch: number): BatchInfo[];
  /**
  * Parse and validate LLM detection result from JSON string
  *
@@ -85,14 +128,36 @@ export declare function formatCodeForAnalysis(filePath: string, startLine: numbe
  * @throws Error if JSON is invalid or structure doesn't match expected format
  */
  export declare function parseDetectionResult(jsonString: string): DetectionResult;
+ /**
+ * Merge detection results from multiple batches
+ * - Combines all regions from all batches
+ * - Combines summaries from all batches
+ * - Sorts regions by start line
+ * - Deduplicates overlapping regions (keeps higher confidence)
+ *
+ * @param results - Array of DetectionResult from each batch
+ * @returns Merged DetectionResult
+ */
+ export declare function mergeDetectionResults(results: DetectionResult[]): DetectionResult;
+ /**
+ * Process batches with error handling
+ * - Continues processing if some batches fail
+ * - Collects partial results and error information
+ *
+ * @param client - LLM client
+ * @param batches - Array of BatchInfo
+ * @returns Object with successful results and error messages
+ */
+ export declare function processBatchesWithErrorHandling(client: LLMClient, batches: BatchInfo[]): Promise<{
+ results: DetectionResult[];
+ errors: string[];
+ }>;
  /**
  * Find JSVMP dispatcher patterns in JavaScript code using LLM analysis
  *
  * @param filePath - Path to the JavaScript file to analyze
- * @param startLine - Start line number (1-based)
- * @param endLine - End line number (1-based)
  * @param options - Optional configuration
  * @returns JsvmpDetectionResult with detection results or error
  */
- export declare function findJsvmpDispatcher(filePath: string, startLine: number, endLine: number, options?: JsvmpDetectionOptions): Promise<JsvmpDetectionResult>;
+ export declare function findJsvmpDispatcher(filePath: string, options?: JsvmpDetectionOptions): Promise<JsvmpDetectionResult>;
  //# sourceMappingURL=jsvmpDetector.d.ts.map
package/dist/jsvmpDetector.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"jsvmpDetector.d.ts","sourceRoot":"","sources":["../src/jsvmpDetector.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAOH;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B,OAAO,EAAE,MAAM,CAAC;IAChB,UAAU,EAAE,MAAM,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED;;GAEG;AACH,MAAM,MAAM,aAAa,GACrB,oBAAoB,GACpB,mBAAmB,GACnB,mBAAmB,GACnB,iBAAiB,CAAC;AAEtB;;GAEG;AACH,MAAM,MAAM,eAAe,GAAG,YAAY,GAAG,MAAM,GAAG,QAAQ,GAAG,KAAK,CAAC;AAEvE;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,KAAK,EAAE,MAAM,CAAC;IACd,GAAG,EAAE,MAAM,CAAC;IACZ,IAAI,EAAE,aAAa,CAAC;IACpB,UAAU,EAAE,eAAe,CAAC;IAC5B,WAAW,EAAE,MAAM,CAAC;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,eAAe,EAAE,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACpC,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,oBAAoB;IACnC,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,CAAC,EAAE,eAAe,CAAC;IACzB,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAsBD;;;;;;;;;;;;;;;GAeG;AACH,wBAAsB,qBAAqB,CACzC,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,MAAM,EACf,SAAS,GAAE,MAAY,GACtB,OAAO,CAAC,aAAa,CAAC,CAuDxB;AAoCD;;;;;;;;;;;;GAYG;AACH,wBAAgB,oBAAoB,CAAC,UAAU,EAAE,MAAM,GAAG,eAAe,CAoFxE;AAiCD;;;;;;;;GAQG;AACH,wBAAsB,mBAAmB,CACvC,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,MAAM,EACf,OAAO,CAAC,EAAE,qBAAqB,GAC9B,OAAO,CAAC,oBAAoB,CAAC,CA+D/B"}
+ {"version":3,"file":"jsvmpDetector.d.ts","sourceRoot":"","sources":["../src/jsvmpDetector.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAKH,OAAO,EAAiC,SAAS,EAAE,MAAM,gBAAgB,CAAC;AAG1E;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B,OAAO,EAAE,MAAM,CAAC;IAChB,UAAU,EAAE,MAAM,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED;;GAEG;AACH,MAAM,MAAM,aAAa,GACrB,oBAAoB,GACpB,mBAAmB,GACnB,mBAAmB,GACnB,iBAAiB,CAAC;AAEtB;;GAEG;AACH,MAAM,MAAM,eAAe,GAAG,YAAY,GAAG,MAAM,GAAG,QAAQ,GAAG,KAAK,CAAC;AAEvE;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,KAAK,EAAE,MAAM,CAAC;IACd,GAAG,EAAE,MAAM,CAAC;IACZ,IAAI,EAAE,aAAa,CAAC;IACpB,UAAU,EAAE,eAAe,CAAC;IAC5B,WAAW,EAAE,MAAM,CAAC;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,eAAe,EAAE,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACpC,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,iBAAiB,CAAC,EAAE,MAAM,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,oBAAoB;IACnC,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,UAAU,EAAE,MAAM,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,eAAe,CAAC;IACzB,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;CAC1B;AAED;;GAEG;AACH,MAAM,WAAW,SAAS;IACxB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,UAAU,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,mBAAmB;IAClC,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,UAAU,EAAE,MAAM,CAAC;CACpB;AAsBD;;;;;;;;;;;;;;;GAeG;AACH,wBAAsB,qBAAqB,CACzC,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,EACjB,OAAO,EAAE,MAAM,EACf,SAAS,GAAE,MAAY,GACtB,OAAO,CAAC,aAAa,CAAC,CAuDxB;AAED;;;;;;;;;;;;;GAaG;AACH,wBAAsB,gBAAgB,CACpC,QAAQ,EAAE,MAAM,EAChB,SAAS,GAAE,MAAY,GACtB,OAAO,CAAC,mBAAmB,CAAC,CAiD9B;AAcD;;;;;;;GAOG;AACH,wBAAgB,aAAa,CAC3B,cAAc,EAAE,MAAM,EAAE,EACxB,iBAAiB,EAAE,MAAM,GACxB,SAAS,EAAE,CAgCb;AAoCD;;;;;;;;;;;;GAYG;AACH,wBAAgB,oBAAoB,CAAC,UAAU,EAAE,MAAM,GAAG,eAAe,CAoFxE;AAiCD;;;;;;;;;GASG;AACH,wBAAgB,qBAAqB,CAAC,OAAO,EAAE,eAAe,EAAE,GAAG,eAAe,CA8DjF;AAiBD;;;;;;;;GAQG;AACH,wBAAsB,+BAA+B,CACnD,MAAM,EAAE,SAAS,EACjB,OAAO,EAAE,SAAS,EAAE,GACnB,OAAO,CAAC;IAAE,OAAO,EAAE,eAAe,EAAE,CAAC;IAAC,MAAM,EAAE,MAAM,EAAE,CAAA;CAAE,CAAC,CAkB3D;AAED;;;;;;GAMG;AACH,wBAAsB,mBAAmB,CACvC,QAAQ,EAAE,MAAM,EAChB,OAAO,CAAC,EAAE,qBAAqB,GAC9B,OAAO,CAAC,oBAAoB,CAAC,CA+E/B"}
package/dist/llmConfig.d.ts CHANGED
@@ -2,11 +2,40 @@
  * LLM Configuration Module
  * Handles reading and validating LLM configuration from environment variables
  */
- export interface LLMConfig {
+ import type { LanguageModel } from 'ai';
+ /**
+ * Supported LLM providers
+ */
+ export type LLMProvider = 'openai' | 'anthropic' | 'google';
+ /**
+ * Provider-specific default configurations
+ */
+ export declare const PROVIDER_DEFAULTS: Record<LLMProvider, {
+ model: string;
+ }>;
+ /**
+ * Environment variable names for each provider
+ */
+ export declare const PROVIDER_ENV_KEYS: Record<LLMProvider, {
  apiKey: string;
+ model: string;
  baseUrl: string;
+ }>;
+ /**
+ * Extended LLM configuration with provider information
+ */
+ export interface LLMConfig {
+ provider: LLMProvider;
+ apiKey: string;
  model: string;
+ baseUrl?: string;
  }
+ /**
+ * Validates provider string against valid values
+ * @param value - The provider string to validate
+ * @returns The validated LLMProvider or null if invalid
+ */
+ export declare function validateProvider(value: string | undefined): LLMProvider | null;
  /**
  * Read LLM configuration from environment variables
  * @returns LLMConfig | null (null means not configured)
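The new declarations centralize provider handling in `validateProvider`, `PROVIDER_DEFAULTS` and `PROVIDER_ENV_KEYS`. The snippet below re-implements the documented model resolution order (`LLM_MODEL` > provider-specific model > default) purely as an illustration; it is not the package's actual `getLLMConfig` logic, and the handling of an unset `LLM_PROVIDER` is an assumption based on the README:

```typescript
import {
  PROVIDER_DEFAULTS,
  PROVIDER_ENV_KEYS,
  validateProvider,
  type LLMProvider,
} from "@reverse-craft/ai-tools";

// Illustrative re-implementation of the documented model resolution order.
function resolveModel(env: NodeJS.ProcessEnv): string | null {
  // Default to 'openai' when LLM_PROVIDER is unset; reject invalid values (Property 2).
  const provider: LLMProvider | null =
    env.LLM_PROVIDER === undefined ? "openai" : validateProvider(env.LLM_PROVIDER);
  if (provider === null) return null;

  const providerModelVar = PROVIDER_ENV_KEYS[provider].model; // e.g. "OPENAI_MODEL"
  // Priority: universal override > provider-specific variable > shipped default.
  return env.LLM_MODEL ?? env[providerModelVar] ?? PROVIDER_DEFAULTS[provider].model;
}
```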
@@ -27,6 +56,12 @@ export interface LLMClient {
  */
  analyzeJSVMP(formattedCode: string): Promise<string>;
  }
+ /**
+ * Creates a provider-specific model instance using the AI SDK
+ * @param config - The LLM configuration
+ * @returns A LanguageModel instance for the configured provider
+ */
+ export declare function createProviderModel(config: LLMConfig): LanguageModel;
  /**
  * Create an LLM client instance
  */
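Downstream of these declarations, the exported helpers compose roughly as follows. The sketch is grounded only in the declared signatures (`getLLMConfig`, `createLLMClient`, `formatCodeForAnalysis`, `parseDetectionResult`, `LLMClient.analyzeJSVMP`); how `createLLMClient` uses `createProviderModel` and the AI SDK internally is the package's concern and is not shown in this diff:

```typescript
import {
  getLLMConfig,
  createLLMClient,
  formatCodeForAnalysis,
  parseDetectionResult,
} from "@reverse-craft/ai-tools";

async function analyzeRegion(filePath: string) {
  const config = getLLMConfig();
  if (!config) throw new Error("LLM is not configured (see the environment variables above).");

  // createLLMClient wires the provider-specific model behind the LLMClient interface.
  const client = createLLMClient(config);

  // Format a slice of the file and hand the "LineNo SourceLoc Code" text to the LLM.
  const formatted = await formatCodeForAnalysis(filePath, 1, 500, 300);
  const raw = await client.analyzeJSVMP(formatted.content);

  // Parse and validate the JSON the model returned.
  return parseDetectionResult(raw);
}
```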