@lockllm/sdk 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +146 -5
  2. package/README.md +238 -39
  3. package/dist/client.d.ts +1 -1
  4. package/dist/client.d.ts.map +1 -1
  5. package/dist/errors.d.ts +46 -1
  6. package/dist/errors.d.ts.map +1 -1
  7. package/dist/errors.js +104 -2
  8. package/dist/errors.js.map +1 -1
  9. package/dist/errors.mjs +100 -1
  10. package/dist/index.d.ts +6 -5
  11. package/dist/index.d.ts.map +1 -1
  12. package/dist/index.js +9 -1
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +3 -2
  15. package/dist/scan.d.ts +20 -5
  16. package/dist/scan.d.ts.map +1 -1
  17. package/dist/scan.js +55 -5
  18. package/dist/scan.js.map +1 -1
  19. package/dist/scan.mjs +55 -5
  20. package/dist/types/common.d.ts +85 -0
  21. package/dist/types/common.d.ts.map +1 -1
  22. package/dist/types/errors.d.ts +33 -0
  23. package/dist/types/errors.d.ts.map +1 -1
  24. package/dist/types/scan.d.ts +102 -3
  25. package/dist/types/scan.d.ts.map +1 -1
  26. package/dist/utils/proxy-headers.d.ts +24 -0
  27. package/dist/utils/proxy-headers.d.ts.map +1 -0
  28. package/dist/utils/proxy-headers.js +175 -0
  29. package/dist/utils/proxy-headers.js.map +1 -0
  30. package/dist/utils/proxy-headers.mjs +170 -0
  31. package/dist/utils.d.ts +24 -0
  32. package/dist/utils.d.ts.map +1 -1
  33. package/dist/utils.js +28 -0
  34. package/dist/utils.js.map +1 -1
  35. package/dist/utils.mjs +27 -0
  36. package/dist/wrappers/anthropic-wrapper.d.ts +10 -1
  37. package/dist/wrappers/anthropic-wrapper.d.ts.map +1 -1
  38. package/dist/wrappers/anthropic-wrapper.js +17 -2
  39. package/dist/wrappers/anthropic-wrapper.js.map +1 -1
  40. package/dist/wrappers/anthropic-wrapper.mjs +17 -2
  41. package/dist/wrappers/generic-wrapper.d.ts +5 -0
  42. package/dist/wrappers/generic-wrapper.d.ts.map +1 -1
  43. package/dist/wrappers/generic-wrapper.js +12 -1
  44. package/dist/wrappers/generic-wrapper.js.map +1 -1
  45. package/dist/wrappers/generic-wrapper.mjs +12 -1
  46. package/dist/wrappers/openai-wrapper.d.ts +10 -1
  47. package/dist/wrappers/openai-wrapper.d.ts.map +1 -1
  48. package/dist/wrappers/openai-wrapper.js +17 -2
  49. package/dist/wrappers/openai-wrapper.js.map +1 -1
  50. package/dist/wrappers/openai-wrapper.mjs +17 -2
  51. package/package.json +2 -18
package/CHANGELOG.md CHANGED
@@ -1,12 +1,153 @@
1
1
  # Changelog
2
2
 
3
- ## [1.1.0] - 2026-01-16
3
+ ## [1.1.0] - 2026-02-18
4
+
5
+ ### Added
6
+
7
+ #### Custom Content Policy Enforcement
8
+ You can now enforce your own content rules on top of LockLLM's built-in security. Create custom policies in the [dashboard](https://www.lockllm.com/policies), and the SDK will automatically check prompts against them. When a policy is violated, you'll get a `PolicyViolationError` with the exact policy name, violated categories, and details.
9
+
10
+ ```typescript
11
+ try {
12
+ await openai.chat.completions.create({ ... });
13
+ } catch (error) {
14
+ if (error instanceof PolicyViolationError) {
15
+ console.log(error.violated_policies);
16
+ // [{ policy_name: "No competitor mentions", violated_categories: [...] }]
17
+ }
18
+ }
19
+ ```
20
+
21
+ #### AI Abuse Detection
22
+ Protect your endpoints from automated misuse. When enabled, LockLLM detects bot-generated content, repetitive prompts, and resource exhaustion attacks. If abuse is detected, you'll get an `AbuseDetectedError` with confidence scores and detailed indicator breakdowns.
23
+
24
+ ```typescript
25
+ const openai = createOpenAI({
26
+ apiKey: process.env.LOCKLLM_API_KEY,
27
+ proxyOptions: {
28
+ abuseAction: 'block' // Opt-in: block abusive requests
29
+ }
30
+ });
31
+ ```
32
+
33
+ #### Credit Balance Awareness
34
+ The SDK now returns a dedicated `InsufficientCreditsError` when your balance is too low for a request. The error includes your `current_balance` and the `estimated_cost`, so you can handle billing gracefully in your application.
35
+
36
+ #### Scan Modes and Actions
37
+ Control exactly what gets checked and what happens when threats are found:
38
+
39
+ - **Scan modes** - Choose `normal` (core security only), `policy_only` (custom policies only), or `combined` (both)
40
+ - **Actions per detection type** - Set `block` or `allow_with_warning` independently for core scans, custom policies, and abuse detection
41
+ - **Abuse detection** is opt-in - disabled by default, enable it with `abuseAction`
42
+
43
+ ```typescript
44
+ const result = await lockllm.scan(
45
+ { input: userPrompt, mode: 'combined', sensitivity: 'high' },
46
+ { scanAction: 'block', policyAction: 'allow_with_warning', abuseAction: 'block' }
47
+ );
48
+ ```
49
+
50
+ #### Proxy Options on All Wrappers
51
+ All wrapper functions (`createOpenAI`, `createAnthropic`, `createGroq`, etc.) now accept a `proxyOptions` parameter so you can configure security behavior at initialization time instead of per-request:
52
+
53
+ ```typescript
54
+ const openai = createOpenAI({
55
+ apiKey: process.env.LOCKLLM_API_KEY,
56
+ proxyOptions: {
57
+ scanMode: 'combined',
58
+ scanAction: 'block',
59
+ policyAction: 'block',
60
+ routeAction: 'auto', // Enable intelligent routing
61
+ cacheResponse: true, // Enable response caching
62
+ cacheTTL: 3600 // Cache for 1 hour
63
+ }
64
+ });
65
+ ```
66
+
67
+ #### Intelligent Routing
68
+ Let LockLLM automatically select the best model for each request based on task type and complexity. Set `routeAction: 'auto'` to enable, or `routeAction: 'custom'` to use your own routing rules from the dashboard.
69
+
70
+ #### Response Caching
71
+ Reduce costs by caching identical LLM responses. Enabled by default in proxy mode - disable it with `cacheResponse: false` or customize the TTL with `cacheTTL`.
72
+
73
+ #### Universal Proxy Mode
74
+ Access 200+ models without configuring individual provider API keys using `getUniversalProxyURL()`. Uses LockLLM credits instead of BYOK.
75
+
76
+ ```typescript
77
+ import { getUniversalProxyURL } from '@lockllm/sdk';
78
+ const url = getUniversalProxyURL();
79
+ // 'https://api.lockllm.com/v1/proxy/chat/completions'
80
+ ```
81
+
82
+ #### Proxy Response Metadata
83
+ New utilities to read detailed metadata from proxy responses - scan results, routing decisions, cache status, and credit usage:
84
+
85
+ ```typescript
86
+ import { parseProxyMetadata } from '@lockllm/sdk';
87
+ const metadata = parseProxyMetadata(response.headers);
88
+ // metadata.safe, metadata.routing, metadata.cache_status, metadata.credits_deducted, etc.
89
+ ```
90
+
91
+ #### Expanded Scan Response
92
+ Scan responses now include richer data when using advanced features:
93
+ - `policy_warnings` - Which custom policies were violated and why
94
+ - `scan_warning` - Injection details when using `allow_with_warning`
95
+ - `abuse_warnings` - Abuse indicators when abuse detection is enabled
96
+ - `routing` - Task type, complexity score, and selected model when routing is enabled
97
+
98
+ ### Changed
99
+ - The scan API is fully backward compatible - existing code works without changes. Internally, scan configuration is now sent via HTTP headers for better compatibility and caching behavior.
100
+
101
+ ### Notes
102
+ - All new features are opt-in. Existing integrations continue to work without any changes.
103
+ - Custom policies, abuse detection, and routing are configured in the [LockLLM dashboard](https://www.lockllm.com/dashboard).
104
+
105
+ ---
106
+
107
+ ## [1.0.1] - 2026-01-16
108
+
109
+ ### Changed
110
+
111
+ #### Flexible SDK Installation
112
+ - **Optional Provider SDKs**: Provider SDKs (OpenAI, Anthropic, Cohere, etc.) are no longer required dependencies. Install only what you need:
113
+ - Using OpenAI? Just install `openai` package
114
+ - Using Anthropic? Just install `@anthropic-ai/sdk` package
115
+ - Using Cohere? Just install `cohere-ai` package
116
+ - Mix and match any providers your application uses
117
+ - **Smaller Bundle Sizes**: Your application only includes the provider SDKs you actually use, reducing package size and installation time
118
+ - **Pay-As-You-Go Dependencies**: No need to install SDKs for providers you don't use
119
+
120
+ ### Benefits
121
+ - Faster installation with fewer dependencies
122
+ - Smaller `node_modules` folder
123
+ - More control over your project dependencies
124
+ - No unused packages taking up disk space
125
+
126
+ ### Migration Guide
127
+ If you're upgrading from v1.0.0 and using provider wrappers, simply install the provider SDKs you need:
128
+
129
+ ```bash
130
+ # For OpenAI (GPT models, DALL-E, etc.)
131
+ npm install openai
132
+
133
+ # For Anthropic (Claude models)
134
+ npm install @anthropic-ai/sdk
135
+
136
+ # For Cohere (Command, Embed models)
137
+ npm install cohere-ai
138
+
139
+ # Install only what you use!
140
+ ```
141
+
142
+ The SDK will work out of the box once you install the provider packages you need.
143
+
144
+ ## [1.0.0] - 2026-01-16
4
145
 
5
146
  ### Added
6
147
 
7
148
  #### Universal Provider Support
8
- - **Generic Wrapper Factory**: Added `createClient()` function to create clients for any LLM provider using their official SDK
9
- - **OpenAI-Compatible Helper**: Added `createOpenAICompatible()` for easy integration with OpenAI-compatible providers
149
+ - **Generic Wrapper Factory**: Added `createClient()` function to create clients for any of the 17 supported providers using their official SDK
150
+ - **OpenAI-Compatible Helper**: Added `createOpenAICompatible()` for easy integration with the OpenAI-compatible providers (Groq, DeepSeek, Mistral, Perplexity, etc.)
10
151
  - **15 New Provider Wrappers**: Pre-configured factory functions for all remaining providers:
11
152
  - `createGroq()` - Groq (fast inference)
12
153
  - `createDeepSeek()` - DeepSeek (reasoning models)
@@ -30,7 +171,7 @@
30
171
  - **Type Export**: Added `ProviderName` type export for better TypeScript support
31
172
 
32
173
  #### Examples
33
- - **`examples/wrapper-generic.ts`**: Comprehensive example showing three ways to integrate with any provider
174
+ - **`examples/wrapper-generic.ts`**: Comprehensive example showing three ways to integrate with any of the 17 supported providers
34
175
  - **`examples/wrapper-all-providers.ts`**: Complete example demonstrating all 17 providers
35
176
 
36
177
  #### Documentation
@@ -76,6 +217,6 @@ const client = new OpenAI({
76
217
  ```
77
218
 
78
219
  ### Notes
79
- - All 15+ providers are now fully supported with multiple integration options
220
+ - All 17+ providers are now fully supported with multiple integration options
80
221
  - Zero breaking changes - existing code continues to work
81
222
  - Backward compatible with v1.0.0
package/README.md CHANGED
@@ -10,7 +10,7 @@
10
10
 
11
11
  **All-in-One AI Security for LLM Applications**
12
12
 
13
- *Keep control of your AI. Detect prompt injection, jailbreaks, and adversarial attacks in real-time across 15+ providers with zero code changes.*
13
+ *Keep control of your AI. Detect prompt injection, jailbreaks, and adversarial attacks in real-time across 17+ providers with zero code changes.*
14
14
 
15
15
  [Quick Start](#quick-start) · [Documentation](https://www.lockllm.com/docs) · [Examples](#examples) · [Benchmarks](https://www.lockllm.com) · [API Reference](#api-reference)
16
16
 
@@ -26,9 +26,9 @@ LockLLM is a state-of-the-art AI security ecosystem that detects prompt injectio
26
26
 
27
27
  - **Real-Time Security Scanning** - Analyze every LLM request before execution with minimal latency (<250ms)
28
28
  - **Advanced ML Detection** - Models trained on real-world attack patterns for prompt injection and jailbreaks
29
- - **15+ Provider Support** - Universal coverage across OpenAI, Anthropic, Azure, Bedrock, Gemini, and more
29
+ - **17+ Provider Support** - Universal coverage across OpenAI, Anthropic, Azure, Bedrock, Gemini, and more
30
30
  - **Drop-in Integration** - Replace existing SDKs with zero code changes - just change one line
31
- - **Completely Free** - BYOK (Bring Your Own Key) model with unlimited usage and no rate limits
31
+ - **Free Unlimited Scanning** - BYOK (Bring Your Own Key) model with free unlimited scanning
32
32
  - **Privacy by Default** - Your data is never stored, only scanned in-memory and discarded
33
33
 
34
34
  ## Why LockLLM
@@ -73,27 +73,53 @@ LockLLM provides production-ready AI security that integrates seamlessly into yo
73
73
  | **Evasion & Obfuscation Detection** | Catch sophisticated obfuscation including Unicode abuse, zero-width characters, and encoding-based attacks |
74
74
  | **Multi-Layer Context Analysis** | Analyze prompts across multiple context windows to detect attacks spanning conversation turns |
75
75
  | **Token-Level Threat Scoring** | Granular threat assessment identifying which specific parts of input contain malicious patterns |
76
- | **15+ Provider Support** | OpenAI, Anthropic, Gemini, Azure, Bedrock, Groq, DeepSeek, and more |
76
+ | **17+ Provider Support** | OpenAI, Anthropic, Gemini, Azure, Bedrock, Groq, DeepSeek, and more |
77
77
  | **Drop-in Integration** | Replace `new OpenAI()` with `createOpenAI()` - no other changes needed |
78
78
  | **TypeScript Native** | Full type safety with comprehensive type definitions and IDE support |
79
79
  | **Streaming Compatible** | Works seamlessly with streaming responses from any provider |
80
80
  | **Configurable Sensitivity** | Adjust detection thresholds (low/medium/high) per use case |
81
- | **Custom Endpoints** | Support for self-hosted models, Azure resources, and private clouds |
81
+ | **Custom Endpoints** | Configure custom URLs for any provider (self-hosted, Azure, private clouds) |
82
+ | **Custom Content Policies** | Define your own content rules in the dashboard and enforce them automatically across all providers |
83
+ | **AI Abuse Detection** | Detect bot-generated content, repetition attacks, and resource exhaustion from your end-users |
84
+ | **Intelligent Routing** | Automatically select the optimal model for each request based on task type and complexity to save costs |
85
+ | **Response Caching** | Cache identical LLM responses to reduce costs and latency on repeated queries |
82
86
  | **Enterprise Privacy** | Provider keys encrypted at rest, prompts never stored |
83
87
  | **Production Ready** | Battle-tested with automatic retries, timeouts, and error handling |
84
88
 
85
89
  ## Installation
86
90
 
91
+ Choose your preferred package manager:
87
92
  ```bash
88
- # Install the SDK
93
+ # npm
89
94
  npm install @lockllm/sdk
90
95
 
91
- # For wrapper functions, install relevant peer dependencies
92
- npm install openai # For OpenAI, Groq, DeepSeek, Mistral, etc.
93
- npm install @anthropic-ai/sdk # For Anthropic Claude
94
- npm install cohere-ai # For Cohere (optional)
96
+ # pnpm (faster, saves disk space)
97
+ pnpm add @lockllm/sdk
98
+
99
+ # yarn
100
+ yarn add @lockllm/sdk
101
+ ```
102
+
103
+ ### Peer Dependencies
104
+
105
+ For wrapper functions, install the relevant provider SDKs:
106
+
107
+ ```bash
108
+ # npm
109
+ npm install openai @anthropic-ai/sdk cohere-ai
110
+
111
+ # pnpm
112
+ pnpm add openai @anthropic-ai/sdk cohere-ai
113
+
114
+ # yarn
115
+ yarn add openai @anthropic-ai/sdk cohere-ai
95
116
  ```
96
117
 
118
+ **Provider breakdown:**
119
+ - `openai` - For OpenAI, Groq, DeepSeek, Mistral, etc.
120
+ - `@anthropic-ai/sdk` - For Anthropic Claude
121
+ - `cohere-ai` - For Cohere (optional)
122
+
97
123
  **Note:** Peer dependencies are optional and only required if you use the wrapper functions for those providers.
98
124
 
99
125
  ## Quick Start
@@ -222,7 +248,7 @@ Compare detection accuracy and performance metrics at [lockllm.com/benchmarks](h
222
248
  | **Real-Time Protection** | ✅ <250ms latency | ✅ Built-in | ✅ Yes | ❌ Too slow |
223
249
  | **Setup Time** | 5 minutes | Included | Days to weeks | N/A |
224
250
  | **Maintenance** | None | None | Constant updates | Constant |
225
- | **Multi-Provider Support** | ✅ 15+ providers | Single provider | Custom per provider | N/A |
251
+ | **Multi-Provider Support** | ✅ 17+ providers | Single provider | Custom per provider | N/A |
226
252
  | **False Positives** | Low (~2-5%) | N/A | High (15-30%) | N/A |
227
253
  | **Cost** | Free (BYOK) | Free | Dev time + infrastructure | $$$ |
228
254
  | **Attack Coverage** | Comprehensive | Content policy only | Pattern-based only | Manual |
@@ -378,6 +404,9 @@ const highResult = await lockllm.scan({
378
404
  import {
379
405
  LockLLMError,
380
406
  PromptInjectionError,
407
+ PolicyViolationError,
408
+ AbuseDetectedError,
409
+ InsufficientCreditsError,
381
410
  AuthenticationError,
382
411
  RateLimitError,
383
412
  UpstreamError
@@ -395,13 +424,19 @@ try {
395
424
  console.log("Injection confidence:", error.scanResult.injection);
396
425
  console.log("Request ID:", error.requestId);
397
426
 
398
- // Log to security monitoring system
399
- await logSecurityIncident({
400
- type: 'prompt_injection',
401
- confidence: error.scanResult.injection,
402
- requestId: error.requestId,
403
- timestamp: new Date()
404
- });
427
+ } else if (error instanceof PolicyViolationError) {
428
+ // Custom policy violation detected
429
+ console.log("Policy violation:", error.violated_policies);
430
+
431
+ } else if (error instanceof AbuseDetectedError) {
432
+ // AI abuse detected (bot content, repetition, etc.)
433
+ console.log("Abuse detected:", error.abuse_details.abuse_types);
434
+ console.log("Confidence:", error.abuse_details.confidence);
435
+
436
+ } else if (error instanceof InsufficientCreditsError) {
437
+ // Not enough credits
438
+ console.log("Balance:", error.current_balance);
439
+ console.log("Cost:", error.estimated_cost);
405
440
 
406
441
  } else if (error instanceof AuthenticationError) {
407
442
  console.log("Invalid LockLLM API key");
@@ -422,7 +457,7 @@ try {
422
457
 
423
458
  ## Supported Providers
424
459
 
425
- LockLLM supports 17 AI providers with three flexible integration methods:
460
+ LockLLM supports 17+ AI providers with three flexible integration methods:
426
461
 
427
462
  ### Provider List
428
463
 
@@ -449,13 +484,16 @@ LockLLM supports 17 AI providers with three flexible integration methods:
449
484
  ### Custom Endpoints
450
485
 
451
486
  All providers support custom endpoint URLs for:
452
- - Self-hosted LLM deployments
453
- - Alternative API gateways
487
+ - Self-hosted LLM deployments (OpenAI-compatible APIs)
488
+ - Alternative API gateways and reverse proxies
454
489
  - Custom Azure OpenAI resources
455
- - Private cloud deployments
490
+ - Private cloud or air-gapped deployments
456
491
  - Development and staging environments
457
492
 
458
- Configure custom endpoints in the [LockLLM dashboard](https://www.lockllm.com/dashboard) when adding provider API keys.
493
+ **How it works:**
494
+ Configure custom endpoints in the [LockLLM dashboard](https://www.lockllm.com/dashboard) when adding any provider API key. The SDK wrappers automatically use your custom endpoint instead of the default.
495
+
496
+ **Example:** Use the OpenAI wrapper with your self-hosted Llama model by configuring a custom endpoint URL.
459
497
 
460
498
  ## How It Works
461
499
 
@@ -476,7 +514,7 @@ LockLLM uses a secure BYOK (Bring Your Own Key) model - you maintain control of
476
514
 
477
515
  - Use this single key in your SDK configuration
478
516
  - Authenticates requests to the LockLLM security gateway
479
- - Works across all 15+ providers with one key
517
+ - Works across all 17+ providers with one key
480
518
  - **This is the only key that goes in your code**
481
519
 
482
520
  ### Request Flow
@@ -562,7 +600,7 @@ interface LockLLMConfig {
562
600
  Scan a prompt for security threats before sending to an LLM.
563
601
 
564
602
  ```typescript
565
- await lockllm.scan(request: ScanRequest): Promise<ScanResponse>
603
+ await lockllm.scan(request: ScanRequest, options?: ScanOptions): Promise<ScanResponse>
566
604
  ```
567
605
 
568
606
  **Request Parameters:**
@@ -571,6 +609,14 @@ await lockllm.scan(request: ScanRequest): Promise<ScanResponse>
571
609
  interface ScanRequest {
572
610
  input: string; // Required: Text to scan
573
611
  sensitivity?: 'low' | 'medium' | 'high'; // Optional: Detection level (default: 'medium')
612
+ mode?: 'normal' | 'policy_only' | 'combined'; // Optional: Scan mode (default: 'combined')
613
+ chunk?: boolean; // Optional: Force chunking for long texts
614
+ }
615
+
616
+ interface ScanOptions {
617
+ scanAction?: 'block' | 'allow_with_warning'; // Core injection behavior
618
+ policyAction?: 'block' | 'allow_with_warning'; // Custom policy behavior
619
+ abuseAction?: 'block' | 'allow_with_warning'; // Abuse detection (opt-in)
574
620
  }
575
621
  ```
576
622
 
@@ -580,8 +626,9 @@ interface ScanRequest {
580
626
  interface ScanResponse {
581
627
  safe: boolean; // Whether input is safe (true) or malicious (false)
582
628
  label: 0 | 1; // Classification: 0=safe, 1=malicious
583
- confidence: number; // Confidence score (0-1)
584
- injection: number; // Injection risk score (0-1, higher=more risky)
629
+ confidence?: number; // Core injection confidence score (0-1)
630
+ injection?: number; // Injection risk score (0-1, higher=more risky)
631
+ policy_confidence?: number; // Policy check confidence (in combined/policy_only mode)
585
632
  sensitivity: Sensitivity; // Sensitivity level used for scan
586
633
  request_id: string; // Unique request identifier
587
634
 
@@ -590,11 +637,20 @@ interface ScanResponse {
590
637
  input_chars: number; // Number of characters processed
591
638
  };
592
639
 
593
- debug?: { // Only available with Pro plan
640
+ debug?: {
594
641
  duration_ms: number; // Total processing time
595
642
  inference_ms: number; // ML inference time
596
643
  mode: 'single' | 'chunked';
597
644
  };
645
+
646
+ // Present when using policy_only or combined mode with allow_with_warning
647
+ policy_warnings?: PolicyViolation[];
648
+ // Present when core injection detected with allow_with_warning
649
+ scan_warning?: ScanWarning;
650
+ // Present when abuse detection is enabled and abuse found
651
+ abuse_warnings?: AbuseWarning;
652
+ // Present when intelligent routing is enabled
653
+ routing?: { task_type: string; complexity: number; selected_model?: string; };
598
654
  }
599
655
  ```
600
656
 
@@ -615,6 +671,15 @@ createGroq(config: GenericClientConfig): OpenAI
615
671
  interface GenericClientConfig {
616
672
  apiKey: string; // Required: Your LockLLM API key
617
673
  baseURL?: string; // Optional: Override proxy URL
674
+ proxyOptions?: { // Optional: Security and routing configuration
675
+ scanMode?: 'normal' | 'policy_only' | 'combined';
676
+ scanAction?: 'block' | 'allow_with_warning';
677
+ policyAction?: 'block' | 'allow_with_warning';
678
+ abuseAction?: 'block' | 'allow_with_warning' | null;
679
+ routeAction?: 'disabled' | 'auto' | 'custom';
680
+ cacheResponse?: boolean;
681
+ cacheTTL?: number;
682
+ };
618
683
  [key: string]: any; // Optional: Provider-specific options
619
684
  }
620
685
  ```
@@ -631,6 +696,16 @@ const url = getProxyURL('openai');
631
696
  // Returns: 'https://api.lockllm.com/v1/proxy/openai'
632
697
  ```
633
698
 
699
+ **Get universal proxy URL (non-BYOK, 200+ models):**
700
+
701
+ ```typescript
702
+ function getUniversalProxyURL(): string
703
+
704
+ // Example
705
+ const url = getUniversalProxyURL();
706
+ // Returns: 'https://api.lockllm.com/v1/proxy/chat/completions'
707
+ ```
708
+
634
709
  **Get all proxy URLs:**
635
710
 
636
711
  ```typescript
@@ -642,6 +717,34 @@ console.log(urls.openai); // 'https://api.lockllm.com/v1/proxy/openai'
642
717
  console.log(urls.anthropic); // 'https://api.lockllm.com/v1/proxy/anthropic'
643
718
  ```
644
719
 
720
+ **Build LockLLM proxy headers:**
721
+
722
+ ```typescript
723
+ import { buildLockLLMHeaders } from '@lockllm/sdk';
724
+
725
+ const headers = buildLockLLMHeaders({
726
+ scanMode: 'combined',
727
+ scanAction: 'block',
728
+ policyAction: 'allow_with_warning',
729
+ abuseAction: 'block',
730
+ routeAction: 'auto'
731
+ });
732
+ // Returns: { 'x-lockllm-scan-mode': 'combined', ... }
733
+ ```
734
+
735
+ **Parse proxy response metadata:**
736
+
737
+ ```typescript
738
+ import { parseProxyMetadata } from '@lockllm/sdk';
739
+
740
+ // Parse response headers from any proxy request
741
+ const metadata = parseProxyMetadata(response.headers);
742
+ console.log(metadata.safe); // true/false
743
+ console.log(metadata.scan_mode); // 'combined'
744
+ console.log(metadata.cache_status); // 'HIT' or 'MISS'
745
+ console.log(metadata.routing); // { task_type, complexity, selected_model, ... }
746
+ ```
747
+
645
748
  ## Error Types
646
749
 
647
750
  LockLLM provides typed errors for comprehensive error handling:
@@ -653,6 +756,9 @@ LockLLMError (base)
653
756
  ├── AuthenticationError (401)
654
757
  ├── RateLimitError (429)
655
758
  ├── PromptInjectionError (400)
759
+ ├── PolicyViolationError (403)
760
+ ├── AbuseDetectedError (400)
761
+ ├── InsufficientCreditsError (402)
656
762
  ├── UpstreamError (502)
657
763
  ├── ConfigurationError (400)
658
764
  └── NetworkError (0)
@@ -676,6 +782,32 @@ class RateLimitError extends LockLLMError {
676
782
  retryAfter?: number; // Milliseconds until retry allowed
677
783
  }
678
784
 
785
+ class PolicyViolationError extends LockLLMError {
786
+ violated_policies: Array<{
787
+ policy_name: string;
788
+ violated_categories: Array<{ name: string }>;
789
+ violation_details?: string;
790
+ }>;
791
+ }
792
+
793
+ class AbuseDetectedError extends LockLLMError {
794
+ abuse_details: {
795
+ confidence: number;
796
+ abuse_types: string[];
797
+ indicators: {
798
+ bot_score: number;
799
+ repetition_score: number;
800
+ resource_score: number;
801
+ pattern_score: number;
802
+ };
803
+ };
804
+ }
805
+
806
+ class InsufficientCreditsError extends LockLLMError {
807
+ current_balance: number; // Current credit balance
808
+ estimated_cost: number; // Estimated cost of the request
809
+ }
810
+
679
811
  class UpstreamError extends LockLLMError {
680
812
  provider?: string; // Provider name
681
813
  upstreamStatus?: number; // Provider's status code
@@ -707,13 +839,22 @@ LockLLM adds minimal latency while providing comprehensive security protection.
707
839
 
708
840
  ## Rate Limits
709
841
 
710
- LockLLM provides generous rate limits for all users, with the Free tier supporting most production use cases.
842
+ LockLLM uses a 10-tier progressive system based on monthly usage. Higher tiers unlock faster rate limits and free monthly credits.
843
+
844
+ | Tier | Max RPM | Monthly Spending Requirement |
845
+ |------|---------|----------------------------|
846
+ | **Tier 1** (Free) | 30 RPM | $0 |
847
+ | **Tier 2** | 50 RPM | $10/month |
848
+ | **Tier 3** | 100 RPM | $50/month |
849
+ | **Tier 4** | 200 RPM | $100/month |
850
+ | **Tier 5** | 500 RPM | $250/month |
851
+ | **Tier 6** | 1,000 RPM | $500/month |
852
+ | **Tier 7** | 2,000 RPM | $1,000/month |
853
+ | **Tier 8** | 5,000 RPM | $3,000/month |
854
+ | **Tier 9** | 10,000 RPM | $5,000/month |
855
+ | **Tier 10** | 20,000 RPM | $10,000/month |
711
856
 
712
- | Tier | Requests per Minute | Best For |
713
- |------|---------------------|----------|
714
- | **Free** | 1,000 RPM | Most applications, startups, side projects |
715
- | **Pro** | 10,000 RPM | High-traffic applications, enterprise pilots |
716
- | **Enterprise** | Custom | Large-scale deployments, custom SLAs |
857
+ See [pricing](https://www.lockllm.com/pricing) for full tier details and free monthly credits.
717
858
 
718
859
  **Smart Rate Limit Handling:**
719
860
 
@@ -751,6 +892,64 @@ const lockllm = new LockLLM({
751
892
  });
752
893
  ```
753
894
 
895
+ ### Advanced Scan Options
896
+
897
+ Control scan behavior with mode, sensitivity, and action headers:
898
+
899
+ ```typescript
900
+ // Scan API with advanced options
901
+ const result = await lockllm.scan(
902
+ {
903
+ input: userPrompt,
904
+ sensitivity: 'high', // 'low' | 'medium' | 'high'
905
+ mode: 'combined', // 'normal' | 'policy_only' | 'combined'
906
+ chunk: true // Force chunking for long texts
907
+ },
908
+ {
909
+ scanAction: 'block', // Block core injection attacks
910
+ policyAction: 'allow_with_warning', // Allow but warn on policy violations
911
+ abuseAction: 'block' // Enable abuse detection (opt-in)
912
+ }
913
+ );
914
+
915
+ // Proxy mode with advanced options
916
+ const openai = createOpenAI({
917
+ apiKey: process.env.LOCKLLM_API_KEY,
918
+ proxyOptions: {
919
+ scanMode: 'combined', // Check both core + policies
920
+ scanAction: 'block', // Block injection attacks
921
+ policyAction: 'block', // Block policy violations
922
+ abuseAction: 'allow_with_warning', // Detect abuse, don't block
923
+ routeAction: 'auto' // Enable intelligent routing
924
+ }
925
+ });
926
+ ```
927
+
928
+ **Scan Modes:**
929
+ - `normal` - Core security threats only (injection, jailbreaks, etc.)
930
+ - `policy_only` - Custom policies only (skip core security)
931
+ - `combined` (default) - Both core security AND custom policies
932
+
933
+ **Sensitivity Levels:**
934
+ - `low` - Fewer false positives, may miss sophisticated attacks
935
+ - `medium` (default) - Balanced approach, recommended
936
+ - `high` - Maximum protection, may have more false positives
937
+
938
+ **Action Headers:**
939
+ - `scanAction` - Controls core injection detection: `'block'` | `'allow_with_warning'`
940
+ - `policyAction` - Controls custom policy violations: `'block'` | `'allow_with_warning'`
941
+ - `abuseAction` - Controls abuse detection (opt-in): `'block'` | `'allow_with_warning'` | `null`
942
+ - `routeAction` - Controls intelligent routing: `'disabled'` | `'auto'` | `'custom'`
943
+
944
+ **Default Behavior (no headers):**
945
+ - Scan Mode: `combined` (check both core + policies)
946
+ - Scan Action: `allow_with_warning` (detect but don't block)
947
+ - Policy Action: `allow_with_warning` (detect but don't block)
948
+ - Abuse Action: `null` (disabled, opt-in only)
949
+ - Route Action: `disabled` (no routing)
950
+
951
+ See [examples/advanced-options.ts](examples/advanced-options.ts) for complete examples.
952
+
754
953
  ## Best Practices
755
954
 
756
955
  ### Security
@@ -841,17 +1040,17 @@ For non-JavaScript environments, use the REST API directly:
841
1040
 
842
1041
  **Scan Endpoint:**
843
1042
  ```bash
844
- curl -X POST https://api.lockllm.com/scan \
845
- -H "x-api-key: YOUR_LOCKLLM_API_KEY" \
1043
+ curl -X POST https://api.lockllm.com/v1/scan \
1044
+ -H "Authorization: Bearer YOUR_LOCKLLM_API_KEY" \
846
1045
  -H "Content-Type: application/json" \
847
- -d '{"prompt": "Your text to scan", "sensitivity": "medium"}'
1046
+ -d '{"input": "Your text to scan", "sensitivity": "medium"}'
848
1047
  ```
849
1048
 
850
1049
  **Proxy Endpoints:**
851
1050
  ```bash
852
1051
  # OpenAI-compatible proxy
853
1052
  curl -X POST https://api.lockllm.com/v1/proxy/openai/chat/completions \
854
- -H "x-api-key: YOUR_LOCKLLM_API_KEY" \
1053
+ -H "Authorization: Bearer YOUR_LOCKLLM_API_KEY" \
855
1054
  -H "Content-Type: application/json" \
856
1055
  -d '{"model": "gpt-4", "messages": [{"role": "user", "content": "Hello"}]}'
857
1056
  ```
@@ -876,7 +1075,7 @@ import {
876
1075
 
877
1076
  // Type inference works automatically
878
1077
  const config: LockLLMConfig = {
879
- apiKey: 'llm_...',
1078
+ apiKey: '...',
880
1079
  timeout: 30000
881
1080
  };
882
1081
 
package/dist/client.d.ts CHANGED
@@ -30,7 +30,7 @@ export declare class LockLLM {
30
30
  * });
31
31
  * ```
32
32
  */
33
- get scan(): (request: import(".").ScanRequest, options?: import("./types/common").RequestOptions) => Promise<import(".").ScanResponse>;
33
+ get scan(): (request: import(".").ScanRequest, options?: import(".").ScanOptions) => Promise<import(".").ScanResponse>;
34
34
  /**
35
35
  * Get the current configuration
36
36
  */
@@ -1 +1 @@
1
- {"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../src/client.ts"],"names":[],"mappings":"AAAA;;GAEG;AAKH,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,gBAAgB,CAAC;AAMpD,qBAAa,OAAO;IAClB,OAAO,CAAC,QAAQ,CAAC,MAAM,CAA0B;IACjD,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAa;IAClC,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAa;IAExC;;;;;;;;;;;OAWG;gBACS,MAAM,EAAE,aAAa;IA4BjC;;;;;;;;;;OAUG;IACH,IAAI,IAAI,+HAEP;IAED;;OAEG;IACH,SAAS,IAAI,QAAQ,CAAC,QAAQ,CAAC,aAAa,CAAC,CAAC;CAG/C"}
1
+ {"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../src/client.ts"],"names":[],"mappings":"AAAA;;GAEG;AAKH,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,gBAAgB,CAAC;AAMpD,qBAAa,OAAO;IAClB,OAAO,CAAC,QAAQ,CAAC,MAAM,CAA0B;IACjD,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAa;IAClC,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAa;IAExC;;;;;;;;;;;OAWG;gBACS,MAAM,EAAE,aAAa;IA4BjC;;;;;;;;;;OAUG;IACH,IAAI,IAAI,+GAEP;IAED;;OAEG;IACH,SAAS,IAAI,QAAQ,CAAC,QAAQ,CAAC,aAAa,CAAC,CAAC;CAG/C"}