@continum/sdk 0.0.3 → 0.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,6 +2,8 @@
2
2
 
3
3
  > Governed Execution Framework for LLM Applications
4
4
 
5
+ **Current Version**: 0.0.5
6
+
5
7
  ## Quick Start
6
8
 
7
9
  ### Installation
@@ -24,20 +26,77 @@ import { Continum } from '@continum/sdk';
24
26
  const continum = new Continum({
25
27
  continumKey: process.env.CONTINUM_KEY!,
26
28
  apiKeys: {
27
- openai: process.env.OPENAI_API_KEY
29
+ openai: process.env.OPENAI_API_KEY,
30
+ anthropic: process.env.ANTHROPIC_API_KEY,
31
+ gemini: process.env.GEMINI_API_KEY
28
32
  },
29
33
  defaultSandbox: 'pii_protection'
30
34
  });
31
35
 
32
- // Make LLM calls with automatic compliance
36
+ // OpenAI - use snake_case model names
33
37
  const response = await continum.llm.openai.gpt_4o.chat({
34
38
  messages: [{ role: 'user', content: 'Hello world' }]
35
39
  });
40
+
41
+ // Anthropic - use model family names (opus, sonnet, haiku)
42
+ const response2 = await continum.llm.anthropic.opus_4_6.chat({
43
+ messages: [{ role: 'user', content: 'Review this code' }]
44
+ });
45
+ // Also supports: sonnet_4_6, sonnet_4, haiku_4_5, haiku_3_5, sonnet_3_7
46
+ // Legacy format also works: claude_3_5_sonnet
47
+
48
+ // Gemini - use snake_case with underscores
49
+ const response3 = await continum.llm.gemini.gemini_2_5_pro.chat({
50
+ messages: [{ role: 'user', content: 'Summarize this' }]
51
+ });
52
+
36
53
  // ✅ Guardian checks for PII (pre-execution)
37
54
  // ✅ User gets response instantly
38
55
  // ✅ Shadow Audit runs in background (post-execution)
39
56
  ```
40
57
 
58
+ ## Model Name Format
59
+
60
+ The SDK uses snake_case model names that get automatically transformed to the correct API format:
61
+
62
+ ### Anthropic Models
63
+
64
+ ```typescript
65
+ // Recommended format - use model family names
66
+ continum.llm.anthropic.opus_4_6.chat() // → claude-opus-4-6
67
+ continum.llm.anthropic.sonnet_4_6.chat() // → claude-sonnet-4-6
68
+ continum.llm.anthropic.sonnet_4.chat() // → claude-sonnet-4-5
69
+ continum.llm.anthropic.haiku_4_5.chat() // → claude-haiku-4-5-20251001
70
+ continum.llm.anthropic.haiku_3_5.chat() // → claude-haiku-3-5-20241022
71
+ continum.llm.anthropic.sonnet_3_7.chat() // → claude-sonnet-3-7-20250219
72
+
73
+ // Legacy format also supported (v0.0.4+)
74
+ continum.llm.anthropic.claude_3_5_sonnet.chat() // → claude-3-5-sonnet-20241022
75
+
76
+ // Alias: claude and anthropic are interchangeable
77
+ continum.llm.claude.opus_4_6.chat() // Same as anthropic.opus_4_6
78
+ ```
79
+
80
+ ### OpenAI Models
81
+
82
+ ```typescript
83
+ continum.llm.openai.gpt_5.chat() // → gpt-5
84
+ continum.llm.openai.gpt_4o.chat() // → gpt-4o
85
+ continum.llm.openai.gpt_4_turbo.chat() // → gpt-4-turbo
86
+ continum.llm.openai.o3.chat() // → o3
87
+ continum.llm.openai.o3_mini.chat() // → o3-mini
88
+ continum.llm.openai.o1.chat() // → o1
89
+ ```
90
+
91
+ ### Gemini Models
92
+
93
+ ```typescript
94
+ continum.llm.gemini.gemini_2_5_pro.chat() // → gemini-2.5-pro
95
+ continum.llm.gemini.gemini_2_5_flash.chat() // → gemini-2.5-flash
96
+ continum.llm.gemini.gemini_2_0_flash.chat() // → gemini-2.0-flash
97
+ continum.llm.gemini.gemini_1_5_pro.chat() // → gemini-1.5-pro
98
+ ```
99
+
41
100
  ## Architecture
42
101
 
43
102
  ### Two-Tier Protection System
@@ -85,8 +144,8 @@ const continum = new Continum({
85
144
  apiKeys: { openai: process.env.OPENAI_API_KEY },
86
145
  guardianConfig: {
87
146
  enabled: true, // Enable pre-LLM protection
88
- blockHighRisk: true, // Block SSN, credit cards, etc.
89
- redactMediumRisk: true, // Redact emails, phones, etc.
147
+ action: 'REDACT_AND_CONTINUE', // Guardian action mode
148
+ // Options: 'BLOCK_ON_DETECT', 'REDACT_AND_CONTINUE', 'ALLOW_ALL'
90
149
  localOnly: false, // Use remote ML for complex cases
91
150
  customPatterns: [
92
151
  {
@@ -99,6 +158,12 @@ const continum = new Continum({
99
158
  });
100
159
  ```
101
160
 
161
+ #### Guardian Action Modes
162
+
163
+ - **BLOCK_ON_DETECT**: Block request immediately if any PII is detected
164
+ - **REDACT_AND_CONTINUE**: Redact PII and continue with LLM call (default)
165
+ - **ALLOW_ALL**: Disable Guardian protection (allow everything)
166
+
102
167
  ### Shadow Audit Configuration
103
168
 
104
169
  ```typescript
@@ -54,8 +54,9 @@ class GuardianClient {
54
54
  // Tier 1: Fast local pattern matching
55
55
  const localResult = this.scanLocalPatterns(request.userInput);
56
56
  if (localResult.detectedEntities.length > 0) {
57
- // Found PII locally - apply redaction
58
- return this.buildGuardianResult(localResult, request);
57
+ // Found PII locally - apply action based on sandbox config
58
+ // Note: guardianAction should come from sandbox config in production
59
+ return this.buildGuardianResult(localResult, request, 'REDACT_AND_CONTINUE');
59
60
  }
60
61
  // Tier 2: Remote ML scan for complex cases (optional)
61
62
  // Only if local scan found nothing but we want deeper analysis
@@ -133,7 +134,7 @@ class GuardianClient {
133
134
  return '****';
134
135
  }
135
136
  }
136
- buildGuardianResult(localResult, request) {
137
+ buildGuardianResult(localResult, request, guardianAction = 'REDACT_AND_CONTINUE') {
137
138
  let cleanPrompt = request.userInput;
138
139
  const violations = [];
139
140
  const reasoningParts = [];
@@ -143,10 +144,22 @@ class GuardianClient {
143
144
  violations.push(`${entity.type}_DETECTED`);
144
145
  reasoningParts.push(`${entity.type.toLowerCase()} ${entity.redactedValue} detected`);
145
146
  }
146
- // Determine action based on entity types
147
- const hasHighRiskPII = localResult.detectedEntities.some(e => ['SSN', 'CREDIT_CARD', 'PASSPORT', 'HEALTH_ID'].includes(e.type));
147
+ // Determine action based on guardianAction configuration
148
+ let action = 'ALLOW';
149
+ if (guardianAction === 'ALLOW_ALL') {
150
+ // Guardian disabled - allow everything
151
+ action = 'ALLOW';
152
+ }
153
+ else if (guardianAction === 'BLOCK_ON_DETECT') {
154
+ // Block if any PII detected
155
+ action = 'BLOCK';
156
+ }
157
+ else if (guardianAction === 'REDACT_AND_CONTINUE') {
158
+ // Redact PII and continue with LLM call
159
+ action = 'REDACT';
160
+ }
148
161
  return {
149
- action: hasHighRiskPII ? 'BLOCK' : 'REDACT',
162
+ action,
150
163
  violations,
151
164
  reasoning: reasoningParts.join(', '),
152
165
  cleanPrompt,
@@ -29,6 +29,7 @@ function resolveModelId(provider, snakeKey) {
29
29
  'sonnet-4': 'claude-sonnet-4-5',
30
30
  'haiku-3-5': 'claude-haiku-3-5-20241022',
31
31
  'sonnet-3-7': 'claude-sonnet-3-7-20250219',
32
+ 'claude-3-5-sonnet': 'claude-3-5-sonnet-20241022',
32
33
  };
33
34
  return anthropicMap[hyphenated] ?? `claude-${hyphenated}`;
34
35
  }
@@ -51,6 +51,7 @@ export interface ContinumConfig {
51
51
  defaultSandbox?: string;
52
52
  guardianConfig?: {
53
53
  enabled?: boolean;
54
+ action?: 'BLOCK_ON_DETECT' | 'REDACT_AND_CONTINUE' | 'ALLOW_ALL';
54
55
  blockHighRisk?: boolean;
55
56
  redactMediumRisk?: boolean;
56
57
  localOnly?: boolean;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@continum/sdk",
3
- "version": "0.0.3",
3
+ "version": "0.0.5",
4
4
  "description": "Zero-latency compliance auditing for every LLM call in your application",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",