agentic-lang 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/COMMUNITY.md +220 -0
- package/CONTRIBUTING.md +194 -0
- package/FINAL_REPORT.md +398 -0
- package/FOR_OTHER_LLMS.md +286 -0
- package/IMPROVEMENTS.md +319 -0
- package/LAUNCH_GUIDE.md +388 -0
- package/LICENSE +21 -0
- package/NPM_PUBLISH.md +257 -0
- package/PROJECT_COMPLETE.md +414 -0
- package/PROJECT_OVERVIEW.md +265 -0
- package/PROJECT_TREE.txt +228 -0
- package/PUBLISHING_GUIDE.md +426 -0
- package/PUBLISH_NOW.md +337 -0
- package/QUICKSTART.md +207 -0
- package/README.md +195 -0
- package/README_ENHANCED.md +329 -0
- package/READY_TO_LAUNCH.txt +56 -0
- package/REFACTOR_PLAN.md +179 -0
- package/ROADMAP.md +201 -0
- package/SUMMARY.md +315 -0
- package/bin/agentic.js +3 -0
- package/blog/001-introducing-agentic.md +382 -0
- package/blog/002-confidence-driven-development.md +490 -0
- package/blog/003-formal-verification.md +427 -0
- package/blog/004-multi-agent-production.md +436 -0
- package/dist/cli.d.ts +7 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +151 -0
- package/dist/cli.js.map +1 -0
- package/dist/diagnostics/diagnostic.d.ts +115 -0
- package/dist/diagnostics/diagnostic.d.ts.map +1 -0
- package/dist/diagnostics/diagnostic.js +101 -0
- package/dist/diagnostics/diagnostic.js.map +1 -0
- package/dist/diagnostics/formatter.d.ts +36 -0
- package/dist/diagnostics/formatter.d.ts.map +1 -0
- package/dist/diagnostics/formatter.js +263 -0
- package/dist/diagnostics/formatter.js.map +1 -0
- package/dist/effects/effect-system.d.ts +64 -0
- package/dist/effects/effect-system.d.ts.map +1 -0
- package/dist/effects/effect-system.js +197 -0
- package/dist/effects/effect-system.js.map +1 -0
- package/dist/generator/typescript-generator.d.ts +31 -0
- package/dist/generator/typescript-generator.d.ts.map +1 -0
- package/dist/generator/typescript-generator.js +308 -0
- package/dist/generator/typescript-generator.js.map +1 -0
- package/dist/index.d.ts +19 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +60 -0
- package/dist/index.js.map +1 -0
- package/dist/lean4/exporter.d.ts +24 -0
- package/dist/lean4/exporter.d.ts.map +1 -0
- package/dist/lean4/exporter.js +142 -0
- package/dist/lean4/exporter.js.map +1 -0
- package/dist/lsp/server.d.ts +6 -0
- package/dist/lsp/server.d.ts.map +1 -0
- package/dist/lsp/server.js +131 -0
- package/dist/lsp/server.js.map +1 -0
- package/dist/parser/lexer.d.ts +79 -0
- package/dist/parser/lexer.d.ts.map +1 -0
- package/dist/parser/lexer.js +296 -0
- package/dist/parser/lexer.js.map +1 -0
- package/dist/parser/parser-enhanced.d.ts +12 -0
- package/dist/parser/parser-enhanced.d.ts.map +1 -0
- package/dist/parser/parser-enhanced.js +206 -0
- package/dist/parser/parser-enhanced.js.map +1 -0
- package/dist/parser/parser.d.ts +34 -0
- package/dist/parser/parser.d.ts.map +1 -0
- package/dist/parser/parser.js +507 -0
- package/dist/parser/parser.js.map +1 -0
- package/dist/property-tests/generator-enhanced.d.ts +27 -0
- package/dist/property-tests/generator-enhanced.d.ts.map +1 -0
- package/dist/property-tests/generator-enhanced.js +209 -0
- package/dist/property-tests/generator-enhanced.js.map +1 -0
- package/dist/property-tests/generator-fixed.d.ts +2 -0
- package/dist/property-tests/generator-fixed.d.ts.map +1 -0
- package/dist/property-tests/generator-fixed.js +7 -0
- package/dist/property-tests/generator-fixed.js.map +1 -0
- package/dist/property-tests/generator.d.ts +28 -0
- package/dist/property-tests/generator.d.ts.map +1 -0
- package/dist/property-tests/generator.js +284 -0
- package/dist/property-tests/generator.js.map +1 -0
- package/dist/refinements/refinement-types.d.ts +96 -0
- package/dist/refinements/refinement-types.d.ts.map +1 -0
- package/dist/refinements/refinement-types.js +234 -0
- package/dist/refinements/refinement-types.js.map +1 -0
- package/dist/repl.d.ts +21 -0
- package/dist/repl.d.ts.map +1 -0
- package/dist/repl.js +317 -0
- package/dist/repl.js.map +1 -0
- package/dist/runtime/agents.d.ts +97 -0
- package/dist/runtime/agents.d.ts.map +1 -0
- package/dist/runtime/agents.js +258 -0
- package/dist/runtime/agents.js.map +1 -0
- package/dist/runtime/index.d.ts +98 -0
- package/dist/runtime/index.d.ts.map +1 -0
- package/dist/runtime/index.js +253 -0
- package/dist/runtime/index.js.map +1 -0
- package/dist/types-extended.d.ts +197 -0
- package/dist/types-extended.d.ts.map +1 -0
- package/dist/types-extended.js +7 -0
- package/dist/types-extended.js.map +1 -0
- package/dist/types.d.ts +129 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +6 -0
- package/dist/types.js.map +1 -0
- package/dist/verification/z3-engine.d.ts +75 -0
- package/dist/verification/z3-engine.d.ts.map +1 -0
- package/dist/verification/z3-engine.js +234 -0
- package/dist/verification/z3-engine.js.map +1 -0
- package/examples/advanced-features.agentic +98 -0
- package/examples/annotations.agentic +37 -0
- package/examples/auth.agentic +53 -0
- package/examples/enterprise-example.agentic +360 -0
- package/examples/minimal.agentic +3 -0
- package/examples/minimal.ts +7 -0
- package/examples/ml-pipeline.agentic +350 -0
- package/examples/multi-agent-example.agentic +212 -0
- package/examples/onboarding-tutorial.agentic +263 -0
- package/examples/production-api.agentic +304 -0
- package/examples/real-world-chatbot.agentic +351 -0
- package/examples/result-handling.agentic +34 -0
- package/examples/runtime.ts +24 -0
- package/examples/showcase.agentic +22 -0
- package/examples/showcase.ts +28 -0
- package/examples/simple-test.agentic +4 -0
- package/examples/simple-test.ts +7 -0
- package/examples/simple.agentic +20 -0
- package/examples/test2.agentic +4 -0
- package/examples/test2.ts +9 -0
- package/examples/test3.agentic +4 -0
- package/examples/test3.ts +9 -0
- package/package.json +70 -0
- package/playground/index.html +221 -0
- package/playground/playground.js +291 -0
- package/registry/package-registry.ts +319 -0
- package/scripts/build.js +50 -0
- package/scripts/validate-confidence-mutation.ts +112 -0
- package/stdlib/async/promise.agentic +216 -0
- package/stdlib/database/pool.agentic +235 -0
- package/stdlib/file/io.agentic +194 -0
- package/stdlib/http/client.agentic +168 -0
- package/video-scripts/001-agentic-in-100-seconds.md +175 -0
- package/vscode-extension/README.md +67 -0
- package/vscode-extension/language-configuration.json +31 -0
- package/vscode-extension/package.json +46 -0
- package/vscode-extension/syntaxes/agentic.tmLanguage.json +134 -0
package/examples/enterprise-example.agentic

@@ -0,0 +1,360 @@
+// Enterprise-Grade Application Example
+// Demonstrates: audit logging, compliance, security, observability
+
+@module("enterprise")
+
+// Audit logging for compliance
+@audit_trail(
+  retention: 7 * 365d, // 7 years (regulatory requirement)
+  encryption: "AES-256",
+  immutable: true,
+  compliance: ["SOC2", "HIPAA", "GDPR"]
+)
+@confidence(0.96)
+@complete
+func processFinancialTransaction(
+  transaction: Transaction,
+  user: User
+) -> Result<Receipt, TransactionError> {
+  @audit.start({
+    action: "financial_transaction",
+    userId: user.id,
+    amount: transaction.amount,
+    timestamp: now(),
+    ipAddress: user.ipAddress,
+    userAgent: user.userAgent
+  })
+
+  // Validate user permissions
+  @requires_permission("financial.transactions.process")
+  if !user.hasPermission("financial.transactions.process") {
+    @audit.fail("insufficient_permissions", {
+      required: "financial.transactions.process",
+      userPermissions: user.permissions
+    })
+    return Err(TransactionError.INSUFFICIENT_PERMISSIONS)
+  }
+
+  // Validate transaction
+  @confidence(0.94)
+  validation = validateTransaction(transaction) match {
+    Ok(_) -> {},
+    Err(e) -> {
+      @audit.fail("validation_failed", { error: e, transaction })
+      return Err(TransactionError.VALIDATION_FAILED(e))
+    }
+  }
+
+  // Check for fraud
+  @confidence(0.89)
+  @llm_call(model: "fraud-detection-v2")
+  fraudCheck = detectFraud(transaction, user.history) match {
+    Ok(result) if result.score < 0.3 -> {}, // Low fraud risk
+    Ok(result) if result.score < 0.7 -> {
+      // Medium risk - require additional approval
+      @requires_approval(
+        approvers: ["fraud_team"],
+        timeout: 30m,
+        escalation: "security_team"
+      )
+      @audit.warning("fraud_check_flagged", { score: result.score })
+    },
+    Ok(result) -> {
+      // High risk - reject
+      @audit.fail("fraud_detected", {
+        score: result.score,
+        reasons: result.reasons
+      })
+      return Err(TransactionError.FRAUD_DETECTED)
+    },
+    Err(e) -> {
+      @audit.error("fraud_check_error", { error: e })
+      // Fail closed - require manual review
+      @escalate_to_human("Fraud detection system unavailable")
+    }
+  }
+
+  // Execute transaction with two-phase commit
+  @transaction(isolation: "serializable")
+  @confidence(0.93)
+  receipt = executeTransaction(transaction) match {
+    Ok(r) -> r,
+    Err(e) -> {
+      @audit.fail("transaction_execution_failed", { error: e })
+      return Err(TransactionError.EXECUTION_FAILED(e))
+    }
+  }
+
+  // Success audit
+  @audit.success({
+    receiptId: receipt.id,
+    amount: transaction.amount,
+    timestamp: now(),
+    processingTime: elapsed()
+  })
+
+  // GDPR: Record data processing
+  @gdpr_record({
+    dataSubject: user.id,
+    processing: "financial_transaction",
+    legalBasis: "contract",
+    purpose: "service_delivery",
+    retention: "7_years_regulatory"
+  })
+
+  return Ok(receipt)
+}
+
+// PII handling with encryption
+@pii_protection(
+  encrypt: ["ssn", "creditCard", "bankAccount"],
+  mask_in_logs: true,
+  requires_consent: true
+)
+@confidence(0.97)
+@complete
+func storeUserData(data: SensitiveUserData) -> Result<void, PrivacyError> {
+  // Check consent
+  if !data.user.hasConsent("data_storage") {
+    @audit.compliance_violation("missing_consent", {
+      userId: data.user.id,
+      dataTypes: ["pii", "financial"]
+    })
+    return Err(PrivacyError.MISSING_CONSENT)
+  }
+
+  // Encrypt PII automatically
+  encrypted = {
+    userId: data.user.id,
+    ssn: encrypt(data.ssn, key: envVar("PII_ENCRYPTION_KEY")),
+    creditCard: encrypt(data.creditCard, key: envVar("PII_ENCRYPTION_KEY")),
+    // Non-PII stored as-is
+    name: data.name,
+    email: data.email
+  }
+
+  database.sensitive_data.insert(encrypted) match {
+    Ok(_) -> {
+      @audit.pii_stored({
+        userId: data.user.id,
+        dataTypes: ["ssn", "creditCard"],
+        encrypted: true
+      })
+      return Ok(void)
+    },
+    Err(e) -> {
+      @audit.error("pii_storage_failed", { error: e })
+      return Err(PrivacyError.STORAGE_FAILED(e))
+    }
+  }
+}
+
+// GDPR right to erasure
+@gdpr_compliant(right: "erasure")
+@confidence(0.94)
+@complete
+@property("removes all user data")
+@property("cascades to related records")
+@property("generates deletion certificate")
+func deleteUserData(userId: string, reason: string) -> Result<DeletionCertificate, Error> {
+  @audit.start({
+    action: "gdpr_erasure",
+    userId: userId,
+    reason: reason,
+    requestedBy: getCurrentUser().id
+  })
+
+  // Find all user data across systems
+  @parallel(maxConcurrency: 5)
+  deletions = [
+    database.users.delete(userId),
+    database.transactions.deleteByUser(userId),
+    database.sensitive_data.deleteByUser(userId),
+    redis.deleteUserSessions(userId),
+    s3.deleteUserFiles(userId)
+  ]
+
+  // Verify all deletions succeeded
+  failures = deletions.filter(r => r.isErr())
+
+  if failures.length > 0 {
+    @audit.fail("incomplete_deletion", { failures })
+    return Err(Error.INCOMPLETE_DELETION {
+      failed: failures.length,
+      total: deletions.length
+    })
+  }
+
+  // Generate deletion certificate (legal requirement)
+  certificate = DeletionCertificate {
+    userId: userId,
+    deletedAt: now(),
+    requestReason: reason,
+    systemsAffected: ["database", "redis", "s3"],
+    verification: hash(deletions),
+    signature: sign(userId, now())
+  }
+
+  @audit.success({
+    userId: userId,
+    certificate: certificate.id
+  })
+
+  return Ok(certificate)
+}
+
+// Role-based access control
+@rbac(
+  resource: "financial_reports",
+  requiredRoles: ["accountant", "cfo", "auditor"]
+)
+@confidence(0.95)
+@complete
+func generateFinancialReport(
+  period: Period,
+  user: User
+) -> Result<FinancialReport, AccessError> {
+  // Automatic RBAC check before function executes
+
+  if !user.hasAnyRole(["accountant", "cfo", "auditor"]) {
+    @audit.access_denied({
+      userId: user.id,
+      resource: "financial_reports",
+      requiredRoles: ["accountant", "cfo", "auditor"],
+      userRoles: user.roles
+    })
+    return Err(AccessError.INSUFFICIENT_PRIVILEGES)
+  }
+
+  @audit.access_granted({
+    userId: user.id,
+    resource: "financial_reports",
+    userRole: user.roles.first()
+  })
+
+  report = compileFinancialData(period)
+
+  @audit.report_generated({
+    reportId: report.id,
+    period: period,
+    generatedBy: user.id,
+    dataPoints: report.data.length
+  })
+
+  return Ok(report)
+}
+
+// Distributed tracing for microservices
+@opentelemetry(
+  service: "enterprise-app",
+  exporter: "otlp",
+  endpoint: "https://telemetry.company.com"
+)
+@confidence(0.91)
+@effects(network, database, io, async)
+func orchestrateWorkflow(request: WorkflowRequest) -> Result<WorkflowOutput, Error> {
+  @span("validate_request", attributes: {
+    workflowType: request.type,
+    requestedBy: request.userId
+  })
+  validation = validateRequest(request)
+
+  @span("fetch_user_data", attributes: {
+    userId: request.userId
+  })
+  @retry(maxAttempts: 3, backoff: "exponential")
+  userData = database.users.find(request.userId)
+
+  @span("external_api_call", attributes: {
+    api: "partner-service",
+    timeout: "10s"
+  })
+  @circuit_breaker(threshold: 5, timeout: 60s)
+  externalData = partnerAPI.fetch(request.partnerId)
+
+  @span("llm_processing", attributes: {
+    model: "gpt-4.5",
+    max_tokens: 1000
+  })
+  @cost_tracked(userId: request.userId, feature: "workflow_processing")
+  processed = llm.process({
+    userData: userData,
+    externalData: externalData,
+    instructions: request.instructions
+  })
+
+  @span("store_results")
+  database.results.insert(processed)
+
+  return Ok(WorkflowOutput {
+    result: processed,
+    traceId: getCurrentTraceId()
+  })
+}
+
+// Security: Input sanitization
+@security_hardened(
+  injection_prevention: ["sql", "xss", "command"],
+  rate_limiting: { requests: 100, window: "1m" },
+  ip_whitelist: ["10.0.0.0/8", "192.168.0.0/16"]
+)
+@confidence(0.95)
+@complete
+func executeUserQuery(
+  query: UserQuery,
+  user: User
+) -> Result<QueryResult, SecurityError> {
+  // Automatic input sanitization
+  sanitized = SecurityGuard.sanitize(query) match {
+    Ok(q) -> q,
+    Err(e) -> {
+      @audit.security_violation({
+        type: "input_validation_failed",
+        userId: user.id,
+        violationType: e.violationType,
+        blocked: true
+      })
+      return Err(SecurityError.MALICIOUS_INPUT(e))
+    }
+  }
+
+  // Check rate limit
+  if RateLimiter.isExceeded(user.id) {
+    @audit.rate_limit_exceeded({ userId: user.id })
+    return Err(SecurityError.RATE_LIMIT_EXCEEDED)
+  }
+
+  // Execute with parameterized query (prevents SQL injection)
+  result = database.query(sanitized.sql, sanitized.params)
+
+  return Ok(result)
+}
+
+// Compliance reporting
+@compliance_report(
+  standard: "SOC2_TYPE2",
+  controls: ["access_control", "encryption", "audit_logging"]
+)
+@confidence(0.92)
+func generateSOC2Report(period: Period) -> SOC2Report {
+  auditLogs = collectAuditLogs(period)
+  accessLogs = collectAccessLogs(period)
+  securityIncidents = collectSecurityIncidents(period)
+
+  evidence = {
+    totalAuditEvents: auditLogs.length,
+    accessControlViolations: accessLogs.filter(l => l.denied).length,
+    securityIncidents: securityIncidents.length,
+    encryptionCoverage: calculateEncryptionCoverage(),
+    backupVerification: verifyBackups(period)
+  }
+
+  return SOC2Report {
+    period: period,
+    controls: evaluateControls(evidence),
+    evidence: evidence,
+    compliance: assessCompliance(evidence),
+    generatedAt: now(),
+    signature: signReport(evidence)
+  }
+}
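For readers unfamiliar with the agentic syntax, the enterprise example above leans on two constructs throughout: `Result<T, E>` return types and `match` over `Ok`/`Err` arms. The sketch below is a plain-TypeScript analogue of that pattern, included here only for orientation; the `Result`, `Ok`, `Err`, and `match` names are assumptions of this sketch, not the API actually emitted by the package's TypeScript generator.

```typescript
// Illustrative sketch only: a hand-written TypeScript analogue of the
// Result/match pattern used in the example above. These names are NOT the
// package's generated API.
type Ok<T> = { kind: "ok"; value: T };
type Err<E> = { kind: "err"; error: E };
type Result<T, E> = Ok<T> | Err<E>;

const Ok = <T>(value: T): Ok<T> => ({ kind: "ok", value });
const Err = <E>(error: E): Err<E> => ({ kind: "err", error });

// A minimal `match`: dispatch on the Ok/Err arm and return the arm's result.
function match<T, E, R>(
  r: Result<T, E>,
  arms: { Ok: (value: T) => R; Err: (error: E) => R }
): R {
  return r.kind === "ok" ? arms.Ok(r.value) : arms.Err(r.error);
}

// Usage in the style of processFinancialTransaction's validation step
// (hypothetical error shape for illustration).
type TransactionError = { code: string; detail: string };

function validateTransaction(amount: number): Result<number, TransactionError> {
  return amount > 0
    ? Ok(amount)
    : Err({ code: "VALIDATION_FAILED", detail: "amount must be positive" });
}

const outcome = match(validateTransaction(100), {
  Ok: (amount) => `processed ${amount}`,
  Err: (e) => `rejected: ${e.detail}`,
});
console.log(outcome);
```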
package/examples/ml-pipeline.agentic

@@ -0,0 +1,350 @@
+// Machine Learning Pipeline Example
+// Demonstrates: data processing, model inference, monitoring, cost optimization
+
+@module("ml_pipeline")
+
+// Data preprocessing with validation
+@confidence(0.91)
+@complete
+@needs(validator: DataValidator)
+@effects(io, state)
+@property("handles missing values")
+@property("normalizes numeric features")
+@property("encodes categorical features")
+func preprocessData(rawData: DataFrame) -> Result<ProcessedData, ValidationError> {
+  // Validate schema
+  @confidence(0.95)
+  validated = validator.validateSchema(rawData, schema: dataSchema) match {
+    Ok(data) -> data,
+    Err(errors) -> {
+      @context {
+        what_failed: "Schema validation",
+        errors: errors,
+        suggestions: ["Check data format", "Verify required columns present"]
+      }
+      return Err(ValidationError.SCHEMA_MISMATCH(errors))
+    }
+  }
+
+  // Handle missing values
+  @property("imputation preserves data distribution")
+  cleaned = validated
+    .fillna(strategy: "median", columns: numericColumns)
+    .fillna(strategy: "mode", columns: categoricalColumns)
+
+  // Normalize numeric features
+  @property("normalized values in range [0, 1]")
+  normalized = cleaned.normalize(columns: numericColumns, method: "min-max")
+
+  // Encode categorical features
+  @property("one-hot encoding preserves information")
+  encoded = normalized.oneHot(columns: categoricalColumns)
+
+  return Ok(ProcessedData {
+    data: encoded,
+    metadata: {
+      rowsProcessed: rawData.rows.length,
+      columnsProcessed: rawData.columns.length,
+      missingValuesHandled: countMissing(rawData),
+      encodingMaps: encoded.encodingMaps
+    }
+  })
+}
+
+// Model inference with cost tracking
+@cost_optimized(
+  cached_predictions: true,
+  batch_size: 32,
+  model_selection: "quality_vs_cost"
+)
+@confidence(0.89)
+@effects(llm_call, cost, async)
+func runInference(
+  input: ProcessedData,
+  userId: string
+) -> Result<Predictions, InferenceError> {
+  // Check cache first (zero cost)
+  cacheKey = hash(input)
+  cached = cache.get("predictions:${cacheKey}")
+
+  if cached {
+    @cost_saved(estimatedCost: 0.05)
+    @trace_decision("cache_hit", { cacheKey })
+    return Ok(cached)
+  }
+
+  // Check budget
+  budget = costRuntime.checkBudget(userId)
+
+  if budget.remaining < 0.10 {
+    return Err(InferenceError.BUDGET_EXCEEDED {
+      remaining: budget.remaining,
+      required: 0.10
+    })
+  }
+
+  // Select model based on input complexity
+  complexity = assessComplexity(input)
+
+  model = complexity match {
+    "simple" -> {
+      @llm_call(model: "gpt-4.5-turbo", max_tokens: 100)
+      @cost_tracked(userId: userId, feature: "inference_simple")
+      "gpt-4.5-turbo"
+    },
+    "moderate" -> {
+      @llm_call(model: "gpt-4.5", max_tokens: 500)
+      @cost_tracked(userId: userId, feature: "inference_moderate")
+      "gpt-4.5"
+    },
+    "complex" -> {
+      @llm_call(model: "o3", max_tokens: 2000)
+      @cost_tracked(userId: userId, feature: "inference_complex")
+      @requires_approval(if: budget.remaining < 1.00)
+      "o3"
+    }
+  }
+
+  // Run inference
+  @timeout(30s)
+  @retry(maxAttempts: 3, backoff: "exponential")
+  predictions = model.predict(input) match {
+    Ok(preds) -> preds,
+    Err(e) -> {
+      @context {
+        what_failed: "Model inference",
+        model: model,
+        inputSize: input.data.length,
+        suggestions: ["Check API status", "Verify input format", "Try simpler model"]
+      }
+      return Err(InferenceError.INFERENCE_FAILED(e))
+    }
+  }
+
+  // Cache successful predictions
+  cache.set("predictions:${cacheKey}", predictions, ttl: 1h)
+
+  // Monitor prediction confidence
+  @monitor_prediction_quality(
+    predictions: predictions,
+    threshold: 0.80,
+    alert_on_drift: true
+  )
+
+  return Ok(predictions)
+}
+
+// Batch inference for efficiency
+@batch_optimization(
+  maxBatchSize: 32,
+  maxWaitTime: 5s
+)
+@confidence(0.87)
+@effects(llm_call, cost, async)
+func batchInference(
+  inputs: ProcessedData[],
+  userId: string
+) -> Result<Predictions[], Error> {
+  // Batch requests to reduce per-request overhead
+  // 50-70% cost savings vs individual requests
+
+  batched = createBatches(inputs, size: 32)
+
+  @parallel(maxConcurrency: 4)
+  results = batched.map(batch => {
+    @llm_call(model: "gpt-4.5-turbo")
+    @cost_tracked(userId: userId, feature: "batch_inference")
+    return model.predictBatch(batch)
+  })
+
+  return Ok(results.flatten())
+}
+
+// Model monitoring and drift detection
+@healthcheck(interval: 5m)
+@confidence(0.88)
+@effects(database, io)
+func monitorModelPerformance() -> HealthStatus {
+  // Get recent predictions
+  predictions = database.predictions
+    .findRecent(limit: 1000)
+
+  // Calculate performance metrics
+  metrics = {
+    averageConfidence: mean(predictions.map(p => p.confidence)),
+    errorRate: predictions.filter(p => p.error).length / predictions.length,
+    latencyP95: percentile(predictions.map(p => p.latency), 0.95),
+    costPerPrediction: sum(predictions.map(p => p.cost)) / predictions.length
+  }
+
+  // Check for drift
+  if metrics.averageConfidence < 0.70 {
+    @alert("warning", {
+      message: "Model confidence declining",
+      averageConfidence: metrics.averageConfidence,
+      recommendation: "Consider retraining or model upgrade"
+    })
+    return HealthStatus.DEGRADED
+  }
+
+  if metrics.errorRate > 0.05 {
+    @alert("critical", {
+      message: "Error rate elevated",
+      errorRate: metrics.errorRate,
+      recommendation: "Investigate model or data issues"
+    })
+    return HealthStatus.FAILED
+  }
+
+  return HealthStatus.OK
+}
+
+// A/B testing for model comparison
+@ab_test(
+  variants: {
+    control: "gpt-4.5-turbo",
+    treatment: "fine-tuned-model-v2"
+  },
+  split: 0.5, // 50/50 split
+  metric: "prediction_accuracy",
+  duration: 7d
+)
+@confidence(0.86)
+func compareModels(input: ProcessedData) -> Predictions {
+  // Automatically routes 50% to each model
+  // Tracks performance metrics
+  // Reports which model is better
+}
+
+// Feature engineering with confidence tracking
+@confidence(0.84)
+@partial("Basic features only, domain expertise needed for advanced")
+@uncertain("Optimal feature set not determined via experimentation")
+func engineerFeatures(rawData: DataFrame) -> EngineeeredFeatures {
+  features = {
+    // Numeric features
+    age: rawData.age,
+    income: log(rawData.income + 1), // Log transform
+    creditScore: rawData.creditScore / 850, // Normalize
+
+    // Categorical features
+    category: oneHot(rawData.category),
+    region: embedding(rawData.region),
+
+    // Interaction features
+    ageIncomeRatio: rawData.age / (rawData.income + 1),
+
+    // Time-based features
+    dayOfWeek: rawData.timestamp.dayOfWeek(),
+    hourOfDay: rawData.timestamp.hour()
+  }
+
+  @trace_features({
+    engineered: Object.keys(features).length,
+    original: rawData.columns.length,
+    transformation: "numeric_log_categorical_onehot"
+  })
+
+  return features
+}
+
+// Full ML pipeline orchestration
+@workflow(persistent: true, resumable: true)
+@confidence(0.85)
+@effects(io, database, llm_call, cost, async)
+func mlPipeline(
+  dataSource: DataSource,
+  userId: string
+) -> Result<MLPipelineOutput, PipelineError> {
+  // Step 1: Extract data
+  @checkpoint("data_extraction")
+  @retry(maxAttempts: 3)
+  rawData = dataSource.extract() match {
+    Ok(data) -> data,
+    Err(e) -> return Err(PipelineError.EXTRACTION_FAILED(e))
+  }
+
+  // Step 2: Preprocess
+  @checkpoint("preprocessing")
+  @confidence(0.91)
+  processed = preprocessData(rawData) match {
+    Ok(data) -> data,
+    Err(e) -> return Err(PipelineError.PREPROCESSING_FAILED(e))
+  }
+
+  // Step 3: Feature engineering
+  @checkpoint("feature_engineering")
+  @confidence(0.84)
+  features = engineerFeatures(processed.data)
+
+  // Step 4: Run inference
+  @checkpoint("inference")
+  @budget_limit(user_daily: 10.00, action: "throttle")
+  predictions = runInference(features, userId) match {
+    Ok(preds) -> preds,
+    Err(e) -> return Err(PipelineError.INFERENCE_FAILED(e))
+  }
+
+  // Step 5: Post-process and validate
+  @checkpoint("postprocessing")
+  @confidence(0.93)
+  @property("predictions in valid range")
+  validated = validatePredictions(predictions) match {
+    Ok(preds) -> preds,
+    Err(e) -> {
+      @alert("critical", "Prediction validation failed")
+      return Err(PipelineError.INVALID_PREDICTIONS(e))
+    }
+  }
+
+  // Step 6: Store results
+  @checkpoint("storage")
+  @transaction
+  database.predictions.insert({
+    userId: userId,
+    predictions: validated,
+    metadata: {
+      dataSource: dataSource.name,
+      timestamp: now(),
+      modelVersion: getCurrentModelVersion(),
+      cost: getTotalCost(userId)
+    }
+  })
+
+  @checkpoint("complete")
+
+  return Ok(MLPipelineOutput {
+    predictions: validated,
+    metadata: {
+      processingTime: elapsed(),
+      totalCost: getTotalCost(userId),
+      confidence: averageConfidence(validated)
+    }
+  })
+}
+
+// Monitoring dashboard data
+@confidence(0.92)
+@effects(database, io)
+func getMLMetrics(period: Period) -> MLMetricsDashboard {
+  return MLMetricsDashboard {
+    totalPredictions: database.predictions.count(period),
+    averageLatency: database.predictions.averageLatency(period),
+    errorRate: database.predictions.errorRate(period),
+    costMetrics: {
+      totalCost: costRuntime.getTotal(period),
+      costPerPrediction: costRuntime.getAverage(period),
+      costByModel: costRuntime.getByModel(period)
+    },
+    qualityMetrics: {
+      averageConfidence: database.predictions.averageConfidence(period),
+      mutationScore: getLatestMutationScore(),
+      verificationStatus: getVerificationStatus()
+    },
+    usageMetrics: {
+      topUsers: database.predictions.topUsers(period, limit: 10),
+      topFeatures: database.predictions.topFeatures(period, limit: 10),
+      peakHours: database.predictions.getHourlyDistribution(period)
+    }
+  }
+}
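The `runInference` function above describes a cost-control flow: return a cached prediction when possible, refuse to call a model when the remaining budget drops below a floor, and route to a cheaper or stronger model based on input complexity. Below is a minimal hand-written TypeScript sketch of that flow for orientation only; the helper names, thresholds, and model identifiers mirror the example but are assumptions of this sketch, not agentic-lang runtime APIs.

```typescript
// Illustrative sketch only: cache-first, budget-guarded, complexity-routed
// inference in plain TypeScript. Not the package's runtime implementation.
type Complexity = "simple" | "moderate" | "complex";

const cache = new Map<string, string>();
const budgets = new Map<string, number>(); // userId -> remaining dollars (assumed)

function selectModel(complexity: Complexity): string {
  switch (complexity) {
    case "simple":
      return "gpt-4.5-turbo"; // cheapest option in the example
    case "moderate":
      return "gpt-4.5";
    case "complex":
      return "o3"; // the example gates this behind approval when budget is low
  }
}

async function runInference(
  inputKey: string,
  complexity: Complexity,
  userId: string,
  predict: (model: string) => Promise<string>
): Promise<string> {
  // 1. A cache hit costs nothing.
  const cached = cache.get(`predictions:${inputKey}`);
  if (cached !== undefined) return cached;

  // 2. Refuse to spend when the remaining budget is below the floor.
  const remaining = budgets.get(userId) ?? 0;
  if (remaining < 0.10) {
    throw new Error(`budget exceeded: $${remaining.toFixed(2)} remaining`);
  }

  // 3. Route to a model sized to the input, then cache the result.
  const model = selectModel(complexity);
  const prediction = await predict(model);
  cache.set(`predictions:${inputKey}`, prediction);
  return prediction;
}
```

Checking the cache before the budget means a cache hit never counts against the remaining spend, which matches the "zero cost" comment in the example.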