agentdb 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +38 -0
- package/LICENSE-APACHE +190 -0
- package/LICENSE-MIT +21 -0
- package/README.md +953 -0
- package/bin/agentdb.js +485 -0
- package/bin/plugin-cli-wrapper.mjs +21 -0
- package/dist/cache/query-cache.d.ts +105 -0
- package/dist/cache/query-cache.d.ts.map +1 -0
- package/dist/cache/query-cache.js +224 -0
- package/dist/cache/query-cache.js.map +1 -0
- package/dist/cache/query-cache.mjs +219 -0
- package/dist/cli/cache/query-cache.d.ts +104 -0
- package/dist/cli/cache/query-cache.js +244 -0
- package/dist/cli/cli/db-commands.d.ts +48 -0
- package/dist/cli/cli/db-commands.js +613 -0
- package/dist/cli/commands.d.ts +7 -0
- package/dist/cli/commands.d.ts.map +1 -0
- package/dist/cli/commands.js +113 -0
- package/dist/cli/commands.js.map +1 -0
- package/dist/cli/commands.mjs +104 -0
- package/dist/cli/core/backend-interface.d.ts +70 -0
- package/dist/cli/core/backend-interface.js +15 -0
- package/dist/cli/core/native-backend.d.ts +140 -0
- package/dist/cli/core/native-backend.js +432 -0
- package/dist/cli/core/vector-db.d.ts +126 -0
- package/dist/cli/core/vector-db.js +338 -0
- package/dist/cli/core/wasm-backend.d.ts +95 -0
- package/dist/cli/core/wasm-backend.js +418 -0
- package/dist/cli/db-commands.d.ts +49 -0
- package/dist/cli/db-commands.d.ts.map +1 -0
- package/dist/cli/db-commands.js +533 -0
- package/dist/cli/db-commands.js.map +1 -0
- package/dist/cli/db-commands.mjs +522 -0
- package/dist/cli/generator.d.ts +11 -0
- package/dist/cli/generator.d.ts.map +1 -0
- package/dist/cli/generator.js +567 -0
- package/dist/cli/generator.js.map +1 -0
- package/dist/cli/generator.mjs +527 -0
- package/dist/cli/help.d.ts +18 -0
- package/dist/cli/help.d.ts.map +1 -0
- package/dist/cli/help.js +676 -0
- package/dist/cli/help.js.map +1 -0
- package/dist/cli/help.mjs +667 -0
- package/dist/cli/index/hnsw.d.ts +164 -0
- package/dist/cli/index/hnsw.js +558 -0
- package/dist/cli/plugin-cli.d.ts +7 -0
- package/dist/cli/plugin-cli.d.ts.map +1 -0
- package/dist/cli/plugin-cli.js +295 -0
- package/dist/cli/plugin-cli.js.map +1 -0
- package/dist/cli/plugin-cli.mjs +289 -0
- package/dist/cli/quantization/product-quantization.d.ts +108 -0
- package/dist/cli/quantization/product-quantization.js +350 -0
- package/dist/cli/query/query-builder.d.ts +322 -0
- package/dist/cli/query/query-builder.js +600 -0
- package/dist/cli/templates.d.ts +14 -0
- package/dist/cli/templates.d.ts.map +1 -0
- package/dist/cli/templates.js +182 -0
- package/dist/cli/templates.js.map +1 -0
- package/dist/cli/templates.mjs +176 -0
- package/dist/cli/types/index.d.ts +116 -0
- package/dist/cli/types/index.js +5 -0
- package/dist/cli/types.d.ts +91 -0
- package/dist/cli/types.d.ts.map +1 -0
- package/dist/cli/types.js +6 -0
- package/dist/cli/types.js.map +1 -0
- package/dist/cli/types.mjs +4 -0
- package/dist/cli/wizard/index.d.ts +6 -0
- package/dist/cli/wizard/index.d.ts.map +1 -0
- package/dist/cli/wizard/index.js +138 -0
- package/dist/cli/wizard/index.js.map +1 -0
- package/dist/cli/wizard/index.mjs +131 -0
- package/dist/cli/wizard/prompts.d.ts +11 -0
- package/dist/cli/wizard/prompts.d.ts.map +1 -0
- package/dist/cli/wizard/prompts.js +482 -0
- package/dist/cli/wizard/prompts.js.map +1 -0
- package/dist/cli/wizard/prompts.mjs +470 -0
- package/dist/cli/wizard/validator.d.ts +13 -0
- package/dist/cli/wizard/validator.d.ts.map +1 -0
- package/dist/cli/wizard/validator.js +234 -0
- package/dist/cli/wizard/validator.js.map +1 -0
- package/dist/cli/wizard/validator.mjs +224 -0
- package/dist/core/backend-interface.d.ts +71 -0
- package/dist/core/backend-interface.d.ts.map +1 -0
- package/dist/core/backend-interface.js +16 -0
- package/dist/core/backend-interface.js.map +1 -0
- package/dist/core/backend-interface.mjs +12 -0
- package/dist/core/native-backend.d.ts +141 -0
- package/dist/core/native-backend.d.ts.map +1 -0
- package/dist/core/native-backend.js +457 -0
- package/dist/core/native-backend.js.map +1 -0
- package/dist/core/native-backend.mjs +449 -0
- package/dist/core/vector-db.d.ts +127 -0
- package/dist/core/vector-db.d.ts.map +1 -0
- package/dist/core/vector-db.js +266 -0
- package/dist/core/vector-db.js.map +1 -0
- package/dist/core/vector-db.mjs +261 -0
- package/dist/core/wasm-backend.d.ts +96 -0
- package/dist/core/wasm-backend.d.ts.map +1 -0
- package/dist/core/wasm-backend.js +393 -0
- package/dist/core/wasm-backend.js.map +1 -0
- package/dist/core/wasm-backend.mjs +385 -0
- package/dist/index/hnsw-optimized.d.ts +75 -0
- package/dist/index/hnsw-optimized.d.ts.map +1 -0
- package/dist/index/hnsw-optimized.js +412 -0
- package/dist/index/hnsw-optimized.js.map +1 -0
- package/dist/index/hnsw-optimized.mjs +407 -0
- package/dist/index/hnsw.d.ts +165 -0
- package/dist/index/hnsw.d.ts.map +1 -0
- package/dist/index/hnsw.js +521 -0
- package/dist/index/hnsw.js.map +1 -0
- package/dist/index/hnsw.mjs +516 -0
- package/dist/index.d.ts +57 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +82 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +63 -0
- package/dist/mcp-server.d.ts +27 -0
- package/dist/mcp-server.d.ts.map +1 -0
- package/dist/mcp-server.js +789 -0
- package/dist/mcp-server.js.map +1 -0
- package/dist/mcp-server.mjs +784 -0
- package/dist/plugins/base-plugin.d.ts +114 -0
- package/dist/plugins/base-plugin.d.ts.map +1 -0
- package/dist/plugins/base-plugin.js +313 -0
- package/dist/plugins/base-plugin.js.map +1 -0
- package/dist/plugins/base-plugin.mjs +275 -0
- package/dist/plugins/implementations/active-learning.d.ts +135 -0
- package/dist/plugins/implementations/active-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/active-learning.js +372 -0
- package/dist/plugins/implementations/active-learning.js.map +1 -0
- package/dist/plugins/implementations/active-learning.mjs +367 -0
- package/dist/plugins/implementations/actor-critic.d.ts +64 -0
- package/dist/plugins/implementations/actor-critic.d.ts.map +1 -0
- package/dist/plugins/implementations/actor-critic.js +363 -0
- package/dist/plugins/implementations/actor-critic.js.map +1 -0
- package/dist/plugins/implementations/actor-critic.mjs +358 -0
- package/dist/plugins/implementations/adversarial-training.d.ts +133 -0
- package/dist/plugins/implementations/adversarial-training.d.ts.map +1 -0
- package/dist/plugins/implementations/adversarial-training.js +409 -0
- package/dist/plugins/implementations/adversarial-training.js.map +1 -0
- package/dist/plugins/implementations/adversarial-training.mjs +404 -0
- package/dist/plugins/implementations/curriculum-learning.d.ts +132 -0
- package/dist/plugins/implementations/curriculum-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/curriculum-learning.js +354 -0
- package/dist/plugins/implementations/curriculum-learning.js.map +1 -0
- package/dist/plugins/implementations/curriculum-learning.mjs +349 -0
- package/dist/plugins/implementations/decision-transformer.d.ts +77 -0
- package/dist/plugins/implementations/decision-transformer.d.ts.map +1 -0
- package/dist/plugins/implementations/decision-transformer.js +422 -0
- package/dist/plugins/implementations/decision-transformer.js.map +1 -0
- package/dist/plugins/implementations/decision-transformer.mjs +417 -0
- package/dist/plugins/implementations/federated-learning.d.ts +126 -0
- package/dist/plugins/implementations/federated-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/federated-learning.js +436 -0
- package/dist/plugins/implementations/federated-learning.js.map +1 -0
- package/dist/plugins/implementations/federated-learning.mjs +431 -0
- package/dist/plugins/implementations/index.d.ts +30 -0
- package/dist/plugins/implementations/index.d.ts.map +1 -0
- package/dist/plugins/implementations/index.js +45 -0
- package/dist/plugins/implementations/index.js.map +1 -0
- package/dist/plugins/implementations/index.mjs +31 -0
- package/dist/plugins/implementations/multi-task-learning.d.ts +115 -0
- package/dist/plugins/implementations/multi-task-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/multi-task-learning.js +369 -0
- package/dist/plugins/implementations/multi-task-learning.js.map +1 -0
- package/dist/plugins/implementations/multi-task-learning.mjs +364 -0
- package/dist/plugins/implementations/neural-architecture-search.d.ts +148 -0
- package/dist/plugins/implementations/neural-architecture-search.d.ts.map +1 -0
- package/dist/plugins/implementations/neural-architecture-search.js +379 -0
- package/dist/plugins/implementations/neural-architecture-search.js.map +1 -0
- package/dist/plugins/implementations/neural-architecture-search.mjs +374 -0
- package/dist/plugins/implementations/q-learning.d.ts +98 -0
- package/dist/plugins/implementations/q-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/q-learning.js +435 -0
- package/dist/plugins/implementations/q-learning.js.map +1 -0
- package/dist/plugins/implementations/q-learning.mjs +430 -0
- package/dist/plugins/implementations/sarsa.d.ts +103 -0
- package/dist/plugins/implementations/sarsa.d.ts.map +1 -0
- package/dist/plugins/implementations/sarsa.js +347 -0
- package/dist/plugins/implementations/sarsa.js.map +1 -0
- package/dist/plugins/implementations/sarsa.mjs +342 -0
- package/dist/plugins/index.d.ts +107 -0
- package/dist/plugins/index.d.ts.map +1 -0
- package/dist/plugins/index.js +179 -0
- package/dist/plugins/index.js.map +1 -0
- package/dist/plugins/index.mjs +168 -0
- package/dist/plugins/interface.d.ts +439 -0
- package/dist/plugins/interface.d.ts.map +1 -0
- package/dist/plugins/interface.js +12 -0
- package/dist/plugins/interface.js.map +1 -0
- package/dist/plugins/interface.mjs +10 -0
- package/dist/plugins/learning-plugin.interface.d.ts +257 -0
- package/dist/plugins/learning-plugin.interface.d.ts.map +1 -0
- package/dist/plugins/learning-plugin.interface.js +7 -0
- package/dist/plugins/learning-plugin.interface.js.map +1 -0
- package/dist/plugins/learning-plugin.interface.mjs +5 -0
- package/dist/plugins/plugin-exports.d.ts +71 -0
- package/dist/plugins/plugin-exports.d.ts.map +1 -0
- package/dist/plugins/plugin-exports.js +78 -0
- package/dist/plugins/plugin-exports.js.map +1 -0
- package/dist/plugins/plugin-exports.mjs +69 -0
- package/dist/plugins/registry.d.ts +206 -0
- package/dist/plugins/registry.d.ts.map +1 -0
- package/dist/plugins/registry.js +365 -0
- package/dist/plugins/registry.js.map +1 -0
- package/dist/plugins/registry.mjs +356 -0
- package/dist/plugins/validator.d.ts +63 -0
- package/dist/plugins/validator.d.ts.map +1 -0
- package/dist/plugins/validator.js +464 -0
- package/dist/plugins/validator.js.map +1 -0
- package/dist/plugins/validator.mjs +458 -0
- package/dist/quantization/binary-quantization.d.ts +104 -0
- package/dist/quantization/binary-quantization.d.ts.map +1 -0
- package/dist/quantization/binary-quantization.js +246 -0
- package/dist/quantization/binary-quantization.js.map +1 -0
- package/dist/quantization/binary-quantization.mjs +240 -0
- package/dist/quantization/optimized-pq.d.ts +138 -0
- package/dist/quantization/optimized-pq.d.ts.map +1 -0
- package/dist/quantization/optimized-pq.js +320 -0
- package/dist/quantization/optimized-pq.js.map +1 -0
- package/dist/quantization/optimized-pq.mjs +313 -0
- package/dist/quantization/product-quantization.d.ts +109 -0
- package/dist/quantization/product-quantization.d.ts.map +1 -0
- package/dist/quantization/product-quantization.js +287 -0
- package/dist/quantization/product-quantization.js.map +1 -0
- package/dist/quantization/product-quantization.mjs +282 -0
- package/dist/quantization/scalar-quantization.d.ts +100 -0
- package/dist/quantization/scalar-quantization.d.ts.map +1 -0
- package/dist/quantization/scalar-quantization.js +324 -0
- package/dist/quantization/scalar-quantization.js.map +1 -0
- package/dist/quantization/scalar-quantization.mjs +319 -0
- package/dist/query/index.d.ts +6 -0
- package/dist/query/index.d.ts.map +1 -0
- package/dist/query/index.js +9 -0
- package/dist/query/index.js.map +1 -0
- package/dist/query/index.mjs +4 -0
- package/dist/query/query-builder.d.ts +323 -0
- package/dist/query/query-builder.d.ts.map +1 -0
- package/dist/query/query-builder.js +524 -0
- package/dist/query/query-builder.js.map +1 -0
- package/dist/query/query-builder.mjs +519 -0
- package/dist/reasoning/context-synthesizer.d.ts +57 -0
- package/dist/reasoning/context-synthesizer.d.ts.map +1 -0
- package/dist/reasoning/context-synthesizer.js +224 -0
- package/dist/reasoning/context-synthesizer.js.map +1 -0
- package/dist/reasoning/context-synthesizer.mjs +219 -0
- package/dist/reasoning/experience-curator.d.ts +66 -0
- package/dist/reasoning/experience-curator.d.ts.map +1 -0
- package/dist/reasoning/experience-curator.js +288 -0
- package/dist/reasoning/experience-curator.js.map +1 -0
- package/dist/reasoning/experience-curator.mjs +283 -0
- package/dist/reasoning/memory-optimizer.d.ts +69 -0
- package/dist/reasoning/memory-optimizer.d.ts.map +1 -0
- package/dist/reasoning/memory-optimizer.js +331 -0
- package/dist/reasoning/memory-optimizer.js.map +1 -0
- package/dist/reasoning/memory-optimizer.mjs +326 -0
- package/dist/reasoning/pattern-matcher.d.ts +59 -0
- package/dist/reasoning/pattern-matcher.d.ts.map +1 -0
- package/dist/reasoning/pattern-matcher.js +229 -0
- package/dist/reasoning/pattern-matcher.js.map +1 -0
- package/dist/reasoning/pattern-matcher.mjs +224 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.d.ts +118 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.d.ts.map +1 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.js +477 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.js.map +1 -0
- package/dist/reasoningbank/adapter/types.d.ts +113 -0
- package/dist/reasoningbank/adapter/types.d.ts.map +1 -0
- package/dist/reasoningbank/adapter/types.js +9 -0
- package/dist/reasoningbank/adapter/types.js.map +1 -0
- package/dist/reasoningbank/cli/commands.d.ts +16 -0
- package/dist/reasoningbank/cli/commands.d.ts.map +1 -0
- package/dist/reasoningbank/cli/commands.js +272 -0
- package/dist/reasoningbank/cli/commands.js.map +1 -0
- package/dist/reasoningbank/mcp/agentdb-tools.d.ts +319 -0
- package/dist/reasoningbank/mcp/agentdb-tools.d.ts.map +1 -0
- package/dist/reasoningbank/mcp/agentdb-tools.js +301 -0
- package/dist/reasoningbank/mcp/agentdb-tools.js.map +1 -0
- package/dist/reasoningbank/migration/migrate.d.ts +25 -0
- package/dist/reasoningbank/migration/migrate.d.ts.map +1 -0
- package/dist/reasoningbank/migration/migrate.js +178 -0
- package/dist/reasoningbank/migration/migrate.js.map +1 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.d.ts +37 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.js +114 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.js.map +1 -0
- package/dist/reasoningbank/reasoning/experience-curator.d.ts +39 -0
- package/dist/reasoningbank/reasoning/experience-curator.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/experience-curator.js +98 -0
- package/dist/reasoningbank/reasoning/experience-curator.js.map +1 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.d.ts +44 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.js +184 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.js.map +1 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.d.ts +40 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.js +87 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.js.map +1 -0
- package/dist/reasoningbank/sync/quic-sync.d.ts +77 -0
- package/dist/reasoningbank/sync/quic-sync.d.ts.map +1 -0
- package/dist/reasoningbank/sync/quic-sync.js +165 -0
- package/dist/reasoningbank/sync/quic-sync.js.map +1 -0
- package/dist/sync/conflict.d.ts +78 -0
- package/dist/sync/conflict.d.ts.map +1 -0
- package/dist/sync/conflict.js +202 -0
- package/dist/sync/conflict.js.map +1 -0
- package/dist/sync/conflict.mjs +196 -0
- package/dist/sync/coordinator.d.ts +111 -0
- package/dist/sync/coordinator.d.ts.map +1 -0
- package/dist/sync/coordinator.js +256 -0
- package/dist/sync/coordinator.js.map +1 -0
- package/dist/sync/coordinator.mjs +250 -0
- package/dist/sync/delta.d.ts +81 -0
- package/dist/sync/delta.d.ts.map +1 -0
- package/dist/sync/delta.js +245 -0
- package/dist/sync/delta.js.map +1 -0
- package/dist/sync/delta.mjs +238 -0
- package/dist/sync/index.d.ts +11 -0
- package/dist/sync/index.d.ts.map +1 -0
- package/dist/sync/index.js +22 -0
- package/dist/sync/index.js.map +1 -0
- package/dist/sync/index.mjs +9 -0
- package/dist/sync/quic-sync.d.ts +81 -0
- package/dist/sync/quic-sync.d.ts.map +1 -0
- package/dist/sync/quic-sync.js +329 -0
- package/dist/sync/quic-sync.js.map +1 -0
- package/dist/sync/quic-sync.mjs +323 -0
- package/dist/sync/types.d.ts +168 -0
- package/dist/sync/types.d.ts.map +1 -0
- package/dist/sync/types.js +8 -0
- package/dist/sync/types.js.map +1 -0
- package/dist/sync/types.mjs +6 -0
- package/dist/types/index.d.ts +117 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +6 -0
- package/dist/types/index.js.map +1 -0
- package/dist/types/index.mjs +4 -0
- package/dist/wasm-loader.d.ts +32 -0
- package/dist/wasm-loader.d.ts.map +1 -0
- package/dist/wasm-loader.js +75 -0
- package/dist/wasm-loader.js.map +1 -0
- package/dist/wasm-loader.mjs +64 -0
- package/examples/adaptive-learning.ts +284 -0
- package/examples/browser/README.md +732 -0
- package/examples/browser/adaptive-recommendations/index.html +427 -0
- package/examples/browser/collaborative-filtering/index.html +310 -0
- package/examples/browser/continual-learning/index.html +736 -0
- package/examples/browser/experience-replay/index.html +616 -0
- package/examples/browser/index.html +369 -0
- package/examples/browser/meta-learning/index.html +789 -0
- package/examples/browser/neuro-symbolic/index.html +692 -0
- package/examples/browser/pattern-learning/index.html +620 -0
- package/examples/browser/quantum-inspired/index.html +728 -0
- package/examples/browser/rag/index.html +624 -0
- package/examples/browser/swarm-intelligence/index.html +811 -0
- package/examples/browser-basic.html +170 -0
- package/examples/hnsw-example.ts +148 -0
- package/examples/node-basic.js +70 -0
- package/examples/quic-sync-example.ts +310 -0
- package/examples/quick-start.js +68 -0
- package/examples/wasm-example.ts +222 -0
- package/package.json +118 -0
|
@@ -0,0 +1,358 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Actor-Critic Plugin
|
|
3
|
+
*
|
|
4
|
+
* Implements the Actor-Critic algorithm with policy gradient learning.
|
|
5
|
+
* Combines value-based and policy-based methods:
|
|
6
|
+
* - Actor: Policy network that selects actions
|
|
7
|
+
* - Critic: Value network that evaluates actions
|
|
8
|
+
*
|
|
9
|
+
* Key features:
|
|
10
|
+
* - Policy gradient learning (REINFORCE with baseline)
|
|
11
|
+
* - Advantage estimation (GAE - Generalized Advantage Estimation)
|
|
12
|
+
* - Continuous or discrete action spaces
|
|
13
|
+
* - Natural policy gradients
|
|
14
|
+
*/
|
|
15
|
+
import { BasePlugin } from '../base-plugin.mjs';
|
|
16
|
+
/**
 * Actor network (policy).
 *
 * Two-layer feed-forward network that maps a state vector to a bounded
 * action vector (tanh output). Maintains Adam-style optimizer state for
 * every parameter tensor; the (intentionally simplified) policy-gradient
 * update adjusts only the output bias.
 */
class ActorNetwork {
  constructor(inputSize = 768, hiddenSize = 256, outputSize = 768) {
    this.inputSize = inputSize;
    this.hiddenSize = hiddenSize;
    this.outputSize = outputSize;
    // Trainable parameters: layer 1 (input -> hidden), layer 2 (hidden -> output).
    this.weights = {
      W1: this.initializeWeights(inputSize * hiddenSize, inputSize),
      b1: new Float32Array(hiddenSize),
      W2: this.initializeWeights(hiddenSize * outputSize, hiddenSize),
      b2: new Float32Array(outputSize),
    };
    // Adam optimizer state: first (m_*) and second (v_*) moment buffers
    // per parameter tensor, plus the shared timestep counter t.
    this.optimizer = {
      m_W1: new Float32Array(inputSize * hiddenSize),
      v_W1: new Float32Array(inputSize * hiddenSize),
      m_b1: new Float32Array(hiddenSize),
      v_b1: new Float32Array(hiddenSize),
      m_W2: new Float32Array(hiddenSize * outputSize),
      v_W2: new Float32Array(hiddenSize * outputSize),
      m_b2: new Float32Array(outputSize),
      v_b2: new Float32Array(outputSize),
      t: 0,
    };
  }
  /**
   * Fill a weight buffer with uniform noise scaled by sqrt(2 / fanIn)
   * (He-style magnitude with uniform rather than Gaussian sampling).
   */
  initializeWeights(size, fanIn) {
    const buffer = new Float32Array(size);
    const scale = Math.sqrt(2.0 / fanIn);
    for (let idx = 0; idx < size; idx++) {
      buffer[idx] = (Math.random() * 2 - 1) * scale;
    }
    return buffer;
  }
  /**
   * Forward pass: compute the policy's action mean for a state.
   * @param {ArrayLike<number>} state - State vector of length inputSize.
   * @returns {number[]} Action mean, each component bounded to [-1, 1] by tanh.
   */
  forward(state) {
    const stateVec = new Float32Array(state);
    const hidden = this.relu(
      this.add(this.matmul(this.weights.W1, stateVec, this.hiddenSize, this.inputSize), this.weights.b1),
    );
    const bounded = this.tanh(
      this.add(this.matmul(this.weights.W2, hidden, this.outputSize, this.hiddenSize), this.weights.b2),
    );
    return Array.from(bounded);
  }
  /**
   * Sample an exploratory action around the policy mean.
   * Adds uniform noise of magnitude 0.1 per component; the returned
   * logProb is the (unnormalized) Gaussian log-density of the sample.
   * @returns {{action: number[], logProb: number}}
   */
  sampleAction(state) {
    const mean = this.forward(state);
    const noiseScale = 0.1;
    const action = mean.map((m) => m + (Math.random() * 2 - 1) * noiseScale);
    let squaredSum = 0;
    for (let i = 0; i < action.length; i++) {
      const delta = action[i] - mean[i];
      squaredSum += (delta * delta) / (noiseScale * noiseScale);
    }
    return { action, logProb: -0.5 * squaredSum };
  }
  /**
   * Policy-gradient update (simplified: only the output bias is adjusted,
   * via bias-corrected Adam; a full implementation would backprop through
   * all layers).
   */
  updateWeights(states, actions, advantages, learningRate) {
    this.optimizer.t++;
    const beta1 = 0.9;
    const beta2 = 0.999;
    const epsilon = 1e-8;
    // Bias-correction denominators are loop-invariant for this step.
    const correct1 = 1 - Math.pow(beta1, this.optimizer.t);
    const correct2 = 1 - Math.pow(beta2, this.optimizer.t);
    for (let sample = 0; sample < states.length; sample++) {
      const predicted = this.forward(states[sample]);
      const advantage = advantages[sample];
      for (let out = 0; out < this.outputSize; out++) {
        // Simplified gradient: prediction error weighted by the advantage.
        const g = (predicted[out] - actions[sample][out]) * advantage;
        this.optimizer.m_b2[out] = beta1 * this.optimizer.m_b2[out] + (1 - beta1) * g;
        this.optimizer.v_b2[out] = beta2 * this.optimizer.v_b2[out] + (1 - beta2) * g * g;
        const mHat = this.optimizer.m_b2[out] / correct1;
        const vHat = this.optimizer.v_b2[out] / correct2;
        this.weights.b2[out] -= (learningRate * mHat) / (Math.sqrt(vHat) + epsilon);
      }
    }
  }
  /** Row-major (rows x cols) matrix times vector. */
  matmul(matrix, vector, rows, cols) {
    const product = new Float32Array(rows);
    for (let r = 0; r < rows; r++) {
      const rowOffset = r * cols;
      let acc = 0;
      for (let c = 0; c < cols; c++) {
        acc += matrix[rowOffset + c] * vector[c];
      }
      product[r] = acc;
    }
    return product;
  }
  /** Element-wise rectified linear unit. */
  relu(x) {
    return x.map((v) => Math.max(0, v));
  }
  /** Element-wise hyperbolic tangent. */
  tanh(x) {
    return x.map((v) => Math.tanh(v));
  }
  /** Element-wise vector addition (a and b must have equal length). */
  add(a, b) {
    return a.map((v, i) => v + b[i]);
  }
}
|
|
136
|
+
/**
 * Critic network (state-value function).
 *
 * Two-layer feed-forward network mapping a state vector to a single
 * scalar value estimate. Maintains Adam-style optimizer state; the
 * (intentionally simplified) TD update adjusts only the output bias.
 */
class CriticNetwork {
  constructor(inputSize = 768, hiddenSize = 128) {
    this.inputSize = inputSize;
    this.hiddenSize = hiddenSize;
    // Trainable parameters: layer 1 (input -> hidden), layer 2 (hidden -> scalar).
    this.weights = {
      W1: this.initializeWeights(inputSize * hiddenSize, inputSize),
      b1: new Float32Array(hiddenSize),
      W2: this.initializeWeights(hiddenSize, hiddenSize),
      b2: new Float32Array(1),
    };
    // Adam optimizer state: moment buffers per tensor plus timestep t.
    this.optimizer = {
      m_W1: new Float32Array(inputSize * hiddenSize),
      v_W1: new Float32Array(inputSize * hiddenSize),
      m_b1: new Float32Array(hiddenSize),
      v_b1: new Float32Array(hiddenSize),
      m_W2: new Float32Array(hiddenSize),
      v_W2: new Float32Array(hiddenSize),
      m_b2: new Float32Array(1),
      v_b2: new Float32Array(1),
      t: 0,
    };
  }
  /**
   * Fill a weight buffer with uniform noise scaled by sqrt(2 / fanIn)
   * (He-style magnitude with uniform sampling).
   */
  initializeWeights(size, fanIn) {
    const buffer = new Float32Array(size);
    const scale = Math.sqrt(2.0 / fanIn);
    for (let idx = 0; idx < size; idx++) {
      buffer[idx] = (Math.random() * 2 - 1) * scale;
    }
    return buffer;
  }
  /**
   * Forward pass: estimate the scalar value of a state.
   * @param {ArrayLike<number>} state - State vector of length inputSize.
   * @returns {number} Estimated state value.
   */
  forward(state) {
    const stateVec = new Float32Array(state);
    const hidden = this.relu(
      this.add(this.matmul(this.weights.W1, stateVec, this.hiddenSize, this.inputSize), this.weights.b1),
    );
    // Output layer is a dot product with the hidden activations plus bias.
    return hidden.reduce((acc, h, i) => acc + this.weights.W2[i] * h, this.weights.b2[0]);
  }
  /**
   * Regress the value estimate toward targets (simplified: only the output
   * bias is adjusted, via bias-corrected Adam).
   * @returns {number} Mean squared error over the batch.
   */
  updateWeights(states, targets, learningRate) {
    this.optimizer.t++;
    const beta1 = 0.9;
    const beta2 = 0.999;
    const epsilon = 1e-8;
    const correct1 = 1 - Math.pow(beta1, this.optimizer.t);
    const correct2 = 1 - Math.pow(beta2, this.optimizer.t);
    let totalLoss = 0;
    for (let sample = 0; sample < states.length; sample++) {
      const error = this.forward(states[sample]) - targets[sample];
      totalLoss += error * error;
      // Simplified gradient: the raw TD error drives the bias update.
      const g = error;
      this.optimizer.m_b2[0] = beta1 * this.optimizer.m_b2[0] + (1 - beta1) * g;
      this.optimizer.v_b2[0] = beta2 * this.optimizer.v_b2[0] + (1 - beta2) * g * g;
      const mHat = this.optimizer.m_b2[0] / correct1;
      const vHat = this.optimizer.v_b2[0] / correct2;
      this.weights.b2[0] -= (learningRate * mHat) / (Math.sqrt(vHat) + epsilon);
    }
    return totalLoss / states.length;
  }
  /** Row-major (rows x cols) matrix times vector. */
  matmul(matrix, vector, rows, cols) {
    const product = new Float32Array(rows);
    for (let r = 0; r < rows; r++) {
      const rowOffset = r * cols;
      let acc = 0;
      for (let c = 0; c < cols; c++) {
        acc += matrix[rowOffset + c] * vector[c];
      }
      product[r] = acc;
    }
    return product;
  }
  /** Element-wise rectified linear unit. */
  relu(x) {
    return x.map((v) => Math.max(0, v));
  }
  /** Element-wise vector addition (a and b must have equal length). */
  add(a, b) {
    return a.map((v, i) => v + b[i]);
  }
}
|
|
232
|
+
/**
 * Actor-Critic Plugin Implementation.
 *
 * Coordinates an ActorNetwork (policy) and a CriticNetwork (value
 * function). Experiences are buffered per episode; when an experience
 * marked `done` arrives, the critic is regressed toward GAE returns and
 * the actor is updated with the corresponding advantages.
 */
export class ActorCriticPlugin extends BasePlugin {
  constructor() {
    super(...arguments);
    this.name = 'actor-critic';
    this.version = '1.0.0';
    // Per-episode experience buffer, cleared after each training pass.
    this.experienceBuffer = [];
    // GAE lambda; may be overridden from config in onInitialize().
    this.gaeLambda = 0.95;
  }
  /**
   * Initialize actor and critic networks from plugin configuration.
   *
   * Uses ?? (nullish coalescing) rather than || so that explicitly
   * configured falsy-but-valid values (e.g. gaeLambda: 0) are respected
   * instead of being silently replaced by defaults.
   */
  async onInitialize() {
    const stateSize = this.config.algorithm.stateDim ?? 768;
    const actionSize = this.config.algorithm.actionDim ?? 768;
    const hiddenSize = this.config.algorithm.hiddenSize ?? 256;
    // Critic uses half the actor's hidden width (value estimation is simpler).
    this.actor = new ActorNetwork(stateSize, hiddenSize, actionSize);
    this.critic = new CriticNetwork(stateSize, hiddenSize / 2);
    this.gaeLambda = this.config.algorithm.gaeLambda ?? 0.95;
  }
  /**
   * Select an action by sampling from the actor's policy.
   *
   * @param state - Current state vector
   * @param context - Optional context (unused by this algorithm)
   * @returns Selected action with its sampling confidence
   */
  async selectAction(state, context) {
    this.checkInitialized();
    const { action, logProb } = this.actor.sampleAction(state);
    return {
      id: `actor_${Date.now()}`,
      embedding: action,
      source: 'policy',
      confidence: Math.exp(logProb), // Convert log probability to (0, 1] confidence.
      metadata: { logProb },
    };
  }
  /**
   * Buffer an experience; train and flush the buffer on episode end.
   */
  async onStoreExperience(experience) {
    this.experienceBuffer.push(experience);
    if (experience.done) {
      await this.trainOnEpisode();
      this.experienceBuffer = [];
    }
  }
  /**
   * Train both networks on the buffered episode.
   *
   * Computes GAE advantages from critic values, regresses the critic
   * toward the advantage-based returns, then applies the policy-gradient
   * update to the actor. No-op on an empty buffer.
   */
  async trainOnEpisode() {
    if (this.experienceBuffer.length === 0) {
      return;
    }
    // ?? preserves explicitly-configured falsy values (e.g. discountFactor: 0).
    const actorLR = this.config.algorithm.actorLr ?? 0.0001;
    const criticLR = this.config.algorithm.criticLr ?? 0.001;
    const gamma = this.config.algorithm.discountFactor ?? 0.99;
    const states = this.experienceBuffer.map((e) => e.state);
    const actions = this.experienceBuffer.map((e) => e.action);
    const rewards = this.experienceBuffer.map((e) => e.reward);
    const values = states.map((s) => this.critic.forward(s));
    const advantages = this.computeGAE(rewards, values, gamma, this.gaeLambda);
    // Returns = advantages + baseline values; targets for the critic.
    const returns = advantages.map((adv, i) => adv + values[i]);
    this.critic.updateWeights(states, returns, criticLR);
    this.actor.updateWeights(states, actions, advantages, actorLR);
  }
  /**
   * Compute Generalized Advantage Estimation (GAE).
   *
   * Iterates backwards, accumulating exponentially-discounted TD errors;
   * the value after the final step is treated as 0 (episode terminal).
   *
   * @param {number[]} rewards - Per-step rewards
   * @param {number[]} values - Critic value estimates, aligned with rewards
   * @param {number} gamma - Discount factor
   * @param {number} lambda - GAE smoothing parameter
   * @returns {number[]} Advantage estimate per step
   */
  computeGAE(rewards, values, gamma, lambda) {
    const advantages = new Array(rewards.length);
    let gae = 0;
    for (let t = rewards.length - 1; t >= 0; t--) {
      const nextValue = t === rewards.length - 1 ? 0 : values[t + 1];
      const delta = rewards[t] + gamma * nextValue - values[t];
      gae = delta + gamma * lambda * gae;
      advantages[t] = gae;
    }
    return advantages;
  }
  /**
   * Offline training hook.
   *
   * Actor-critic learning happens online in trainOnEpisode(); this method
   * only reports the critic's average value over the current buffer as a
   * diagnostic metric. (The previous implementation also read
   * options.epochs without using it; that dead code has been removed.)
   *
   * @param {{epochs?: number}} [options] - Accepted for interface
   *   compatibility; unused by online training.
   * @returns {{loss: number, avgQValue: number, policyEntropy: number}}
   */
  async train(options) {
    this.checkInitialized();
    let valueSum = 0;
    for (const exp of this.experienceBuffer) {
      valueSum += this.critic.forward(exp.state);
    }
    const count = this.experienceBuffer.length;
    return {
      loss: 0, // Real loss is computed during per-episode training.
      avgQValue: count > 0 ? valueSum / count : 0,
      policyEntropy: 0, // Would require the full policy distribution to compute.
    };
  }
  /**
   * Save actor and critic networks.
   * NOTE(review): stub — logs only; production should serialize both networks.
   */
  async onSave(path) {
    console.log(`Saving Actor-Critic model to ${path}`);
    // In production, serialize both networks to file
  }
  /**
   * Load actor and critic networks.
   * NOTE(review): stub — logs only; production should deserialize both networks.
   */
  async onLoad(path) {
    console.log(`Loading Actor-Critic model from ${path}`);
    // In production, deserialize both networks from file
  }
}
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Adversarial Training Plugin
|
|
3
|
+
*
|
|
4
|
+
* Implements robust learning through adversarial examples generation
|
|
5
|
+
* and training, improving model resilience to perturbations.
|
|
6
|
+
*
|
|
7
|
+
* Key features:
|
|
8
|
+
* - FGSM (Fast Gradient Sign Method)
|
|
9
|
+
* - PGD (Projected Gradient Descent) attacks
|
|
10
|
+
* - Adversarial augmentation
|
|
11
|
+
* - Certified defense mechanisms
|
|
12
|
+
* - Robustness evaluation
|
|
13
|
+
*/
|
|
14
|
+
import { BasePlugin } from '../base-plugin';
|
|
15
|
+
import { Action, Context, Experience, TrainOptions, TrainingMetrics, Vector } from '../learning-plugin.interface';
|
|
16
|
+
/**
|
|
17
|
+
* Attack type for adversarial generation
|
|
18
|
+
*/
|
|
19
|
+
type AttackType = 'fgsm' | 'pgd' | 'cw' | 'deepfool' | 'boundary';
|
|
20
|
+
/**
 * An adversarial example paired with the clean input it was derived from.
 */
interface AdversarialExample {
  // The unmodified input vector.
  original: Vector;
  // The perturbed input — presumably original + perturbation; confirm
  // against the implementation in adversarial-training.ts.
  adversarial: Vector;
  // The perturbation applied to the original input.
  perturbation: Vector;
  // Model confidence on the adversarial input — range not established
  // here (NOTE(review): likely [0, 1]; verify in the implementation).
  confidence: number;
  // Which attack algorithm produced this example.
  attackType: AttackType;
}
|
|
30
|
+
/**
 * Adversarial Training Plugin Implementation.
 *
 * Ambient declaration (.d.ts) — bodies live in the corresponding
 * implementation file; only the public/private surface is declared here.
 */
export declare class AdversarialTrainingPlugin extends BasePlugin {
    /** Plugin identifier. */
    name: string;
    /** Plugin version string. */
    version: string;
    /** In-memory experience store (this plugin overrides storeExperience to avoid the vector DB). */
    private experiences;
    /** Attack algorithm used when generating adversarial examples. */
    private attackType;
    /** Perturbation budget — presumably the epsilon-ball radius used by the attacks; confirm in implementation. */
    private epsilon;
    /** NOTE(review): likely the per-iteration step size for iterative attacks such as PGD — confirm. */
    private alpha;
    /** NOTE(review): likely the iteration count for iterative attacks — confirm. */
    private iterations;
    /** NOTE(review): likely the fraction of adversarial samples mixed into training — confirm. */
    private adversarialRatio;
    /** Whether certified defense mechanisms are enabled (see file header). */
    private certifiedDefense;
    /** Recorded robustness evaluation scores. */
    private robustnessScores;
    /** Adversarial examples generated so far. */
    private adversarialExamples;
    /** @param config Optional partial configuration; unspecified fields use defaults. */
    constructor(config?: Partial<any>);
    /**
     * Override to skip initialization check for in-memory operation
     */
    protected checkInitialized(): void;
    /**
     * Override selectAction to provide base implementation
     */
    selectAction(state: Vector | any, context?: Context): Promise<Action>;
    /**
     * Override to store experiences in-memory without vectorDB
     */
    storeExperience(experience: Experience): Promise<void>;
    /**
     * Override to retrieve from local experiences
     */
    retrieveSimilar(state: number[], k: number): Promise<import('../..').SearchResult<Experience>[]>;
    /**
     * Generate adversarial example using FGSM
     */
    private generateFGSM;
    /**
     * Generate adversarial example using PGD
     */
    private generatePGD;
    /**
     * Generate adversarial example using DeepFool
     */
    private generateDeepFool;
    /**
     * Compute gradient of loss with respect to input
     */
    private computeGradient;
    /**
     * Multi-class predictions (simplified)
     */
    private multiClassPredict;
    /**
     * Compute decision boundary between two classes
     */
    private computeDecisionBoundary;
    /**
     * Project adversarial example to epsilon ball
     */
    private projectToEpsilonBall;
    /**
     * L2 norm of vector
     */
    private l2Norm;
    /**
     * Clip values to valid range [0, 1]
     */
    private clipToValid;
    /**
     * Generate adversarial example
     */
    generateAdversarialExample(state: Vector, target: number): Promise<AdversarialExample>;
    /**
     * Train with adversarial examples
     */
    train(options?: TrainOptions): Promise<TrainingMetrics>;
    /**
     * Evaluate model robustness
     */
    private evaluateRobustness;
    /**
     * Get adversarial training statistics
     */
    getAdversarialStats(): {
        attackType: AttackType;
        epsilon: number;
        examplesGenerated: number;
        averageRobustness: number;
        avgPerturbationNorm: number;
    };
    /**
     * Test model against specific attack
     */
    testAgainstAttack(samples: Vector[] | Array<{
        state: Vector;
        label?: number;
    }>, attackType: AttackType): Promise<{
        cleanAccuracy: number;
        robustAccuracy: number;
        avgPerturbation?: number;
    }>;
}
|
|
132
|
+
export {};
|
|
133
|
+
//# sourceMappingURL=adversarial-training.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"adversarial-training.d.ts","sourceRoot":"","sources":["../../../src/plugins/implementations/adversarial-training.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAEH,OAAO,EAAE,UAAU,EAAE,MAAM,gBAAgB,CAAC;AAC5C,OAAO,EACL,MAAM,EACN,OAAO,EACP,UAAU,EACV,YAAY,EACZ,eAAe,EACf,MAAM,EACP,MAAM,8BAA8B,CAAC;AAEtC;;GAEG;AACH,KAAK,UAAU,GACX,MAAM,GACN,KAAK,GACL,IAAI,GACJ,UAAU,GACV,UAAU,CAAC;AAEf;;GAEG;AACH,UAAU,kBAAkB;IAC1B,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,YAAY,EAAE,MAAM,CAAC;IACrB,UAAU,EAAE,MAAM,CAAC;IACnB,UAAU,EAAE,UAAU,CAAC;CACxB;AAED;;GAEG;AACH,qBAAa,yBAA0B,SAAQ,UAAU;IACvD,IAAI,SAA0B;IAC9B,OAAO,SAAW;IAElB,OAAO,CAAC,WAAW,CAAoB;IACvC,OAAO,CAAC,UAAU,CAAsB;IACxC,OAAO,CAAC,OAAO,CAAe;IAC9B,OAAO,CAAC,KAAK,CAAgB;IAC7B,OAAO,CAAC,UAAU,CAAc;IAGhC,OAAO,CAAC,gBAAgB,CAAe;IACvC,OAAO,CAAC,gBAAgB,CAAkB;IAG1C,OAAO,CAAC,gBAAgB,CAAgB;IACxC,OAAO,CAAC,mBAAmB,CAA4B;gBAE3C,MAAM,CAAC,EAAE,OAAO,CAAC,GAAG,CAAC;IAejC;;OAEG;IACH,SAAS,CAAC,gBAAgB,IAAI,IAAI;IAIlC;;OAEG;IACG,YAAY,CAAC,KAAK,EAAE,MAAM,GAAG,GAAG,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,MAAM,CAAC;IAa3E;;OAEG;IACG,eAAe,CAAC,UAAU,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAI5D;;OAEG;IACG,eAAe,CAAC,KAAK,EAAE,MAAM,EAAE,EAAE,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,OAAO,EAAE,YAAY,CAAC,UAAU,CAAC,EAAE,CAAC;IAStG;;OAEG;YACW,YAAY;IAmC1B;;OAEG;YACW,WAAW;IA2CzB;;OAEG;YACW,gBAAgB;IA4C9B;;OAEG;YACW,eAAe;IA0B7B;;OAEG;YACW,iBAAiB;IAa/B;;OAEG;YACW,uBAAuB;IAcrC;;OAEG;IACH,OAAO,CAAC,oBAAoB;IAa5B;;OAEG;IACH,OAAO,CAAC,MAAM;IAId;;OAEG;IACH,OAAO,CAAC,WAAW;IAInB;;OAEG;IACG,0BAA0B,CAC9B,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,GACb,OAAO,CAAC,kBAAkB,CAAC;IAa9B;;OAEG;IACG,KAAK,CAAC,OAAO,CAAC,EAAE,YAAY,GAAG,OAAO,CAAC,eAAe,CAAC;IAkE7D;;OAEG;YACW,kBAAkB;IA2BhC;;OAEG;IACH,mBAAmB,IAAI;QACrB,UAAU,EAAE,UAAU,CAAC;QACvB,OAAO,EAAE,MAAM,CAAC;QAChB,iBAAiB,EAAE,MAAM,CAAC;QAC1B,iBAAiB,EAAE,MAAM,CAAC;QAC1B,mBAAmB,EAAE,MAAM,CAAC;KAC7B;IAmBD;;OAEG;IACG,iBAAiB,CACrB,OAAO,EAAE,MAAM,EAAE,GAAG,KAAK,CAAC;QAAE,KAAK,EAAE,MAAM,CAAC;QAAC,KAAK,CAAC,EAAE,MAAM,CAAA;KAAE,CAAC,EAC5
D,UAAU,EAAE,UAAU,GACrB,OAAO,CAAC;QAAE,aAAa,EAAE,MAAM,CAAC;QAAC,cAAc,EAAE,MAAM,CAAC;QAAC,eAAe,CAAC,EAAE,MAAM,CAAA;KAAE,CAAC;CA2CxF"}
|