agentdb 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +38 -0
- package/LICENSE-APACHE +190 -0
- package/LICENSE-MIT +21 -0
- package/README.md +953 -0
- package/bin/agentdb.js +485 -0
- package/bin/plugin-cli-wrapper.mjs +21 -0
- package/dist/cache/query-cache.d.ts +105 -0
- package/dist/cache/query-cache.d.ts.map +1 -0
- package/dist/cache/query-cache.js +224 -0
- package/dist/cache/query-cache.js.map +1 -0
- package/dist/cache/query-cache.mjs +219 -0
- package/dist/cli/cache/query-cache.d.ts +104 -0
- package/dist/cli/cache/query-cache.js +244 -0
- package/dist/cli/cli/db-commands.d.ts +48 -0
- package/dist/cli/cli/db-commands.js +613 -0
- package/dist/cli/commands.d.ts +7 -0
- package/dist/cli/commands.d.ts.map +1 -0
- package/dist/cli/commands.js +113 -0
- package/dist/cli/commands.js.map +1 -0
- package/dist/cli/commands.mjs +104 -0
- package/dist/cli/core/backend-interface.d.ts +70 -0
- package/dist/cli/core/backend-interface.js +15 -0
- package/dist/cli/core/native-backend.d.ts +140 -0
- package/dist/cli/core/native-backend.js +432 -0
- package/dist/cli/core/vector-db.d.ts +126 -0
- package/dist/cli/core/vector-db.js +338 -0
- package/dist/cli/core/wasm-backend.d.ts +95 -0
- package/dist/cli/core/wasm-backend.js +418 -0
- package/dist/cli/db-commands.d.ts +49 -0
- package/dist/cli/db-commands.d.ts.map +1 -0
- package/dist/cli/db-commands.js +533 -0
- package/dist/cli/db-commands.js.map +1 -0
- package/dist/cli/db-commands.mjs +522 -0
- package/dist/cli/generator.d.ts +11 -0
- package/dist/cli/generator.d.ts.map +1 -0
- package/dist/cli/generator.js +567 -0
- package/dist/cli/generator.js.map +1 -0
- package/dist/cli/generator.mjs +527 -0
- package/dist/cli/help.d.ts +18 -0
- package/dist/cli/help.d.ts.map +1 -0
- package/dist/cli/help.js +676 -0
- package/dist/cli/help.js.map +1 -0
- package/dist/cli/help.mjs +667 -0
- package/dist/cli/index/hnsw.d.ts +164 -0
- package/dist/cli/index/hnsw.js +558 -0
- package/dist/cli/plugin-cli.d.ts +7 -0
- package/dist/cli/plugin-cli.d.ts.map +1 -0
- package/dist/cli/plugin-cli.js +295 -0
- package/dist/cli/plugin-cli.js.map +1 -0
- package/dist/cli/plugin-cli.mjs +289 -0
- package/dist/cli/quantization/product-quantization.d.ts +108 -0
- package/dist/cli/quantization/product-quantization.js +350 -0
- package/dist/cli/query/query-builder.d.ts +322 -0
- package/dist/cli/query/query-builder.js +600 -0
- package/dist/cli/templates.d.ts +14 -0
- package/dist/cli/templates.d.ts.map +1 -0
- package/dist/cli/templates.js +182 -0
- package/dist/cli/templates.js.map +1 -0
- package/dist/cli/templates.mjs +176 -0
- package/dist/cli/types/index.d.ts +116 -0
- package/dist/cli/types/index.js +5 -0
- package/dist/cli/types.d.ts +91 -0
- package/dist/cli/types.d.ts.map +1 -0
- package/dist/cli/types.js +6 -0
- package/dist/cli/types.js.map +1 -0
- package/dist/cli/types.mjs +4 -0
- package/dist/cli/wizard/index.d.ts +6 -0
- package/dist/cli/wizard/index.d.ts.map +1 -0
- package/dist/cli/wizard/index.js +138 -0
- package/dist/cli/wizard/index.js.map +1 -0
- package/dist/cli/wizard/index.mjs +131 -0
- package/dist/cli/wizard/prompts.d.ts +11 -0
- package/dist/cli/wizard/prompts.d.ts.map +1 -0
- package/dist/cli/wizard/prompts.js +482 -0
- package/dist/cli/wizard/prompts.js.map +1 -0
- package/dist/cli/wizard/prompts.mjs +470 -0
- package/dist/cli/wizard/validator.d.ts +13 -0
- package/dist/cli/wizard/validator.d.ts.map +1 -0
- package/dist/cli/wizard/validator.js +234 -0
- package/dist/cli/wizard/validator.js.map +1 -0
- package/dist/cli/wizard/validator.mjs +224 -0
- package/dist/core/backend-interface.d.ts +71 -0
- package/dist/core/backend-interface.d.ts.map +1 -0
- package/dist/core/backend-interface.js +16 -0
- package/dist/core/backend-interface.js.map +1 -0
- package/dist/core/backend-interface.mjs +12 -0
- package/dist/core/native-backend.d.ts +141 -0
- package/dist/core/native-backend.d.ts.map +1 -0
- package/dist/core/native-backend.js +457 -0
- package/dist/core/native-backend.js.map +1 -0
- package/dist/core/native-backend.mjs +449 -0
- package/dist/core/vector-db.d.ts +127 -0
- package/dist/core/vector-db.d.ts.map +1 -0
- package/dist/core/vector-db.js +266 -0
- package/dist/core/vector-db.js.map +1 -0
- package/dist/core/vector-db.mjs +261 -0
- package/dist/core/wasm-backend.d.ts +96 -0
- package/dist/core/wasm-backend.d.ts.map +1 -0
- package/dist/core/wasm-backend.js +393 -0
- package/dist/core/wasm-backend.js.map +1 -0
- package/dist/core/wasm-backend.mjs +385 -0
- package/dist/index/hnsw-optimized.d.ts +75 -0
- package/dist/index/hnsw-optimized.d.ts.map +1 -0
- package/dist/index/hnsw-optimized.js +412 -0
- package/dist/index/hnsw-optimized.js.map +1 -0
- package/dist/index/hnsw-optimized.mjs +407 -0
- package/dist/index/hnsw.d.ts +165 -0
- package/dist/index/hnsw.d.ts.map +1 -0
- package/dist/index/hnsw.js +521 -0
- package/dist/index/hnsw.js.map +1 -0
- package/dist/index/hnsw.mjs +516 -0
- package/dist/index.d.ts +57 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +82 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +63 -0
- package/dist/mcp-server.d.ts +27 -0
- package/dist/mcp-server.d.ts.map +1 -0
- package/dist/mcp-server.js +789 -0
- package/dist/mcp-server.js.map +1 -0
- package/dist/mcp-server.mjs +784 -0
- package/dist/plugins/base-plugin.d.ts +114 -0
- package/dist/plugins/base-plugin.d.ts.map +1 -0
- package/dist/plugins/base-plugin.js +313 -0
- package/dist/plugins/base-plugin.js.map +1 -0
- package/dist/plugins/base-plugin.mjs +275 -0
- package/dist/plugins/implementations/active-learning.d.ts +135 -0
- package/dist/plugins/implementations/active-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/active-learning.js +372 -0
- package/dist/plugins/implementations/active-learning.js.map +1 -0
- package/dist/plugins/implementations/active-learning.mjs +367 -0
- package/dist/plugins/implementations/actor-critic.d.ts +64 -0
- package/dist/plugins/implementations/actor-critic.d.ts.map +1 -0
- package/dist/plugins/implementations/actor-critic.js +363 -0
- package/dist/plugins/implementations/actor-critic.js.map +1 -0
- package/dist/plugins/implementations/actor-critic.mjs +358 -0
- package/dist/plugins/implementations/adversarial-training.d.ts +133 -0
- package/dist/plugins/implementations/adversarial-training.d.ts.map +1 -0
- package/dist/plugins/implementations/adversarial-training.js +409 -0
- package/dist/plugins/implementations/adversarial-training.js.map +1 -0
- package/dist/plugins/implementations/adversarial-training.mjs +404 -0
- package/dist/plugins/implementations/curriculum-learning.d.ts +132 -0
- package/dist/plugins/implementations/curriculum-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/curriculum-learning.js +354 -0
- package/dist/plugins/implementations/curriculum-learning.js.map +1 -0
- package/dist/plugins/implementations/curriculum-learning.mjs +349 -0
- package/dist/plugins/implementations/decision-transformer.d.ts +77 -0
- package/dist/plugins/implementations/decision-transformer.d.ts.map +1 -0
- package/dist/plugins/implementations/decision-transformer.js +422 -0
- package/dist/plugins/implementations/decision-transformer.js.map +1 -0
- package/dist/plugins/implementations/decision-transformer.mjs +417 -0
- package/dist/plugins/implementations/federated-learning.d.ts +126 -0
- package/dist/plugins/implementations/federated-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/federated-learning.js +436 -0
- package/dist/plugins/implementations/federated-learning.js.map +1 -0
- package/dist/plugins/implementations/federated-learning.mjs +431 -0
- package/dist/plugins/implementations/index.d.ts +30 -0
- package/dist/plugins/implementations/index.d.ts.map +1 -0
- package/dist/plugins/implementations/index.js +45 -0
- package/dist/plugins/implementations/index.js.map +1 -0
- package/dist/plugins/implementations/index.mjs +31 -0
- package/dist/plugins/implementations/multi-task-learning.d.ts +115 -0
- package/dist/plugins/implementations/multi-task-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/multi-task-learning.js +369 -0
- package/dist/plugins/implementations/multi-task-learning.js.map +1 -0
- package/dist/plugins/implementations/multi-task-learning.mjs +364 -0
- package/dist/plugins/implementations/neural-architecture-search.d.ts +148 -0
- package/dist/plugins/implementations/neural-architecture-search.d.ts.map +1 -0
- package/dist/plugins/implementations/neural-architecture-search.js +379 -0
- package/dist/plugins/implementations/neural-architecture-search.js.map +1 -0
- package/dist/plugins/implementations/neural-architecture-search.mjs +374 -0
- package/dist/plugins/implementations/q-learning.d.ts +98 -0
- package/dist/plugins/implementations/q-learning.d.ts.map +1 -0
- package/dist/plugins/implementations/q-learning.js +435 -0
- package/dist/plugins/implementations/q-learning.js.map +1 -0
- package/dist/plugins/implementations/q-learning.mjs +430 -0
- package/dist/plugins/implementations/sarsa.d.ts +103 -0
- package/dist/plugins/implementations/sarsa.d.ts.map +1 -0
- package/dist/plugins/implementations/sarsa.js +347 -0
- package/dist/plugins/implementations/sarsa.js.map +1 -0
- package/dist/plugins/implementations/sarsa.mjs +342 -0
- package/dist/plugins/index.d.ts +107 -0
- package/dist/plugins/index.d.ts.map +1 -0
- package/dist/plugins/index.js +179 -0
- package/dist/plugins/index.js.map +1 -0
- package/dist/plugins/index.mjs +168 -0
- package/dist/plugins/interface.d.ts +439 -0
- package/dist/plugins/interface.d.ts.map +1 -0
- package/dist/plugins/interface.js +12 -0
- package/dist/plugins/interface.js.map +1 -0
- package/dist/plugins/interface.mjs +10 -0
- package/dist/plugins/learning-plugin.interface.d.ts +257 -0
- package/dist/plugins/learning-plugin.interface.d.ts.map +1 -0
- package/dist/plugins/learning-plugin.interface.js +7 -0
- package/dist/plugins/learning-plugin.interface.js.map +1 -0
- package/dist/plugins/learning-plugin.interface.mjs +5 -0
- package/dist/plugins/plugin-exports.d.ts +71 -0
- package/dist/plugins/plugin-exports.d.ts.map +1 -0
- package/dist/plugins/plugin-exports.js +78 -0
- package/dist/plugins/plugin-exports.js.map +1 -0
- package/dist/plugins/plugin-exports.mjs +69 -0
- package/dist/plugins/registry.d.ts +206 -0
- package/dist/plugins/registry.d.ts.map +1 -0
- package/dist/plugins/registry.js +365 -0
- package/dist/plugins/registry.js.map +1 -0
- package/dist/plugins/registry.mjs +356 -0
- package/dist/plugins/validator.d.ts +63 -0
- package/dist/plugins/validator.d.ts.map +1 -0
- package/dist/plugins/validator.js +464 -0
- package/dist/plugins/validator.js.map +1 -0
- package/dist/plugins/validator.mjs +458 -0
- package/dist/quantization/binary-quantization.d.ts +104 -0
- package/dist/quantization/binary-quantization.d.ts.map +1 -0
- package/dist/quantization/binary-quantization.js +246 -0
- package/dist/quantization/binary-quantization.js.map +1 -0
- package/dist/quantization/binary-quantization.mjs +240 -0
- package/dist/quantization/optimized-pq.d.ts +138 -0
- package/dist/quantization/optimized-pq.d.ts.map +1 -0
- package/dist/quantization/optimized-pq.js +320 -0
- package/dist/quantization/optimized-pq.js.map +1 -0
- package/dist/quantization/optimized-pq.mjs +313 -0
- package/dist/quantization/product-quantization.d.ts +109 -0
- package/dist/quantization/product-quantization.d.ts.map +1 -0
- package/dist/quantization/product-quantization.js +287 -0
- package/dist/quantization/product-quantization.js.map +1 -0
- package/dist/quantization/product-quantization.mjs +282 -0
- package/dist/quantization/scalar-quantization.d.ts +100 -0
- package/dist/quantization/scalar-quantization.d.ts.map +1 -0
- package/dist/quantization/scalar-quantization.js +324 -0
- package/dist/quantization/scalar-quantization.js.map +1 -0
- package/dist/quantization/scalar-quantization.mjs +319 -0
- package/dist/query/index.d.ts +6 -0
- package/dist/query/index.d.ts.map +1 -0
- package/dist/query/index.js +9 -0
- package/dist/query/index.js.map +1 -0
- package/dist/query/index.mjs +4 -0
- package/dist/query/query-builder.d.ts +323 -0
- package/dist/query/query-builder.d.ts.map +1 -0
- package/dist/query/query-builder.js +524 -0
- package/dist/query/query-builder.js.map +1 -0
- package/dist/query/query-builder.mjs +519 -0
- package/dist/reasoning/context-synthesizer.d.ts +57 -0
- package/dist/reasoning/context-synthesizer.d.ts.map +1 -0
- package/dist/reasoning/context-synthesizer.js +224 -0
- package/dist/reasoning/context-synthesizer.js.map +1 -0
- package/dist/reasoning/context-synthesizer.mjs +219 -0
- package/dist/reasoning/experience-curator.d.ts +66 -0
- package/dist/reasoning/experience-curator.d.ts.map +1 -0
- package/dist/reasoning/experience-curator.js +288 -0
- package/dist/reasoning/experience-curator.js.map +1 -0
- package/dist/reasoning/experience-curator.mjs +283 -0
- package/dist/reasoning/memory-optimizer.d.ts +69 -0
- package/dist/reasoning/memory-optimizer.d.ts.map +1 -0
- package/dist/reasoning/memory-optimizer.js +331 -0
- package/dist/reasoning/memory-optimizer.js.map +1 -0
- package/dist/reasoning/memory-optimizer.mjs +326 -0
- package/dist/reasoning/pattern-matcher.d.ts +59 -0
- package/dist/reasoning/pattern-matcher.d.ts.map +1 -0
- package/dist/reasoning/pattern-matcher.js +229 -0
- package/dist/reasoning/pattern-matcher.js.map +1 -0
- package/dist/reasoning/pattern-matcher.mjs +224 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.d.ts +118 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.d.ts.map +1 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.js +477 -0
- package/dist/reasoningbank/adapter/agentdb-adapter.js.map +1 -0
- package/dist/reasoningbank/adapter/types.d.ts +113 -0
- package/dist/reasoningbank/adapter/types.d.ts.map +1 -0
- package/dist/reasoningbank/adapter/types.js +9 -0
- package/dist/reasoningbank/adapter/types.js.map +1 -0
- package/dist/reasoningbank/cli/commands.d.ts +16 -0
- package/dist/reasoningbank/cli/commands.d.ts.map +1 -0
- package/dist/reasoningbank/cli/commands.js +272 -0
- package/dist/reasoningbank/cli/commands.js.map +1 -0
- package/dist/reasoningbank/mcp/agentdb-tools.d.ts +319 -0
- package/dist/reasoningbank/mcp/agentdb-tools.d.ts.map +1 -0
- package/dist/reasoningbank/mcp/agentdb-tools.js +301 -0
- package/dist/reasoningbank/mcp/agentdb-tools.js.map +1 -0
- package/dist/reasoningbank/migration/migrate.d.ts +25 -0
- package/dist/reasoningbank/migration/migrate.d.ts.map +1 -0
- package/dist/reasoningbank/migration/migrate.js +178 -0
- package/dist/reasoningbank/migration/migrate.js.map +1 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.d.ts +37 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.js +114 -0
- package/dist/reasoningbank/reasoning/context-synthesizer.js.map +1 -0
- package/dist/reasoningbank/reasoning/experience-curator.d.ts +39 -0
- package/dist/reasoningbank/reasoning/experience-curator.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/experience-curator.js +98 -0
- package/dist/reasoningbank/reasoning/experience-curator.js.map +1 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.d.ts +44 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.js +184 -0
- package/dist/reasoningbank/reasoning/memory-optimizer.js.map +1 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.d.ts +40 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.d.ts.map +1 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.js +87 -0
- package/dist/reasoningbank/reasoning/pattern-matcher.js.map +1 -0
- package/dist/reasoningbank/sync/quic-sync.d.ts +77 -0
- package/dist/reasoningbank/sync/quic-sync.d.ts.map +1 -0
- package/dist/reasoningbank/sync/quic-sync.js +165 -0
- package/dist/reasoningbank/sync/quic-sync.js.map +1 -0
- package/dist/sync/conflict.d.ts +78 -0
- package/dist/sync/conflict.d.ts.map +1 -0
- package/dist/sync/conflict.js +202 -0
- package/dist/sync/conflict.js.map +1 -0
- package/dist/sync/conflict.mjs +196 -0
- package/dist/sync/coordinator.d.ts +111 -0
- package/dist/sync/coordinator.d.ts.map +1 -0
- package/dist/sync/coordinator.js +256 -0
- package/dist/sync/coordinator.js.map +1 -0
- package/dist/sync/coordinator.mjs +250 -0
- package/dist/sync/delta.d.ts +81 -0
- package/dist/sync/delta.d.ts.map +1 -0
- package/dist/sync/delta.js +245 -0
- package/dist/sync/delta.js.map +1 -0
- package/dist/sync/delta.mjs +238 -0
- package/dist/sync/index.d.ts +11 -0
- package/dist/sync/index.d.ts.map +1 -0
- package/dist/sync/index.js +22 -0
- package/dist/sync/index.js.map +1 -0
- package/dist/sync/index.mjs +9 -0
- package/dist/sync/quic-sync.d.ts +81 -0
- package/dist/sync/quic-sync.d.ts.map +1 -0
- package/dist/sync/quic-sync.js +329 -0
- package/dist/sync/quic-sync.js.map +1 -0
- package/dist/sync/quic-sync.mjs +323 -0
- package/dist/sync/types.d.ts +168 -0
- package/dist/sync/types.d.ts.map +1 -0
- package/dist/sync/types.js +8 -0
- package/dist/sync/types.js.map +1 -0
- package/dist/sync/types.mjs +6 -0
- package/dist/types/index.d.ts +117 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +6 -0
- package/dist/types/index.js.map +1 -0
- package/dist/types/index.mjs +4 -0
- package/dist/wasm-loader.d.ts +32 -0
- package/dist/wasm-loader.d.ts.map +1 -0
- package/dist/wasm-loader.js +75 -0
- package/dist/wasm-loader.js.map +1 -0
- package/dist/wasm-loader.mjs +64 -0
- package/examples/adaptive-learning.ts +284 -0
- package/examples/browser/README.md +732 -0
- package/examples/browser/adaptive-recommendations/index.html +427 -0
- package/examples/browser/collaborative-filtering/index.html +310 -0
- package/examples/browser/continual-learning/index.html +736 -0
- package/examples/browser/experience-replay/index.html +616 -0
- package/examples/browser/index.html +369 -0
- package/examples/browser/meta-learning/index.html +789 -0
- package/examples/browser/neuro-symbolic/index.html +692 -0
- package/examples/browser/pattern-learning/index.html +620 -0
- package/examples/browser/quantum-inspired/index.html +728 -0
- package/examples/browser/rag/index.html +624 -0
- package/examples/browser/swarm-intelligence/index.html +811 -0
- package/examples/browser-basic.html +170 -0
- package/examples/hnsw-example.ts +148 -0
- package/examples/node-basic.js +70 -0
- package/examples/quic-sync-example.ts +310 -0
- package/examples/quick-start.js +68 -0
- package/examples/wasm-example.ts +222 -0
- package/package.json +118 -0
|
@@ -0,0 +1,430 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Q-Learning Plugin
|
|
3
|
+
*
|
|
4
|
+
* Implements the Q-Learning algorithm with experience replay.
|
|
5
|
+
* Q-Learning is a model-free, off-policy RL algorithm that learns
|
|
6
|
+
* the optimal action-value function (Q-function).
|
|
7
|
+
*
|
|
8
|
+
* Key features:
|
|
9
|
+
* - Epsilon-greedy exploration
|
|
10
|
+
* - Experience replay buffer
|
|
11
|
+
* - Optional prioritized experience replay
|
|
12
|
+
* - Temporal difference learning
|
|
13
|
+
*/
|
|
14
|
+
import { BasePlugin } from '../base-plugin.mjs';
|
|
15
|
+
/**
|
|
16
|
+
* Experience replay buffer
|
|
17
|
+
*/
|
|
18
|
+
class ReplayBuffer {
|
|
19
|
+
constructor(capacity) {
|
|
20
|
+
this.buffer = [];
|
|
21
|
+
this.position = 0;
|
|
22
|
+
this.capacity = capacity;
|
|
23
|
+
}
|
|
24
|
+
/**
|
|
25
|
+
* Add experience to buffer
|
|
26
|
+
*/
|
|
27
|
+
add(experience) {
|
|
28
|
+
if (this.buffer.length < this.capacity) {
|
|
29
|
+
this.buffer.push(experience);
|
|
30
|
+
}
|
|
31
|
+
else {
|
|
32
|
+
this.buffer[this.position] = experience;
|
|
33
|
+
}
|
|
34
|
+
this.position = (this.position + 1) % this.capacity;
|
|
35
|
+
}
|
|
36
|
+
/**
|
|
37
|
+
* Sample random batch
|
|
38
|
+
*/
|
|
39
|
+
sample(batchSize) {
|
|
40
|
+
const batch = [];
|
|
41
|
+
for (let i = 0; i < Math.min(batchSize, this.buffer.length); i++) {
|
|
42
|
+
const index = Math.floor(Math.random() * this.buffer.length);
|
|
43
|
+
batch.push(this.buffer[index]);
|
|
44
|
+
}
|
|
45
|
+
return batch;
|
|
46
|
+
}
|
|
47
|
+
/**
|
|
48
|
+
* Get buffer size
|
|
49
|
+
*/
|
|
50
|
+
size() {
|
|
51
|
+
return this.buffer.length;
|
|
52
|
+
}
|
|
53
|
+
/**
|
|
54
|
+
* Check if buffer has enough samples
|
|
55
|
+
*/
|
|
56
|
+
hasEnough(minSize) {
|
|
57
|
+
return this.buffer.length >= minSize;
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
/**
 * Replay buffer with proportional prioritized sampling: experiences are drawn
 * with probability proportional to priority^alpha, and max-normalized
 * importance-sampling weights correct for the bias, with beta annealed
 * toward 1 on every sampling call.
 */
class PrioritizedReplayBuffer extends ReplayBuffer {
    priorities = [];
    alpha;
    beta;
    betaIncrement;
    constructor(capacity, alpha = 0.6, beta = 0.4, betaIncrement = 0.001) {
        super(capacity);
        this.alpha = alpha;
        this.beta = beta;
        this.betaIncrement = betaIncrement;
    }
    /**
     * Store an experience together with its sampling priority.
     */
    addWithPriority(experience, priority) {
        this.add(experience);
        if (this.priorities.length >= this.size()) {
            // Buffer wrapped: overwrite the priority of the slot just written
            // (add() already advanced this.position past it).
            const slot = this.position - 1;
            this.priorities[slot >= 0 ? slot : this.priorities.length - 1] = priority;
        }
        else {
            this.priorities.push(priority);
        }
    }
    /**
     * Draw a batch via roulette-wheel selection over priority^alpha.
     * Returns the experiences, their buffer indices, and max-normalized
     * importance-sampling weights; anneals beta afterwards.
     */
    samplePrioritized(batchSize) {
        const n = this.size();
        const scaled = this.priorities.map(p => Math.pow(p, this.alpha));
        const total = scaled.reduce((acc, p) => acc + p, 0);
        const experiences = [];
        const indices = [];
        const weights = [];
        const draws = Math.min(batchSize, n);
        for (let d = 0; d < draws; d++) {
            // Roulette-wheel: walk the cumulative mass until the target falls.
            const target = Math.random() * total;
            let running = 0;
            let chosen = 0;
            for (let j = 0; j < scaled.length; j++) {
                running += scaled[j];
                if (target <= running) {
                    chosen = j;
                    break;
                }
            }
            experiences.push(this.buffer[chosen]);
            indices.push(chosen);
            // Importance-sampling correction for the non-uniform draw.
            const prob = scaled[chosen] / total;
            weights.push(Math.pow(n * prob, -this.beta));
        }
        const maxWeight = Math.max(...weights);
        // Anneal beta toward full bias correction.
        this.beta = Math.min(1.0, this.beta + this.betaIncrement);
        return {
            experiences,
            indices,
            weights: weights.map(w => w / maxWeight),
        };
    }
    /**
     * Overwrite the priority at a given buffer index (out-of-range is a no-op).
     */
    updatePriority(index, priority) {
        if (index >= 0 && index < this.priorities.length) {
            this.priorities[index] = priority;
        }
    }
}
|
|
130
|
+
/**
|
|
131
|
+
* Q-Learning Plugin Implementation
|
|
132
|
+
*/
|
|
133
|
+
export class QLearningPlugin extends BasePlugin {
|
|
134
|
+
/**
 * Set defaults for plugin identity, the exploration schedule, the Q-table,
 * and replay bookkeeping. The schedule values are overridden from
 * this.config in onInitialize().
 */
constructor() {
    super(...arguments);
    this.name = 'q-learning';
    this.version = '1.0.0';
    // Tabular action-value store: stateKey -> Map(actionKey -> Q-value).
    this.qTable = new Map();
    // Epsilon-greedy exploration schedule defaults.
    this.epsilon = 1.0;
    this.epsilonMin = 0.01;
    this.epsilonDecay = 0.995;
    this.usePrioritized = false;
    // Experiences stored since the last periodic training pass.
    this.trainCounter = 0;
}
|
|
145
|
+
/**
|
|
146
|
+
* Initialize Q-Learning plugin
|
|
147
|
+
*/
|
|
148
|
+
async onInitialize() {
|
|
149
|
+
// Initialize epsilon
|
|
150
|
+
this.epsilon = this.config.algorithm.epsilonStart || 1.0;
|
|
151
|
+
this.epsilonMin = this.config.algorithm.epsilonEnd || 0.01;
|
|
152
|
+
this.epsilonDecay = this.config.algorithm.epsilonDecay || 0.995;
|
|
153
|
+
// Initialize replay buffer
|
|
154
|
+
const replayConfig = this.config.experienceReplay;
|
|
155
|
+
if (replayConfig && replayConfig.type === 'prioritized') {
|
|
156
|
+
this.usePrioritized = true;
|
|
157
|
+
this.replayBuffer = new PrioritizedReplayBuffer(replayConfig.capacity, replayConfig.alpha, replayConfig.beta, replayConfig.betaIncrement);
|
|
158
|
+
}
|
|
159
|
+
else if (replayConfig && replayConfig.type === 'uniform') {
|
|
160
|
+
this.replayBuffer = new ReplayBuffer(replayConfig.capacity);
|
|
161
|
+
}
|
|
162
|
+
else {
|
|
163
|
+
// Default replay buffer
|
|
164
|
+
this.replayBuffer = new ReplayBuffer(10000);
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
/**
|
|
168
|
+
* Select action using epsilon-greedy policy
|
|
169
|
+
*
|
|
170
|
+
* @param state - Current state vector
|
|
171
|
+
* @param context - Optional context
|
|
172
|
+
* @returns Selected action
|
|
173
|
+
*/
|
|
174
|
+
async selectAction(state, context) {
|
|
175
|
+
this.checkInitialized();
|
|
176
|
+
// Epsilon-greedy exploration
|
|
177
|
+
if (Math.random() < this.epsilon) {
|
|
178
|
+
return this.randomAction(state);
|
|
179
|
+
}
|
|
180
|
+
// Exploit: Select action with highest Q-value
|
|
181
|
+
return this.greedyAction(state);
|
|
182
|
+
}
|
|
183
|
+
/**
|
|
184
|
+
* Select random action (exploration)
|
|
185
|
+
*/
|
|
186
|
+
async randomAction(state) {
|
|
187
|
+
// Find similar states to get action space
|
|
188
|
+
const similar = await this.retrieveSimilar(state, 10);
|
|
189
|
+
if (similar.length === 0) {
|
|
190
|
+
// No similar states, return random embedding
|
|
191
|
+
return {
|
|
192
|
+
id: 'random',
|
|
193
|
+
embedding: Array.from({ length: 768 }, () => Math.random() * 2 - 1),
|
|
194
|
+
source: 'policy',
|
|
195
|
+
confidence: 0,
|
|
196
|
+
metadata: { exploration: true },
|
|
197
|
+
};
|
|
198
|
+
}
|
|
199
|
+
// Random action from similar states
|
|
200
|
+
const randomIdx = Math.floor(Math.random() * similar.length);
|
|
201
|
+
const randomExp = similar[randomIdx];
|
|
202
|
+
if (!randomExp.metadata) {
|
|
203
|
+
// Fallback to random embedding
|
|
204
|
+
return {
|
|
205
|
+
id: 'random',
|
|
206
|
+
embedding: Array.from({ length: 768 }, () => Math.random() * 2 - 1),
|
|
207
|
+
source: 'policy',
|
|
208
|
+
confidence: 0,
|
|
209
|
+
metadata: { exploration: true },
|
|
210
|
+
};
|
|
211
|
+
}
|
|
212
|
+
return {
|
|
213
|
+
id: randomExp.id,
|
|
214
|
+
embedding: randomExp.metadata.action,
|
|
215
|
+
source: 'policy',
|
|
216
|
+
confidence: 0,
|
|
217
|
+
metadata: { exploration: true },
|
|
218
|
+
};
|
|
219
|
+
}
|
|
220
|
+
/**
|
|
221
|
+
* Select greedy action (exploitation)
|
|
222
|
+
*/
|
|
223
|
+
async greedyAction(state) {
|
|
224
|
+
const stateKey = this.hashState(state);
|
|
225
|
+
// Get Q-values for this state
|
|
226
|
+
const qValues = this.qTable.get(stateKey);
|
|
227
|
+
if (!qValues || qValues.size === 0) {
|
|
228
|
+
// No Q-values yet, explore similar states
|
|
229
|
+
const similar = await this.retrieveSimilar(state, 1);
|
|
230
|
+
if (similar.length > 0 && similar[0].metadata) {
|
|
231
|
+
return {
|
|
232
|
+
id: similar[0].id,
|
|
233
|
+
embedding: similar[0].metadata.action,
|
|
234
|
+
source: 'policy',
|
|
235
|
+
confidence: similar[0].score,
|
|
236
|
+
metadata: { exploration: false },
|
|
237
|
+
};
|
|
238
|
+
}
|
|
239
|
+
// Fallback to random
|
|
240
|
+
return this.randomAction(state);
|
|
241
|
+
}
|
|
242
|
+
// Find action with maximum Q-value
|
|
243
|
+
let maxQ = -Infinity;
|
|
244
|
+
let bestAction = '';
|
|
245
|
+
for (const [action, qValue] of qValues.entries()) {
|
|
246
|
+
if (qValue > maxQ) {
|
|
247
|
+
maxQ = qValue;
|
|
248
|
+
bestAction = action;
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
// Retrieve action embedding from storage
|
|
252
|
+
const actionData = await this.getActionEmbedding(bestAction);
|
|
253
|
+
return {
|
|
254
|
+
id: bestAction,
|
|
255
|
+
embedding: actionData,
|
|
256
|
+
source: 'policy',
|
|
257
|
+
confidence: this.normalizeQValue(maxQ),
|
|
258
|
+
metadata: { exploration: false, qValue: maxQ },
|
|
259
|
+
};
|
|
260
|
+
}
|
|
261
|
+
/**
|
|
262
|
+
* Store experience and update Q-table
|
|
263
|
+
*/
|
|
264
|
+
async onStoreExperience(experience) {
|
|
265
|
+
// Add to replay buffer
|
|
266
|
+
if (this.usePrioritized) {
|
|
267
|
+
const tdError = this.computeTDError(experience);
|
|
268
|
+
this.replayBuffer.addWithPriority(experience, Math.abs(tdError) + 1e-6);
|
|
269
|
+
}
|
|
270
|
+
else {
|
|
271
|
+
this.replayBuffer.add(experience);
|
|
272
|
+
}
|
|
273
|
+
// Train periodically
|
|
274
|
+
this.trainCounter++;
|
|
275
|
+
const trainEvery = this.config.training.trainEvery || 100;
|
|
276
|
+
if (this.trainCounter >= trainEvery && this.replayBuffer.hasEnough(this.config.training.minExperiences)) {
|
|
277
|
+
await this.train({ epochs: 1, verbose: false });
|
|
278
|
+
this.trainCounter = 0;
|
|
279
|
+
}
|
|
280
|
+
}
|
|
281
|
+
/**
|
|
282
|
+
* Train Q-Learning on replay buffer
|
|
283
|
+
*/
|
|
284
|
+
async train(options) {
|
|
285
|
+
this.checkInitialized();
|
|
286
|
+
const epochs = options?.epochs || 1;
|
|
287
|
+
const batchSize = options?.batchSize || this.config.training.batchSize;
|
|
288
|
+
const learningRate = options?.learningRate || this.config.algorithm.learningRate;
|
|
289
|
+
const gamma = this.config.algorithm.discountFactor;
|
|
290
|
+
let totalLoss = 0;
|
|
291
|
+
let avgQValue = 0;
|
|
292
|
+
for (let epoch = 0; epoch < epochs; epoch++) {
|
|
293
|
+
let epochLoss = 0;
|
|
294
|
+
let epochQSum = 0;
|
|
295
|
+
// Sample batch
|
|
296
|
+
let batch;
|
|
297
|
+
let weights = [];
|
|
298
|
+
if (this.usePrioritized) {
|
|
299
|
+
const sampled = this.replayBuffer.samplePrioritized(batchSize);
|
|
300
|
+
batch = sampled.experiences;
|
|
301
|
+
weights = sampled.weights;
|
|
302
|
+
}
|
|
303
|
+
else {
|
|
304
|
+
batch = this.replayBuffer.sample(batchSize);
|
|
305
|
+
weights = new Array(batch.length).fill(1.0);
|
|
306
|
+
}
|
|
307
|
+
// Update Q-values
|
|
308
|
+
for (let i = 0; i < batch.length; i++) {
|
|
309
|
+
const exp = batch[i];
|
|
310
|
+
const weight = weights[i];
|
|
311
|
+
const stateKey = this.hashState(exp.state);
|
|
312
|
+
const actionKey = this.hashAction(exp.action);
|
|
313
|
+
// Get current Q-value
|
|
314
|
+
const currentQ = this.getQValue(stateKey, actionKey);
|
|
315
|
+
// Compute target Q-value
|
|
316
|
+
const nextStateKey = this.hashState(exp.nextState);
|
|
317
|
+
const maxNextQ = exp.done ? 0 : this.getMaxQValue(nextStateKey);
|
|
318
|
+
const targetQ = exp.reward + gamma * maxNextQ;
|
|
319
|
+
// TD error
|
|
320
|
+
const tdError = targetQ - currentQ;
|
|
321
|
+
// Update Q-value
|
|
322
|
+
const newQ = currentQ + learningRate * weight * tdError;
|
|
323
|
+
this.setQValue(stateKey, actionKey, newQ);
|
|
324
|
+
// Track metrics
|
|
325
|
+
epochLoss += tdError * tdError * weight;
|
|
326
|
+
epochQSum += newQ;
|
|
327
|
+
// Update priority if using prioritized replay
|
|
328
|
+
if (this.usePrioritized) {
|
|
329
|
+
const sampled = this.replayBuffer.samplePrioritized(batchSize);
|
|
330
|
+
this.replayBuffer.updatePriority(sampled.indices[i], Math.abs(tdError) + 1e-6);
|
|
331
|
+
}
|
|
332
|
+
}
|
|
333
|
+
totalLoss = epochLoss / batch.length;
|
|
334
|
+
avgQValue = epochQSum / batch.length;
|
|
335
|
+
}
|
|
336
|
+
// Decay epsilon
|
|
337
|
+
this.epsilon = Math.max(this.epsilonMin, this.epsilon * this.epsilonDecay);
|
|
338
|
+
return {
|
|
339
|
+
loss: totalLoss,
|
|
340
|
+
avgQValue,
|
|
341
|
+
epsilon: this.epsilon,
|
|
342
|
+
};
|
|
343
|
+
}
|
|
344
|
+
/**
|
|
345
|
+
* Compute TD error for prioritized replay
|
|
346
|
+
*/
|
|
347
|
+
computeTDError(experience) {
|
|
348
|
+
const stateKey = this.hashState(experience.state);
|
|
349
|
+
const actionKey = this.hashAction(experience.action);
|
|
350
|
+
const gamma = this.config.algorithm.discountFactor;
|
|
351
|
+
const currentQ = this.getQValue(stateKey, actionKey);
|
|
352
|
+
const nextStateKey = this.hashState(experience.nextState);
|
|
353
|
+
const maxNextQ = experience.done ? 0 : this.getMaxQValue(nextStateKey);
|
|
354
|
+
const targetQ = experience.reward + gamma * maxNextQ;
|
|
355
|
+
return targetQ - currentQ;
|
|
356
|
+
}
|
|
357
|
+
/**
|
|
358
|
+
* Get Q-value for state-action pair
|
|
359
|
+
*/
|
|
360
|
+
getQValue(stateKey, actionKey) {
|
|
361
|
+
const qValues = this.qTable.get(stateKey);
|
|
362
|
+
return qValues?.get(actionKey) || 0;
|
|
363
|
+
}
|
|
364
|
+
/**
|
|
365
|
+
* Set Q-value for state-action pair
|
|
366
|
+
*/
|
|
367
|
+
setQValue(stateKey, actionKey, value) {
|
|
368
|
+
if (!this.qTable.has(stateKey)) {
|
|
369
|
+
this.qTable.set(stateKey, new Map());
|
|
370
|
+
}
|
|
371
|
+
this.qTable.get(stateKey).set(actionKey, value);
|
|
372
|
+
}
|
|
373
|
+
/**
|
|
374
|
+
* Get maximum Q-value for a state
|
|
375
|
+
*/
|
|
376
|
+
getMaxQValue(stateKey) {
|
|
377
|
+
const qValues = this.qTable.get(stateKey);
|
|
378
|
+
if (!qValues || qValues.size === 0) {
|
|
379
|
+
return 0;
|
|
380
|
+
}
|
|
381
|
+
return Math.max(...Array.from(qValues.values()));
|
|
382
|
+
}
|
|
383
|
+
/**
|
|
384
|
+
* Hash state vector to string key
|
|
385
|
+
*/
|
|
386
|
+
hashState(state) {
|
|
387
|
+
// Simple hash - in production, use better hashing or clustering
|
|
388
|
+
return state.slice(0, 10).map((x) => x.toFixed(2)).join(',');
|
|
389
|
+
}
|
|
390
|
+
/**
|
|
391
|
+
* Hash action to string key
|
|
392
|
+
*/
|
|
393
|
+
hashAction(action) {
|
|
394
|
+
if (typeof action === 'string') {
|
|
395
|
+
return action;
|
|
396
|
+
}
|
|
397
|
+
if (Array.isArray(action)) {
|
|
398
|
+
return action.slice(0, 10).map((x) => x.toFixed(2)).join(',');
|
|
399
|
+
}
|
|
400
|
+
return String(action);
|
|
401
|
+
}
|
|
402
|
+
/**
|
|
403
|
+
* Get action embedding from ID
|
|
404
|
+
*/
|
|
405
|
+
async getActionEmbedding(actionId) {
|
|
406
|
+
// In production, retrieve from database
|
|
407
|
+
// For now, return random embedding
|
|
408
|
+
return Array.from({ length: 768 }, () => Math.random() * 2 - 1);
|
|
409
|
+
}
|
|
410
|
+
/**
|
|
411
|
+
* Normalize Q-value to 0-1 confidence
|
|
412
|
+
*/
|
|
413
|
+
normalizeQValue(qValue) {
|
|
414
|
+
return 1 / (1 + Math.exp(-qValue)); // Sigmoid
|
|
415
|
+
}
|
|
416
|
+
/**
|
|
417
|
+
* Save Q-table
|
|
418
|
+
*/
|
|
419
|
+
async onSave(path) {
|
|
420
|
+
console.log(`Saving Q-Learning model to ${path}`);
|
|
421
|
+
// In production, serialize Q-table to file
|
|
422
|
+
}
|
|
423
|
+
/**
|
|
424
|
+
* Load Q-table
|
|
425
|
+
*/
|
|
426
|
+
async onLoad(path) {
|
|
427
|
+
console.log(`Loading Q-Learning model from ${path}`);
|
|
428
|
+
// In production, deserialize Q-table from file
|
|
429
|
+
}
|
|
430
|
+
}
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SARSA Plugin
|
|
3
|
+
*
|
|
4
|
+
* Implements the SARSA (State-Action-Reward-State-Action) algorithm
|
|
5
|
+
* with eligibility traces (SARSA(λ)).
|
|
6
|
+
*
|
|
7
|
+
* SARSA is an on-policy TD control algorithm that learns from the
|
|
8
|
+
* actual actions taken by the current policy, unlike Q-Learning which
|
|
9
|
+
* is off-policy.
|
|
10
|
+
*
|
|
11
|
+
* Key features:
|
|
12
|
+
* - On-policy learning
|
|
13
|
+
* - Eligibility traces for faster learning
|
|
14
|
+
* - Epsilon-greedy exploration
|
|
15
|
+
* - More conservative than Q-Learning
|
|
16
|
+
*/
|
|
17
|
+
import { BasePlugin } from '../base-plugin';
|
|
18
|
+
import { Action, Context, Experience, TrainOptions, TrainingMetrics } from '../learning-plugin.interface';
|
|
19
|
+
/**
 * SARSA Plugin Implementation
 */
export declare class SARSAPlugin extends BasePlugin {
    name: string;
    version: string;
    /** Q-table mapping hashed states to per-action values — presumably Map-of-Maps; confirm against sarsa.ts */
    private qTable;
    /** Current exploration rate for epsilon-greedy action selection */
    private epsilon;
    /** Lower bound that epsilon decays toward */
    private epsilonMin;
    /** Decay factor applied to epsilon — presumably multiplicative per step; confirm against sarsa.ts */
    private epsilonDecay;
    /** Eligibility traces for SARSA(λ) credit assignment */
    private eligibilityTraces;
    /** Trace-decay parameter λ for SARSA(λ) */
    private lambda;
    /** Previous state, retained for the on-policy (S,A,R,S',A') update */
    private lastState;
    /** Previous action, retained for the on-policy (S,A,R,S',A') update */
    private lastAction;
    /**
     * Initialize SARSA plugin
     */
    protected onInitialize(): Promise<void>;
    /**
     * Select action using epsilon-greedy policy
     *
     * @param state - Current state vector
     * @param context - Optional context
     * @returns Selected action
     */
    selectAction(state: number[], context?: Context): Promise<Action>;
    /**
     * Select random action (exploration)
     */
    private randomAction;
    /**
     * Select greedy action (exploitation)
     */
    private greedyAction;
    /**
     * Store experience and perform SARSA update
     *
     * SARSA uses the actual next action taken, not the max Q-value
     * (on-policy, unlike Q-Learning's off-policy max backup)
     */
    protected onStoreExperience(experience: Experience): Promise<void>;
    /**
     * Perform SARSA(λ) update with eligibility traces
     */
    private sarsaUpdate;
    /**
     * Train SARSA on stored experiences
     *
     * For online learning, this is called after each experience.
     * For offline learning, this processes batches of episodes.
     */
    train(options?: TrainOptions): Promise<TrainingMetrics>;
    /**
     * Get Q-value for state-action pair
     */
    private getQValue;
    /**
     * Set Q-value for state-action pair
     */
    private setQValue;
    /**
     * Hash state vector to string key
     */
    private hashState;
    /**
     * Hash action to string key
     */
    private hashAction;
    /**
     * Get action embedding from ID
     */
    private getActionEmbedding;
    /**
     * Normalize Q-value to 0-1 confidence
     */
    private normalizeQValue;
    /**
     * Save Q-table and eligibility traces
     */
    protected onSave(path: string): Promise<void>;
    /**
     * Load Q-table and eligibility traces
     */
    protected onLoad(path: string): Promise<void>;
}
|
|
103
|
+
//# sourceMappingURL=sarsa.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"sarsa.d.ts","sourceRoot":"","sources":["../../../src/plugins/implementations/sarsa.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AAEH,OAAO,EAAE,UAAU,EAAE,MAAM,gBAAgB,CAAC;AAC5C,OAAO,EACL,MAAM,EACN,OAAO,EACP,UAAU,EACV,YAAY,EACZ,eAAe,EAChB,MAAM,8BAA8B,CAAC;AA8EtC;;GAEG;AACH,qBAAa,WAAY,SAAQ,UAAU;IAClC,IAAI,SAAW;IACf,OAAO,SAAW;IAEzB,OAAO,CAAC,MAAM,CAA+C;IAC7D,OAAO,CAAC,OAAO,CAAe;IAC9B,OAAO,CAAC,UAAU,CAAgB;IAClC,OAAO,CAAC,YAAY,CAAiB;IACrC,OAAO,CAAC,iBAAiB,CAAoB;IAC7C,OAAO,CAAC,MAAM,CAAe;IAC7B,OAAO,CAAC,SAAS,CAAyB;IAC1C,OAAO,CAAC,UAAU,CAAa;IAE/B;;OAEG;cACa,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC;IAc7C;;;;;;OAMG;IACG,YAAY,CAAC,KAAK,EAAE,MAAM,EAAE,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,MAAM,CAAC;IAYvE;;OAEG;YACW,YAAY;IAuC1B;;OAEG;YACW,YAAY;IA+C1B;;;;OAIG;cACa,iBAAiB,CAAC,UAAU,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAyBxE;;OAEG;YACW,WAAW;IAuCzB;;;;;OAKG;IACG,KAAK,CAAC,OAAO,CAAC,EAAE,YAAY,GAAG,OAAO,CAAC,eAAe,CAAC;IA4B7D;;OAEG;IACH,OAAO,CAAC,SAAS;IAKjB;;OAEG;IACH,OAAO,CAAC,SAAS;IAOjB;;OAEG;IACH,OAAO,CAAC,SAAS;IAKjB;;OAEG;IACH,OAAO,CAAC,UAAU;IAYlB;;OAEG;YACW,kBAAkB;IAMhC;;OAEG;IACH,OAAO,CAAC,eAAe;IAIvB;;OAEG;cACa,MAAM,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAKnD;;OAEG;cACa,MAAM,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;CAIpD"}
|