@sparkleideas/neural 3.5.2-patch.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122):
  1. package/README.md +260 -0
  2. package/__tests__/README.md +235 -0
  3. package/__tests__/algorithms.test.ts +582 -0
  4. package/__tests__/patterns.test.ts +549 -0
  5. package/__tests__/sona.test.ts +445 -0
  6. package/docs/SONA_INTEGRATION.md +460 -0
  7. package/docs/SONA_QUICKSTART.md +168 -0
  8. package/examples/sona-usage.ts +318 -0
  9. package/package.json +23 -0
  10. package/src/algorithms/a2c.d.ts +86 -0
  11. package/src/algorithms/a2c.d.ts.map +1 -0
  12. package/src/algorithms/a2c.js +361 -0
  13. package/src/algorithms/a2c.js.map +1 -0
  14. package/src/algorithms/a2c.ts +478 -0
  15. package/src/algorithms/curiosity.d.ts +82 -0
  16. package/src/algorithms/curiosity.d.ts.map +1 -0
  17. package/src/algorithms/curiosity.js +392 -0
  18. package/src/algorithms/curiosity.js.map +1 -0
  19. package/src/algorithms/curiosity.ts +509 -0
  20. package/src/algorithms/decision-transformer.d.ts +82 -0
  21. package/src/algorithms/decision-transformer.d.ts.map +1 -0
  22. package/src/algorithms/decision-transformer.js +415 -0
  23. package/src/algorithms/decision-transformer.js.map +1 -0
  24. package/src/algorithms/decision-transformer.ts +521 -0
  25. package/src/algorithms/dqn.d.ts +72 -0
  26. package/src/algorithms/dqn.d.ts.map +1 -0
  27. package/src/algorithms/dqn.js +303 -0
  28. package/src/algorithms/dqn.js.map +1 -0
  29. package/src/algorithms/dqn.ts +382 -0
  30. package/src/algorithms/index.d.ts +32 -0
  31. package/src/algorithms/index.d.ts.map +1 -0
  32. package/src/algorithms/index.js +74 -0
  33. package/src/algorithms/index.js.map +1 -0
  34. package/src/algorithms/index.ts +122 -0
  35. package/src/algorithms/ppo.d.ts +72 -0
  36. package/src/algorithms/ppo.d.ts.map +1 -0
  37. package/src/algorithms/ppo.js +331 -0
  38. package/src/algorithms/ppo.js.map +1 -0
  39. package/src/algorithms/ppo.ts +429 -0
  40. package/src/algorithms/q-learning.d.ts +77 -0
  41. package/src/algorithms/q-learning.d.ts.map +1 -0
  42. package/src/algorithms/q-learning.js +259 -0
  43. package/src/algorithms/q-learning.js.map +1 -0
  44. package/src/algorithms/q-learning.ts +333 -0
  45. package/src/algorithms/sarsa.d.ts +82 -0
  46. package/src/algorithms/sarsa.d.ts.map +1 -0
  47. package/src/algorithms/sarsa.js +297 -0
  48. package/src/algorithms/sarsa.js.map +1 -0
  49. package/src/algorithms/sarsa.ts +383 -0
  50. package/src/algorithms/tmp.json +0 -0
  51. package/src/application/index.ts +11 -0
  52. package/src/application/services/neural-application-service.ts +217 -0
  53. package/src/domain/entities/pattern.ts +169 -0
  54. package/src/domain/index.ts +18 -0
  55. package/src/domain/services/learning-service.ts +256 -0
  56. package/src/index.d.ts +118 -0
  57. package/src/index.d.ts.map +1 -0
  58. package/src/index.js +201 -0
  59. package/src/index.js.map +1 -0
  60. package/src/index.ts +363 -0
  61. package/src/modes/balanced.d.ts +60 -0
  62. package/src/modes/balanced.d.ts.map +1 -0
  63. package/src/modes/balanced.js +234 -0
  64. package/src/modes/balanced.js.map +1 -0
  65. package/src/modes/balanced.ts +299 -0
  66. package/src/modes/base.ts +163 -0
  67. package/src/modes/batch.d.ts +82 -0
  68. package/src/modes/batch.d.ts.map +1 -0
  69. package/src/modes/batch.js +316 -0
  70. package/src/modes/batch.js.map +1 -0
  71. package/src/modes/batch.ts +434 -0
  72. package/src/modes/edge.d.ts +85 -0
  73. package/src/modes/edge.d.ts.map +1 -0
  74. package/src/modes/edge.js +310 -0
  75. package/src/modes/edge.js.map +1 -0
  76. package/src/modes/edge.ts +409 -0
  77. package/src/modes/index.d.ts +55 -0
  78. package/src/modes/index.d.ts.map +1 -0
  79. package/src/modes/index.js +83 -0
  80. package/src/modes/index.js.map +1 -0
  81. package/src/modes/index.ts +16 -0
  82. package/src/modes/real-time.d.ts +58 -0
  83. package/src/modes/real-time.d.ts.map +1 -0
  84. package/src/modes/real-time.js +196 -0
  85. package/src/modes/real-time.js.map +1 -0
  86. package/src/modes/real-time.ts +257 -0
  87. package/src/modes/research.d.ts +79 -0
  88. package/src/modes/research.d.ts.map +1 -0
  89. package/src/modes/research.js +389 -0
  90. package/src/modes/research.js.map +1 -0
  91. package/src/modes/research.ts +486 -0
  92. package/src/modes/tmp.json +0 -0
  93. package/src/pattern-learner.d.ts +117 -0
  94. package/src/pattern-learner.d.ts.map +1 -0
  95. package/src/pattern-learner.js +603 -0
  96. package/src/pattern-learner.js.map +1 -0
  97. package/src/pattern-learner.ts +757 -0
  98. package/src/reasoning-bank.d.ts +259 -0
  99. package/src/reasoning-bank.d.ts.map +1 -0
  100. package/src/reasoning-bank.js +993 -0
  101. package/src/reasoning-bank.js.map +1 -0
  102. package/src/reasoning-bank.ts +1279 -0
  103. package/src/reasoningbank-adapter.ts +697 -0
  104. package/src/sona-integration.d.ts +168 -0
  105. package/src/sona-integration.d.ts.map +1 -0
  106. package/src/sona-integration.js +316 -0
  107. package/src/sona-integration.js.map +1 -0
  108. package/src/sona-integration.ts +432 -0
  109. package/src/sona-manager.d.ts +147 -0
  110. package/src/sona-manager.d.ts.map +1 -0
  111. package/src/sona-manager.js +695 -0
  112. package/src/sona-manager.js.map +1 -0
  113. package/src/sona-manager.ts +835 -0
  114. package/src/tmp.json +0 -0
  115. package/src/types.d.ts +431 -0
  116. package/src/types.d.ts.map +1 -0
  117. package/src/types.js +11 -0
  118. package/src/types.js.map +1 -0
  119. package/src/types.ts +590 -0
  120. package/tmp.json +0 -0
  121. package/tsconfig.json +9 -0
  122. package/vitest.config.ts +19 -0
@@ -0,0 +1,32 @@
1
/**
 * RL Algorithms Index
 *
 * Exports all reinforcement learning algorithm implementations.
 */
// PPO - Proximal Policy Optimization
export { PPOAlgorithm, createPPO, DEFAULT_PPO_CONFIG, } from './ppo.js';
export type { PPOConfig } from '../types.js';
// DQN - Deep Q-Network
export { DQNAlgorithm, createDQN, DEFAULT_DQN_CONFIG, } from './dqn.js';
export type { DQNConfig } from '../types.js';
// A2C - Advantage Actor-Critic
// NOTE(review): A2CConfig is re-exported from './a2c.js' while most other
// config types live in '../types.js' — presumably intentional; confirm.
export { A2CAlgorithm, createA2C, DEFAULT_A2C_CONFIG, } from './a2c.js';
export type { A2CConfig } from './a2c.js';
// Decision Transformer
export { DecisionTransformer, createDecisionTransformer, DEFAULT_DT_CONFIG, } from './decision-transformer.js';
export type { DecisionTransformerConfig } from '../types.js';
// Q-Learning (Tabular)
export { QLearning, createQLearning, DEFAULT_QLEARNING_CONFIG, } from './q-learning.js';
export type { QLearningConfig } from './q-learning.js';
// SARSA
export { SARSAAlgorithm, createSARSA, DEFAULT_SARSA_CONFIG, } from './sarsa.js';
export type { SARSAConfig } from './sarsa.js';
// Curiosity-Driven Exploration
export { CuriosityModule, createCuriosity, DEFAULT_CURIOSITY_CONFIG, } from './curiosity.js';
export type { CuriosityConfig } from '../types.js';
/**
 * Algorithm factory
 */
import type { RLAlgorithm, RLConfig } from '../types.js';
/**
 * Create an RL algorithm by name
 *
 * Returns the constructed algorithm instance (typed `unknown`; narrow at the
 * call site). Throws for unrecognized algorithm names.
 */
export declare function createAlgorithm(algorithm: RLAlgorithm, config?: Partial<RLConfig>): unknown;
/**
 * Get default configuration for an algorithm
 *
 * Returns a fresh copy of the algorithm's default RLConfig.
 */
export declare function getDefaultConfig(algorithm: RLAlgorithm): RLConfig;
32
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAGH,OAAO,EACL,YAAY,EACZ,SAAS,EACT,kBAAkB,GACnB,MAAM,UAAU,CAAC;AAClB,YAAY,EAAE,SAAS,EAAE,MAAM,aAAa,CAAC;AAG7C,OAAO,EACL,YAAY,EACZ,SAAS,EACT,kBAAkB,GACnB,MAAM,UAAU,CAAC;AAClB,YAAY,EAAE,SAAS,EAAE,MAAM,aAAa,CAAC;AAG7C,OAAO,EACL,YAAY,EACZ,SAAS,EACT,kBAAkB,GACnB,MAAM,UAAU,CAAC;AAClB,YAAY,EAAE,SAAS,EAAE,MAAM,UAAU,CAAC;AAG1C,OAAO,EACL,mBAAmB,EACnB,yBAAyB,EACzB,iBAAiB,GAClB,MAAM,2BAA2B,CAAC;AACnC,YAAY,EAAE,yBAAyB,EAAE,MAAM,aAAa,CAAC;AAG7D,OAAO,EACL,SAAS,EACT,eAAe,EACf,wBAAwB,GACzB,MAAM,iBAAiB,CAAC;AACzB,YAAY,EAAE,eAAe,EAAE,MAAM,iBAAiB,CAAC;AAGvD,OAAO,EACL,cAAc,EACd,WAAW,EACX,oBAAoB,GACrB,MAAM,YAAY,CAAC;AACpB,YAAY,EAAE,WAAW,EAAE,MAAM,YAAY,CAAC;AAG9C,OAAO,EACL,eAAe,EACf,eAAe,EACf,wBAAwB,GACzB,MAAM,gBAAgB,CAAC;AACxB,YAAY,EAAE,eAAe,EAAE,MAAM,aAAa,CAAC;AAEnD;;GAEG;AACH,OAAO,KAAK,EAAE,WAAW,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAC;AASzD;;GAEG;AACH,wBAAgB,eAAe,CAAC,SAAS,EAAE,WAAW,EAAE,MAAM,CAAC,EAAE,OAAO,CAAC,QAAQ,CAAC,GAAG,OAAO,CAoB3F;AAED;;GAEG;AACH,wBAAgB,gBAAgB,CAAC,SAAS,EAAE,WAAW,GAAG,QAAQ,CAmBjE"}
@@ -0,0 +1,74 @@
1
+ /**
2
+ * RL Algorithms Index
3
+ *
4
+ * Exports all reinforcement learning algorithm implementations.
5
+ */
6
+ // PPO - Proximal Policy Optimization
7
+ export { PPOAlgorithm, createPPO, DEFAULT_PPO_CONFIG, } from './ppo.js';
8
+ // DQN - Deep Q-Network
9
+ export { DQNAlgorithm, createDQN, DEFAULT_DQN_CONFIG, } from './dqn.js';
10
+ // A2C - Advantage Actor-Critic
11
+ export { A2CAlgorithm, createA2C, DEFAULT_A2C_CONFIG, } from './a2c.js';
12
+ // Decision Transformer
13
+ export { DecisionTransformer, createDecisionTransformer, DEFAULT_DT_CONFIG, } from './decision-transformer.js';
14
+ // Q-Learning (Tabular)
15
+ export { QLearning, createQLearning, DEFAULT_QLEARNING_CONFIG, } from './q-learning.js';
16
+ // SARSA
17
+ export { SARSAAlgorithm, createSARSA, DEFAULT_SARSA_CONFIG, } from './sarsa.js';
18
+ // Curiosity-Driven Exploration
19
+ export { CuriosityModule, createCuriosity, DEFAULT_CURIOSITY_CONFIG, } from './curiosity.js';
20
+ import { createPPO, DEFAULT_PPO_CONFIG } from './ppo.js';
21
+ import { createDQN, DEFAULT_DQN_CONFIG } from './dqn.js';
22
+ import { createA2C, DEFAULT_A2C_CONFIG } from './a2c.js';
23
+ import { createDecisionTransformer, DEFAULT_DT_CONFIG } from './decision-transformer.js';
24
+ import { createQLearning, DEFAULT_QLEARNING_CONFIG } from './q-learning.js';
25
+ import { createSARSA, DEFAULT_SARSA_CONFIG } from './sarsa.js';
26
+ import { createCuriosity, DEFAULT_CURIOSITY_CONFIG } from './curiosity.js';
27
/**
 * Create an RL algorithm by name.
 *
 * Dispatches to the matching factory with the optional partial config;
 * each factory validates and fills in its own defaults.
 *
 * @throws {Error} when the algorithm name is not recognized.
 */
export function createAlgorithm(algorithm, config) {
    // Guard-clause dispatch instead of a switch; behavior is identical.
    if (algorithm === 'ppo')
        return createPPO(config);
    if (algorithm === 'dqn')
        return createDQN(config);
    if (algorithm === 'a2c')
        return createA2C(config);
    if (algorithm === 'decision-transformer')
        return createDecisionTransformer(config);
    if (algorithm === 'q-learning')
        return createQLearning(config);
    if (algorithm === 'sarsa')
        return createSARSA(config);
    if (algorithm === 'curiosity')
        return createCuriosity(config);
    throw new Error(`Unknown algorithm: ${algorithm}`);
}
51
/**
 * Get default configuration for an algorithm.
 *
 * Returns a fresh shallow copy of the algorithm's default config object so
 * callers may mutate it without affecting the shared defaults.
 *
 * @throws {Error} when the algorithm name is not recognized.
 */
export function getDefaultConfig(algorithm) {
    // Guard-clause dispatch instead of a switch; behavior is identical.
    if (algorithm === 'ppo')
        return { ...DEFAULT_PPO_CONFIG };
    if (algorithm === 'dqn')
        return { ...DEFAULT_DQN_CONFIG };
    if (algorithm === 'a2c')
        return { ...DEFAULT_A2C_CONFIG };
    if (algorithm === 'decision-transformer')
        return { ...DEFAULT_DT_CONFIG };
    if (algorithm === 'q-learning')
        return { ...DEFAULT_QLEARNING_CONFIG };
    if (algorithm === 'sarsa')
        return { ...DEFAULT_SARSA_CONFIG };
    if (algorithm === 'curiosity')
        return { ...DEFAULT_CURIOSITY_CONFIG };
    throw new Error(`Unknown algorithm: ${algorithm}`);
}
74
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,qCAAqC;AACrC,OAAO,EACL,YAAY,EACZ,SAAS,EACT,kBAAkB,GACnB,MAAM,UAAU,CAAC;AAGlB,uBAAuB;AACvB,OAAO,EACL,YAAY,EACZ,SAAS,EACT,kBAAkB,GACnB,MAAM,UAAU,CAAC;AAGlB,+BAA+B;AAC/B,OAAO,EACL,YAAY,EACZ,SAAS,EACT,kBAAkB,GACnB,MAAM,UAAU,CAAC;AAGlB,uBAAuB;AACvB,OAAO,EACL,mBAAmB,EACnB,yBAAyB,EACzB,iBAAiB,GAClB,MAAM,2BAA2B,CAAC;AAGnC,uBAAuB;AACvB,OAAO,EACL,SAAS,EACT,eAAe,EACf,wBAAwB,GACzB,MAAM,iBAAiB,CAAC;AAGzB,QAAQ;AACR,OAAO,EACL,cAAc,EACd,WAAW,EACX,oBAAoB,GACrB,MAAM,YAAY,CAAC;AAGpB,+BAA+B;AAC/B,OAAO,EACL,eAAe,EACf,eAAe,EACf,wBAAwB,GACzB,MAAM,gBAAgB,CAAC;AAOxB,OAAO,EAAE,SAAS,EAAE,kBAAkB,EAAE,MAAM,UAAU,CAAC;AACzD,OAAO,EAAE,SAAS,EAAE,kBAAkB,EAAE,MAAM,UAAU,CAAC;AACzD,OAAO,EAAE,SAAS,EAAE,kBAAkB,EAAE,MAAM,UAAU,CAAC;AACzD,OAAO,EAAE,yBAAyB,EAAE,iBAAiB,EAAE,MAAM,2BAA2B,CAAC;AACzF,OAAO,EAAE,eAAe,EAAE,wBAAwB,EAAE,MAAM,iBAAiB,CAAC;AAC5E,OAAO,EAAE,WAAW,EAAE,oBAAoB,EAAE,MAAM,YAAY,CAAC;AAC/D,OAAO,EAAE,eAAe,EAAE,wBAAwB,EAAE,MAAM,gBAAgB,CAAC;AAE3E;;GAEG;AACH,MAAM,UAAU,eAAe,CAAC,SAAsB,EAAE,MAA0B;IAChF,oEAAoE;IACpE,QAAQ,SAAS,EAAE,CAAC;QAClB,KAAK,KAAK;YACR,OAAO,SAAS,CAAC,MAAyC,CAAC,CAAC;QAC9D,KAAK,KAAK;YACR,OAAO,SAAS,CAAC,MAAyC,CAAC,CAAC;QAC9D,KAAK,KAAK;YACR,OAAO,SAAS,CAAC,MAAyC,CAAC,CAAC;QAC9D,KAAK,sBAAsB;YACzB,OAAO,yBAAyB,CAAC,MAAyD,CAAC,CAAC;QAC9F,KAAK,YAAY;YACf,OAAO,eAAe,CAAC,MAA+C,CAAC,CAAC;QAC1E,KAAK,OAAO;YACV,OAAO,WAAW,CAAC,MAA2C,CAAC,CAAC;QAClE,KAAK,WAAW;YACd,OAAO,eAAe,CAAC,MAA+C,CAAC,CAAC;QAC1E;YACE,MAAM,IAAI,KAAK,CAAC,sBAAsB,SAAS,EAAE,CAAC,CAAC;IACvD,CAAC;AACH,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,gBAAgB,CAAC,SAAsB;IACrD,QAAQ,SAAS,EAAE,CAAC;QAClB,KAAK,KAAK;YACR,OAAO,EAAE,GAAG,kBAAkB,EAAE,CAAC;QACnC,KAAK,KAAK;YACR,OAAO,EAAE,GAAG,kBAAkB,EAAE,CAAC;QACnC,KAAK,KAAK;YACR,OAAO,EAAE,GAAG,kBAAkB,EAAE,CAAC;QACnC,KAAK,sBAAsB;YACzB,OAAO,EAAE,GAAG,iBAAiB,EAAE,CAAC;QAClC,KAAK,YAAY;YACf,OAAO,EAAE,GAAG,wBAAwB,EAAE,CAAC;QACzC,KAAK,OAAO;YACV,OAAO,EAAE,GAAG,oBAAoB,EAAE,CAAC;QACrC,KAAK,WAAW;YACd,OAAO,EAAE,GAA
G,wBAAwB,EAAE,CAAC;QACzC;YACE,MAAM,IAAI,KAAK,CAAC,sBAAsB,SAAS,EAAE,CAAC,CAAC;IACvD,CAAC;AACH,CAAC"}
@@ -0,0 +1,122 @@
1
+ /**
2
+ * RL Algorithms Index
3
+ *
4
+ * Exports all reinforcement learning algorithm implementations.
5
+ */
6
+
7
+ // PPO - Proximal Policy Optimization
8
+ export {
9
+ PPOAlgorithm,
10
+ createPPO,
11
+ DEFAULT_PPO_CONFIG,
12
+ } from './ppo.js';
13
+ export type { PPOConfig } from '../types.js';
14
+
15
+ // DQN - Deep Q-Network
16
+ export {
17
+ DQNAlgorithm,
18
+ createDQN,
19
+ DEFAULT_DQN_CONFIG,
20
+ } from './dqn.js';
21
+ export type { DQNConfig } from '../types.js';
22
+
23
+ // A2C - Advantage Actor-Critic
24
+ export {
25
+ A2CAlgorithm,
26
+ createA2C,
27
+ DEFAULT_A2C_CONFIG,
28
+ } from './a2c.js';
29
+ export type { A2CConfig } from './a2c.js';
30
+
31
+ // Decision Transformer
32
+ export {
33
+ DecisionTransformer,
34
+ createDecisionTransformer,
35
+ DEFAULT_DT_CONFIG,
36
+ } from './decision-transformer.js';
37
+ export type { DecisionTransformerConfig } from '../types.js';
38
+
39
+ // Q-Learning (Tabular)
40
+ export {
41
+ QLearning,
42
+ createQLearning,
43
+ DEFAULT_QLEARNING_CONFIG,
44
+ } from './q-learning.js';
45
+ export type { QLearningConfig } from './q-learning.js';
46
+
47
+ // SARSA
48
+ export {
49
+ SARSAAlgorithm,
50
+ createSARSA,
51
+ DEFAULT_SARSA_CONFIG,
52
+ } from './sarsa.js';
53
+ export type { SARSAConfig } from './sarsa.js';
54
+
55
+ // Curiosity-Driven Exploration
56
+ export {
57
+ CuriosityModule,
58
+ createCuriosity,
59
+ DEFAULT_CURIOSITY_CONFIG,
60
+ } from './curiosity.js';
61
+ export type { CuriosityConfig } from '../types.js';
62
+
63
+ /**
64
+ * Algorithm factory
65
+ */
66
+ import type { RLAlgorithm, RLConfig } from '../types.js';
67
+ import { createPPO, DEFAULT_PPO_CONFIG } from './ppo.js';
68
+ import { createDQN, DEFAULT_DQN_CONFIG } from './dqn.js';
69
+ import { createA2C, DEFAULT_A2C_CONFIG } from './a2c.js';
70
+ import { createDecisionTransformer, DEFAULT_DT_CONFIG } from './decision-transformer.js';
71
+ import { createQLearning, DEFAULT_QLEARNING_CONFIG } from './q-learning.js';
72
+ import { createSARSA, DEFAULT_SARSA_CONFIG } from './sarsa.js';
73
+ import { createCuriosity, DEFAULT_CURIOSITY_CONFIG } from './curiosity.js';
74
+
75
+ /**
76
+ * Create an RL algorithm by name
77
+ */
78
+ export function createAlgorithm(algorithm: RLAlgorithm, config?: Partial<RLConfig>): unknown {
79
+ // Use type assertions since config is validated by algorithm switch
80
+ switch (algorithm) {
81
+ case 'ppo':
82
+ return createPPO(config as Parameters<typeof createPPO>[0]);
83
+ case 'dqn':
84
+ return createDQN(config as Parameters<typeof createDQN>[0]);
85
+ case 'a2c':
86
+ return createA2C(config as Parameters<typeof createA2C>[0]);
87
+ case 'decision-transformer':
88
+ return createDecisionTransformer(config as Parameters<typeof createDecisionTransformer>[0]);
89
+ case 'q-learning':
90
+ return createQLearning(config as Parameters<typeof createQLearning>[0]);
91
+ case 'sarsa':
92
+ return createSARSA(config as Parameters<typeof createSARSA>[0]);
93
+ case 'curiosity':
94
+ return createCuriosity(config as Parameters<typeof createCuriosity>[0]);
95
+ default:
96
+ throw new Error(`Unknown algorithm: ${algorithm}`);
97
+ }
98
+ }
99
+
100
+ /**
101
+ * Get default configuration for an algorithm
102
+ */
103
+ export function getDefaultConfig(algorithm: RLAlgorithm): RLConfig {
104
+ switch (algorithm) {
105
+ case 'ppo':
106
+ return { ...DEFAULT_PPO_CONFIG };
107
+ case 'dqn':
108
+ return { ...DEFAULT_DQN_CONFIG };
109
+ case 'a2c':
110
+ return { ...DEFAULT_A2C_CONFIG };
111
+ case 'decision-transformer':
112
+ return { ...DEFAULT_DT_CONFIG };
113
+ case 'q-learning':
114
+ return { ...DEFAULT_QLEARNING_CONFIG };
115
+ case 'sarsa':
116
+ return { ...DEFAULT_SARSA_CONFIG };
117
+ case 'curiosity':
118
+ return { ...DEFAULT_CURIOSITY_CONFIG };
119
+ default:
120
+ throw new Error(`Unknown algorithm: ${algorithm}`);
121
+ }
122
+ }
@@ -0,0 +1,72 @@
1
/**
 * Proximal Policy Optimization (PPO)
 *
 * Implements PPO algorithm for stable policy learning with:
 * - Clipped surrogate objective
 * - GAE (Generalized Advantage Estimation)
 * - Value function clipping
 * - Entropy bonus
 *
 * Performance Target: <10ms per update step
 */
import type { PPOConfig, Trajectory } from '../types.js';
/**
 * Default PPO configuration
 */
export declare const DEFAULT_PPO_CONFIG: PPOConfig;
/**
 * PPO Algorithm Implementation
 */
export declare class PPOAlgorithm {
    private config;
    // Linear policy/value head weights (simplified model)
    private policyWeights;
    private valueWeights;
    // Momentum buffers for the SGD optimizer
    private policyMomentum;
    private valueMomentum;
    // Experience buffer consumed (and cleared) by update()
    private buffer;
    // Aggregate training statistics exposed via getStats()
    private updateCount;
    private totalLoss;
    private approxKL;
    private clipFraction;
    constructor(config?: Partial<PPOConfig>);
    /**
     * Add experience from trajectory
     */
    addExperience(trajectory: Trajectory): void;
    /**
     * Perform PPO update
     * Target: <10ms
     *
     * Returns per-mini-batch averages of the losses for this update.
     */
    update(): {
        policyLoss: number;
        valueLoss: number;
        entropy: number;
    };
    /**
     * Get action from policy
     *
     * Samples a discrete action from the softmax policy for `state`.
     */
    getAction(state: Float32Array): {
        action: number;
        logProb: number;
        value: number;
    };
    /**
     * Get statistics
     */
    getStats(): Record<string, number>;
    private computeValue;
    private computeLogits;
    private computeLogProb;
    private hashAction;
    private softmax;
    private sampleAction;
    private computeGAE;
    private computeReturns;
    private shuffleBuffer;
    private updateMiniBatch;
}
/**
 * Factory function
 */
export declare function createPPO(config?: Partial<PPOConfig>): PPOAlgorithm;
72
+ //# sourceMappingURL=ppo.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ppo.d.ts","sourceRoot":"","sources":["ppo.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AAEH,OAAO,KAAK,EACV,SAAS,EACT,UAAU,EAEX,MAAM,aAAa,CAAC;AAErB;;GAEG;AACH,eAAO,MAAM,kBAAkB,EAAE,SAahC,CAAC;AAeF;;GAEG;AACH,qBAAa,YAAY;IACvB,OAAO,CAAC,MAAM,CAAY;IAG1B,OAAO,CAAC,aAAa,CAAe;IACpC,OAAO,CAAC,YAAY,CAAe;IAGnC,OAAO,CAAC,cAAc,CAAe;IACrC,OAAO,CAAC,aAAa,CAAe;IAGpC,OAAO,CAAC,MAAM,CAAuB;IAGrC,OAAO,CAAC,WAAW,CAAK;IACxB,OAAO,CAAC,SAAS,CAAK;IACtB,OAAO,CAAC,QAAQ,CAAK;IACrB,OAAO,CAAC,YAAY,CAAK;gBAEb,MAAM,GAAE,OAAO,CAAC,SAAS,CAAM;IAkB3C;;OAEG;IACH,aAAa,CAAC,UAAU,EAAE,UAAU,GAAG,IAAI;IAkC3C;;;OAGG;IACH,MAAM,IAAI;QAAE,UAAU,EAAE,MAAM,CAAC;QAAC,SAAS,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAE;IAkEpE;;OAEG;IACH,SAAS,CAAC,KAAK,EAAE,YAAY,GAAG;QAAE,MAAM,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAC;QAAC,KAAK,EAAE,MAAM,CAAA;KAAE;IAYlF;;OAEG;IACH,QAAQ,IAAI,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;IAclC,OAAO,CAAC,YAAY;IAQpB,OAAO,CAAC,aAAa;IAcrB,OAAO,CAAC,cAAc;IAOtB,OAAO,CAAC,UAAU;IASlB,OAAO,CAAC,OAAO;IAiBf,OAAO,CAAC,YAAY;IAUpB,OAAO,CAAC,UAAU;IAclB,OAAO,CAAC,cAAc;IAYtB,OAAO,CAAC,aAAa;IAOrB,OAAO,CAAC,eAAe;CA8FxB;AAED;;GAEG;AACH,wBAAgB,SAAS,CAAC,MAAM,CAAC,EAAE,OAAO,CAAC,SAAS,CAAC,GAAG,YAAY,CAEnE"}
@@ -0,0 +1,331 @@
1
+ /**
2
+ * Proximal Policy Optimization (PPO)
3
+ *
4
+ * Implements PPO algorithm for stable policy learning with:
5
+ * - Clipped surrogate objective
6
+ * - GAE (Generalized Advantage Estimation)
7
+ * - Value function clipping
8
+ * - Entropy bonus
9
+ *
10
+ * Performance Target: <10ms per update step
11
+ */
12
/**
 * Default PPO configuration
 *
 * NOTE(review): entropyCoef, valueLossCoef, and maxGradNorm are not
 * referenced by this simplified implementation's update path — presumably
 * reserved for the full implementation; confirm against ppo.ts.
 */
export const DEFAULT_PPO_CONFIG = {
    algorithm: 'ppo',
    learningRate: 0.0003, // SGD-with-momentum step size
    gamma: 0.99, // discount factor for returns and GAE
    entropyCoef: 0.01,
    valueLossCoef: 0.5,
    maxGradNorm: 0.5,
    epochs: 4, // optimization passes over the buffer per update()
    miniBatchSize: 64, // also the minimum buffer size before update() runs
    clipRange: 0.2, // epsilon of the clipped surrogate objective
    clipRangeVf: null, // value-function clipping disabled when null
    targetKL: 0.01, // mini-batch loop stops early above 1.5x this KL
    gaeLambda: 0.95, // GAE lambda (bias/variance trade-off)
};
29
/**
 * PPO Algorithm Implementation
 *
 * Simplified PPO over linear policy/value heads on a fixed 768-dim feature
 * vector, trained with momentum SGD on the clipped surrogate objective with
 * GAE advantages. Designed to keep each update() under 10ms.
 *
 * Fix vs. previous revision: update() now records approxKL, clipFraction,
 * and totalLoss, which were computed but never stored, so getStats()
 * always reported zeros for avgLoss/approxKL/clipFraction.
 */
export class PPOAlgorithm {
    config;
    // Policy network weights (simplified linear model for speed)
    policyWeights;
    valueWeights;
    // Optimizer state (momentum buffers for SGD)
    policyMomentum;
    valueMomentum;
    // Experience buffer; filled by addExperience(), cleared by update()
    buffer = [];
    // Statistics exposed via getStats()
    updateCount = 0;
    totalLoss = 0;
    approxKL = 0;
    clipFraction = 0;
    /**
     * @param {object} [config] - partial PPO config, merged over DEFAULT_PPO_CONFIG.
     */
    constructor(config = {}) {
        this.config = { ...DEFAULT_PPO_CONFIG, ...config };
        // Initialize weights (768 input dim, simplified)
        const dim = 768;
        this.policyWeights = new Float32Array(dim);
        this.valueWeights = new Float32Array(dim);
        this.policyMomentum = new Float32Array(dim);
        this.valueMomentum = new Float32Array(dim);
        // Xavier initialization
        const scale = Math.sqrt(2 / dim);
        for (let i = 0; i < dim; i++) {
            this.policyWeights[i] = (Math.random() - 0.5) * scale;
            this.valueWeights[i] = (Math.random() - 0.5) * scale;
        }
    }
    /**
     * Add experience from trajectory
     *
     * Computes GAE advantages and discounted returns for the trajectory and
     * appends one buffer entry per step.
     */
    addExperience(trajectory) {
        if (trajectory.steps.length === 0)
            return;
        // Compute values for each step.
        // NOTE(review): values and log-probs are computed from stateAfter (the
        // post-action state); PPO conventionally uses the pre-action state —
        // confirm against the Trajectory schema.
        const values = trajectory.steps.map(step => this.computeValue(step.stateAfter));
        // Compute advantages using GAE
        const advantages = this.computeGAE(trajectory.steps.map(s => s.reward), values);
        // Compute returns (discounted reward-to-go)
        const returns = this.computeReturns(trajectory.steps.map(s => s.reward));
        // Add to buffer
        for (let i = 0; i < trajectory.steps.length; i++) {
            const step = trajectory.steps[i];
            this.buffer.push({
                state: step.stateAfter,
                action: this.hashAction(step.action),
                reward: step.reward,
                value: values[i],
                logProb: this.computeLogProb(step.stateAfter, step.action),
                advantage: advantages[i],
                return_: returns[i],
            });
        }
    }
    /**
     * Perform PPO update
     * Target: <10ms
     *
     * Runs `epochs` passes of mini-batch updates over the buffer, then clears
     * it. Returns per-mini-batch averages of the losses; returns zeros (and
     * keeps the buffer) when there is not yet a full mini-batch of data.
     */
    update() {
        const startTime = performance.now();
        if (this.buffer.length < this.config.miniBatchSize) {
            return { policyLoss: 0, valueLoss: 0, entropy: 0 };
        }
        // Normalize advantages to zero mean / unit variance
        const advantages = this.buffer.map(e => e.advantage);
        const advMean = advantages.reduce((a, b) => a + b, 0) / advantages.length;
        const advStd = Math.sqrt(advantages.reduce((a, b) => a + (b - advMean) ** 2, 0) / advantages.length) + 1e-8;
        for (const exp of this.buffer) {
            exp.advantage = (exp.advantage - advMean) / advStd;
        }
        let totalPolicyLoss = 0;
        let totalValueLoss = 0;
        let totalEntropy = 0;
        let totalClipFrac = 0;
        let totalKL = 0;
        let numUpdates = 0;
        // Multiple epochs
        for (let epoch = 0; epoch < this.config.epochs; epoch++) {
            // Shuffle buffer
            this.shuffleBuffer();
            // Process mini-batches
            for (let i = 0; i < this.buffer.length; i += this.config.miniBatchSize) {
                const batch = this.buffer.slice(i, i + this.config.miniBatchSize);
                // Skip trailing fragments smaller than half a mini-batch
                if (batch.length < this.config.miniBatchSize / 2)
                    continue;
                const result = this.updateMiniBatch(batch);
                totalPolicyLoss += result.policyLoss;
                totalValueLoss += result.valueLoss;
                totalEntropy += result.entropy;
                totalClipFrac += result.clipFrac;
                totalKL += result.kl;
                numUpdates++;
                // Early stopping if KL too high.
                // NOTE(review): this only breaks the mini-batch loop, not the
                // epoch loop — confirm whether full early stopping was intended.
                if (result.kl > this.config.targetKL * 1.5) {
                    break;
                }
            }
        }
        // Record aggregate statistics (bug fix: approxKL, clipFraction and
        // totalLoss were previously computed but never stored, so getStats()
        // always reported zeros for them).
        if (numUpdates > 0) {
            this.approxKL = totalKL / numUpdates;
            this.clipFraction = totalClipFrac / numUpdates;
            this.totalLoss += (totalPolicyLoss + totalValueLoss) / numUpdates;
        }
        // Clear buffer (on-policy: data is stale after the update)
        this.buffer = [];
        this.updateCount++;
        const elapsed = performance.now() - startTime;
        if (elapsed > 10) {
            console.warn(`PPO update exceeded target: ${elapsed.toFixed(2)}ms > 10ms`);
        }
        return {
            policyLoss: numUpdates > 0 ? totalPolicyLoss / numUpdates : 0,
            valueLoss: numUpdates > 0 ? totalValueLoss / numUpdates : 0,
            entropy: numUpdates > 0 ? totalEntropy / numUpdates : 0,
        };
    }
    /**
     * Get action from policy
     *
     * Samples a discrete action (0-3) from the softmax policy.
     */
    getAction(state) {
        const logits = this.computeLogits(state);
        const probs = this.softmax(logits);
        const action = this.sampleAction(probs);
        return {
            action,
            logProb: Math.log(probs[action] + 1e-8),
            value: this.computeValue(state),
        };
    }
    /**
     * Get statistics
     */
    getStats() {
        return {
            updateCount: this.updateCount,
            bufferSize: this.buffer.length,
            avgLoss: this.updateCount > 0 ? this.totalLoss / this.updateCount : 0,
            approxKL: this.approxKL,
            clipFraction: this.clipFraction,
        };
    }
    // ==========================================================================
    // Private Methods
    // ==========================================================================
    // Linear value head: dot(state, valueWeights), truncated to the shorter length.
    computeValue(state) {
        let value = 0;
        for (let i = 0; i < Math.min(state.length, this.valueWeights.length); i++) {
            value += state[i] * this.valueWeights[i];
        }
        return value;
    }
    // Per-action logits from a single shared weight vector, scaled by (1 + a*0.1).
    computeLogits(state) {
        // Simplified: 4 discrete actions
        const numActions = 4;
        const logits = new Float32Array(numActions);
        for (let a = 0; a < numActions; a++) {
            for (let i = 0; i < Math.min(state.length, this.policyWeights.length); i++) {
                logits[a] += state[i] * this.policyWeights[i] * (1 + a * 0.1);
            }
        }
        return logits;
    }
    // Log-probability of a (string) action under the current policy.
    computeLogProb(state, action) {
        const logits = this.computeLogits(state);
        const probs = this.softmax(logits);
        const actionIdx = this.hashAction(action);
        return Math.log(probs[actionIdx] + 1e-8);
    }
    // Deterministic string hash to an action index (0-3).
    hashAction(action) {
        let hash = 0;
        for (let i = 0; i < action.length; i++) {
            hash = (hash * 31 + action.charCodeAt(i)) % 4;
        }
        return hash;
    }
    // Numerically-stable softmax (max-subtracted).
    softmax(logits) {
        const max = Math.max(...logits);
        const exps = new Float32Array(logits.length);
        let sum = 0;
        for (let i = 0; i < logits.length; i++) {
            exps[i] = Math.exp(logits[i] - max);
            sum += exps[i];
        }
        for (let i = 0; i < exps.length; i++) {
            exps[i] /= sum;
        }
        return exps;
    }
    // Sample an index from a categorical distribution via inverse CDF.
    sampleAction(probs) {
        const r = Math.random();
        let cumSum = 0;
        for (let i = 0; i < probs.length; i++) {
            cumSum += probs[i];
            if (r < cumSum)
                return i;
        }
        return probs.length - 1;
    }
    // Generalized Advantage Estimation, backward pass; terminal value is 0.
    computeGAE(rewards, values) {
        const advantages = new Array(rewards.length).fill(0);
        let lastGae = 0;
        for (let t = rewards.length - 1; t >= 0; t--) {
            const nextValue = t < rewards.length - 1 ? values[t + 1] : 0;
            const delta = rewards[t] + this.config.gamma * nextValue - values[t];
            lastGae = delta + this.config.gamma * this.config.gaeLambda * lastGae;
            advantages[t] = lastGae;
        }
        return advantages;
    }
    // Discounted reward-to-go for each step.
    computeReturns(rewards) {
        const returns = new Array(rewards.length).fill(0);
        let cumReturn = 0;
        for (let t = rewards.length - 1; t >= 0; t--) {
            cumReturn = rewards[t] + this.config.gamma * cumReturn;
            returns[t] = cumReturn;
        }
        return returns;
    }
    // In-place Fisher-Yates shuffle of the experience buffer.
    shuffleBuffer() {
        for (let i = this.buffer.length - 1; i > 0; i--) {
            const j = Math.floor(Math.random() * (i + 1));
            [this.buffer[i], this.buffer[j]] = [this.buffer[j], this.buffer[i]];
        }
    }
    // One gradient step on a mini-batch; returns per-sample average metrics.
    updateMiniBatch(batch) {
        let policyLoss = 0;
        let valueLoss = 0;
        let entropy = 0;
        let clipFrac = 0;
        let kl = 0;
        const policyGrad = new Float32Array(this.policyWeights.length);
        const valueGrad = new Float32Array(this.valueWeights.length);
        for (const exp of batch) {
            // Current policy
            const logits = this.computeLogits(exp.state);
            const probs = this.softmax(logits);
            const newLogProb = Math.log(probs[exp.action] + 1e-8);
            const currentValue = this.computeValue(exp.state);
            // Importance ratio pi_new / pi_old
            const ratio = Math.exp(newLogProb - exp.logProb);
            // Clipped surrogate objective
            const surr1 = ratio * exp.advantage;
            const surr2 = Math.max(Math.min(ratio, 1 + this.config.clipRange), 1 - this.config.clipRange) * exp.advantage;
            const policyLossI = -Math.min(surr1, surr2);
            policyLoss += policyLossI;
            // Track clipping
            if (Math.abs(ratio - 1) > this.config.clipRange) {
                clipFrac++;
            }
            // KL divergence approximation
            kl += (exp.logProb - newLogProb);
            // Value loss, optionally clipped around the old value prediction
            let valueLossI;
            if (this.config.clipRangeVf !== null) {
                const valuePred = currentValue;
                const valueClipped = exp.value + Math.max(Math.min(valuePred - exp.value, this.config.clipRangeVf), -this.config.clipRangeVf);
                const vf1 = (valuePred - exp.return_) ** 2;
                const vf2 = (valueClipped - exp.return_) ** 2;
                valueLossI = Math.max(vf1, vf2);
            }
            else {
                valueLossI = (currentValue - exp.return_) ** 2;
            }
            valueLoss += valueLossI;
            // Entropy of the action distribution
            let entropyI = 0;
            for (const p of probs) {
                if (p > 0)
                    entropyI -= p * Math.log(p);
            }
            entropy += entropyI;
            // Compute gradients (simplified: loss-scaled state features)
            for (let i = 0; i < Math.min(exp.state.length, policyGrad.length); i++) {
                policyGrad[i] += exp.state[i] * policyLossI * 0.01;
                valueGrad[i] += exp.state[i] * valueLossI * 0.01;
            }
        }
        // Apply gradients with momentum
        const lr = this.config.learningRate;
        const beta = 0.9;
        for (let i = 0; i < this.policyWeights.length; i++) {
            this.policyMomentum[i] = beta * this.policyMomentum[i] + (1 - beta) * policyGrad[i];
            this.policyWeights[i] -= lr * this.policyMomentum[i];
            this.valueMomentum[i] = beta * this.valueMomentum[i] + (1 - beta) * valueGrad[i];
            this.valueWeights[i] -= lr * this.valueMomentum[i];
        }
        return {
            policyLoss: policyLoss / batch.length,
            valueLoss: valueLoss / batch.length,
            entropy: entropy / batch.length,
            clipFrac: clipFrac / batch.length,
            kl: kl / batch.length,
        };
    }
}
325
/**
 * Factory function
 *
 * Creates a PPOAlgorithm with the given partial configuration merged over
 * DEFAULT_PPO_CONFIG.
 *
 * @param {object} [config] - partial PPO configuration overrides.
 * @returns {PPOAlgorithm} a freshly initialized algorithm instance.
 */
export function createPPO(config) {
    return new PPOAlgorithm(config);
}
331
+ //# sourceMappingURL=ppo.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ppo.js","sourceRoot":"","sources":["ppo.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AAQH;;GAEG;AACH,MAAM,CAAC,MAAM,kBAAkB,GAAc;IAC3C,SAAS,EAAE,KAAK;IAChB,YAAY,EAAE,MAAM;IACpB,KAAK,EAAE,IAAI;IACX,WAAW,EAAE,IAAI;IACjB,aAAa,EAAE,GAAG;IAClB,WAAW,EAAE,GAAG;IAChB,MAAM,EAAE,CAAC;IACT,aAAa,EAAE,EAAE;IACjB,SAAS,EAAE,GAAG;IACd,WAAW,EAAE,IAAI;IACjB,QAAQ,EAAE,IAAI;IACd,SAAS,EAAE,IAAI;CAChB,CAAC;AAeF;;GAEG;AACH,MAAM,OAAO,YAAY;IACf,MAAM,CAAY;IAE1B,6DAA6D;IACrD,aAAa,CAAe;IAC5B,YAAY,CAAe;IAEnC,kBAAkB;IACV,cAAc,CAAe;IAC7B,aAAa,CAAe;IAEpC,oBAAoB;IACZ,MAAM,GAAoB,EAAE,CAAC;IAErC,aAAa;IACL,WAAW,GAAG,CAAC,CAAC;IAChB,SAAS,GAAG,CAAC,CAAC;IACd,QAAQ,GAAG,CAAC,CAAC;IACb,YAAY,GAAG,CAAC,CAAC;IAEzB,YAAY,SAA6B,EAAE;QACzC,IAAI,CAAC,MAAM,GAAG,EAAE,GAAG,kBAAkB,EAAE,GAAG,MAAM,EAAE,CAAC;QAEnD,iDAAiD;QACjD,MAAM,GAAG,GAAG,GAAG,CAAC;QAChB,IAAI,CAAC,aAAa,GAAG,IAAI,YAAY,CAAC,GAAG,CAAC,CAAC;QAC3C,IAAI,CAAC,YAAY,GAAG,IAAI,YAAY,CAAC,GAAG,CAAC,CAAC;QAC1C,IAAI,CAAC,cAAc,GAAG,IAAI,YAAY,CAAC,GAAG,CAAC,CAAC;QAC5C,IAAI,CAAC,aAAa,GAAG,IAAI,YAAY,CAAC,GAAG,CAAC,CAAC;QAE3C,wBAAwB;QACxB,MAAM,KAAK,GAAG,IAAI,CAAC,IAAI,CAAC,CAAC,GAAG,GAAG,CAAC,CAAC;QACjC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC;YAC7B,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,GAAG,CAAC,GAAG,KAAK,CAAC;YACtD,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,GAAG,CAAC,GAAG,KAAK,CAAC;QACvD,CAAC;IACH,CAAC;IAED;;OAEG;IACH,aAAa,CAAC,UAAsB;QAClC,IAAI,UAAU,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC;YAAE,OAAO;QAE1C,+BAA+B;QAC/B,MAAM,MAAM,GAAG,UAAU,CAAC,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CACzC,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC,UAAU,CAAC,CACnC,CAAC;QAEF,+BAA+B;QAC/B,MAAM,UAAU,GAAG,IAAI,CAAC,UAAU,CAChC,UAAU,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,EACnC,MAAM,CACP,CAAC;QAEF,kBAAkB;QAClB,MAAM,OAAO,GAAG,IAAI,CAAC,cAAc,CACjC,UAAU,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,CACpC,CAAC;QAEF,gBAAgB;QAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,CAAC,KAAK
,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACjD,MAAM,IAAI,GAAG,UAAU,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;YACjC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC;gBACf,KAAK,EAAE,IAAI,CAAC,UAAU;gBACtB,MAAM,EAAE,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,MAAM,CAAC;gBACpC,MAAM,EAAE,IAAI,CAAC,MAAM;gBACnB,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC;gBAChB,OAAO,EAAE,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,CAAC;gBAC1D,SAAS,EAAE,UAAU,CAAC,CAAC,CAAC;gBACxB,OAAO,EAAE,OAAO,CAAC,CAAC,CAAC;aACpB,CAAC,CAAC;QACL,CAAC;IACH,CAAC;IAED;;;OAGG;IACH,MAAM;QACJ,MAAM,SAAS,GAAG,WAAW,CAAC,GAAG,EAAE,CAAC;QAEpC,IAAI,IAAI,CAAC,MAAM,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,EAAE,CAAC;YACnD,OAAO,EAAE,UAAU,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,OAAO,EAAE,CAAC,EAAE,CAAC;QACrD,CAAC;QAED,uBAAuB;QACvB,MAAM,UAAU,GAAG,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC;QACrD,MAAM,OAAO,GAAG,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,GAAG,UAAU,CAAC,MAAM,CAAC;QAC1E,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CACtB,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,OAAO,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,GAAG,UAAU,CAAC,MAAM,CAC3E,GAAG,IAAI,CAAC;QAET,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;YAC9B,GAAG,CAAC,SAAS,GAAG,CAAC,GAAG,CAAC,SAAS,GAAG,OAAO,CAAC,GAAG,MAAM,CAAC;QACrD,CAAC;QAED,IAAI,eAAe,GAAG,CAAC,CAAC;QACxB,IAAI,cAAc,GAAG,CAAC,CAAC;QACvB,IAAI,YAAY,GAAG,CAAC,CAAC;QACrB,IAAI,aAAa,GAAG,CAAC,CAAC;QACtB,IAAI,OAAO,GAAG,CAAC,CAAC;QAChB,IAAI,UAAU,GAAG,CAAC,CAAC;QAEnB,kBAAkB;QAClB,KAAK,IAAI,KAAK,GAAG,CAAC,EAAE,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,KAAK,EAAE,EAAE,CAAC;YACxD,iBAAiB;YACjB,IAAI,CAAC,aAAa,EAAE,CAAC;YAErB,uBAAuB;YACvB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,IAAI,IAAI,CAAC,MAAM,CAAC,aAAa,EAAE,CAAC;gBACvE,MAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC;gBAClE,IAAI,KAAK,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,CAAC;oBAAE,SAAS;gBAE3D,MAAM,MAAM,GAAG,IAAI,CAAC,eAAe,CAAC,KAAK,CAAC,CAAC;gBAC3
C,eAAe,IAAI,MAAM,CAAC,UAAU,CAAC;gBACrC,cAAc,IAAI,MAAM,CAAC,SAAS,CAAC;gBACnC,YAAY,IAAI,MAAM,CAAC,OAAO,CAAC;gBAC/B,aAAa,IAAI,MAAM,CAAC,QAAQ,CAAC;gBACjC,OAAO,IAAI,MAAM,CAAC,EAAE,CAAC;gBACrB,UAAU,EAAE,CAAC;gBAEb,gCAAgC;gBAChC,IAAI,MAAM,CAAC,EAAE,GAAG,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,GAAG,EAAE,CAAC;oBAC3C,MAAM;gBACR,CAAC;YACH,CAAC;QACH,CAAC;QAED,eAAe;QACf,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC;QACjB,IAAI,CAAC,WAAW,EAAE,CAAC;QAEnB,MAAM,OAAO,GAAG,WAAW,CAAC,GAAG,EAAE,GAAG,SAAS,CAAC;QAC9C,IAAI,OAAO,GAAG,EAAE,EAAE,CAAC;YACjB,OAAO,CAAC,IAAI,CAAC,+BAA+B,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC;QAC7E,CAAC;QAED,OAAO;YACL,UAAU,EAAE,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,eAAe,GAAG,UAAU,CAAC,CAAC,CAAC,CAAC;YAC7D,SAAS,EAAE,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,cAAc,GAAG,UAAU,CAAC,CAAC,CAAC,CAAC;YAC3D,OAAO,EAAE,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,YAAY,GAAG,UAAU,CAAC,CAAC,CAAC,CAAC;SACxD,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,SAAS,CAAC,KAAmB;QAC3B,MAAM,MAAM,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC;QACzC,MAAM,KAAK,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;QACnC,MAAM,MAAM,GAAG,IAAI,CAAC,YAAY,CAAC,KAAK,CAAC,CAAC;QAExC,OAAO;YACL,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC;YACvC,KAAK,EAAE,IAAI,CAAC,YAAY,CAAC,KAAK,CAAC;SAChC,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,QAAQ;QACN,OAAO;YACL,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,UAAU,EAAE,IAAI,CAAC,MAAM,CAAC,MAAM;YAC9B,OAAO,EAAE,IAAI,CAAC,WAAW,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;YACrE,QAAQ,EAAE,IAAI,CAAC,QAAQ;YACvB,YAAY,EAAE,IAAI,CAAC,YAAY;SAChC,CAAC;IACJ,CAAC;IAED,6EAA6E;IAC7E,kBAAkB;IAClB,6EAA6E;IAErE,YAAY,CAAC,KAAmB;QACtC,IAAI,KAAK,GAAG,CAAC,CAAC;QACd,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;YAC1E,KAAK,IAAI,KAAK,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC;QAC3C,CAAC;QACD,OAAO,KAAK,CAAC;IACf,CAAC;IAEO,aAAa,CAAC,KAAmB;QACvC,iCAAiC;QACjC,MAAM,UAAU,GAAG,CAAC,CAAC;QACrB,MAAM,MAAM,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QAE5C,KAAK,IAAI,C
AAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,CAAC,EAAE,EAAE,CAAC;YACpC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;gBAC3E,MAAM,CAAC,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC,CAAC;YAChE,CAAC;QACH,CAAC;QAED,OAAO,MAAM,CAAC;IAChB,CAAC;IAEO,cAAc,CAAC,KAAmB,EAAE,MAAc;QACxD,MAAM,MAAM,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC;QACzC,MAAM,KAAK,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;QACnC,MAAM,SAAS,GAAG,IAAI,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;QAC1C,OAAO,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,SAAS,CAAC,GAAG,IAAI,CAAC,CAAC;IAC3C,CAAC;IAEO,UAAU,CAAC,MAAc;QAC/B,oCAAoC;QACpC,IAAI,IAAI,GAAG,CAAC,CAAC;QACb,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACvC,IAAI,GAAG,CAAC,IAAI,GAAG,EAAE,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;QAChD,CAAC;QACD,OAAO,IAAI,CAAC;IACd,CAAC;IAEO,OAAO,CAAC,MAAoB;QAClC,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,CAAC,GAAG,MAAM,CAAC,CAAC;QAChC,MAAM,IAAI,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QAC7C,IAAI,GAAG,GAAG,CAAC,CAAC;QAEZ,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACvC,IAAI,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,GAAG,CAAC,CAAC;YACpC,GAAG,IAAI,IAAI,CAAC,CAAC,CAAC,CAAC;QACjB,CAAC;QAED,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACrC,IAAI,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC;QACjB,CAAC;QAED,OAAO,IAAI,CAAC;IACd,CAAC;IAEO,YAAY,CAAC,KAAmB;QACtC,MAAM,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,CAAC;QACxB,IAAI,MAAM,GAAG,CAAC,CAAC;QACf,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACtC,MAAM,IAAI,KAAK,CAAC,CAAC,CAAC,CAAC;YACnB,IAAI,CAAC,GAAG,MAAM;gBAAE,OAAO,CAAC,CAAC;QAC3B,CAAC;QACD,OAAO,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC;IAC1B,CAAC;IAEO,UAAU,CAAC,OAAiB,EAAE,MAAgB;QACpD,MAAM,UAAU,GAAG,IAAI,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;QACrD,IAAI,OAAO,GAAG,CAAC,CAAC;QAE
hB,KAAK,IAAI,CAAC,GAAG,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;YAC7C,MAAM,SAAS,GAAG,CAAC,GAAG,OAAO,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;YAC7D,MAAM,KAAK,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,SAAS,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;YACrE,OAAO,GAAG,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,SAAS,GAAG,OAAO,CAAC;YACtE,UAAU,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC;QAC1B,CAAC;QAED,OAAO,UAAU,CAAC;IACpB,CAAC;IAEO,cAAc,CAAC,OAAiB;QACtC,MAAM,OAAO,GAAG,IAAI,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;QAClD,IAAI,SAAS,GAAG,CAAC,CAAC;QAElB,KAAK,IAAI,CAAC,GAAG,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;YAC7C,SAAS,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,SAAS,CAAC;YACvD,OAAO,CAAC,CAAC,CAAC,GAAG,SAAS,CAAC;QACzB,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;IAEO,aAAa;QACnB,KAAK,IAAI,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;YAChD,MAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;YAC9C,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;QACtE,CAAC;IACH,CAAC;IAEO,eAAe,CAAC,KAAsB;QAO5C,IAAI,UAAU,GAAG,CAAC,CAAC;QACnB,IAAI,SAAS,GAAG,CAAC,CAAC;QAClB,IAAI,OAAO,GAAG,CAAC,CAAC;QAChB,IAAI,QAAQ,GAAG,CAAC,CAAC;QACjB,IAAI,EAAE,GAAG,CAAC,CAAC;QAEX,MAAM,UAAU,GAAG,IAAI,YAAY,CAAC,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;QAC/D,MAAM,SAAS,GAAG,IAAI,YAAY,CAAC,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;QAE7D,KAAK,MAAM,GAAG,IAAI,KAAK,EAAE,CAAC;YACxB,iBAAiB;YACjB,MAAM,MAAM,GAAG,IAAI,CAAC,aAAa,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;YAC7C,MAAM,KAAK,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;YACnC,MAAM,UAAU,GAAG,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC;YACtD,MAAM,YAAY,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;YAElD,gBAAgB;YAChB,MAAM,KAAK,GAAG,IAAI,
CAAC,GAAG,CAAC,UAAU,GAAG,GAAG,CAAC,OAAO,CAAC,CAAC;YAEjD,8BAA8B;YAC9B,MAAM,KAAK,GAAG,KAAK,GAAG,GAAG,CAAC,SAAS,CAAC;YACpC,MAAM,KAAK,GAAG,IAAI,CAAC,GAAG,CACpB,IAAI,CAAC,GAAG,CAAC,KAAK,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,SAAS,CAAC,EAC1C,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,SAAS,CAC1B,GAAG,GAAG,CAAC,SAAS,CAAC;YAElB,MAAM,WAAW,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;YAC5C,UAAU,IAAI,WAAW,CAAC;YAE1B,iBAAiB;YACjB,IAAI,IAAI,CAAC,GAAG,CAAC,KAAK,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,SAAS,EAAE,CAAC;gBAChD,QAAQ,EAAE,CAAC;YACb,CAAC;YAED,8BAA8B;YAC9B,EAAE,IAAI,CAAC,GAAG,CAAC,OAAO,GAAG,UAAU,CAAC,CAAC;YAEjC,aAAa;YACb,IAAI,UAAkB,CAAC;YACvB,IAAI,IAAI,CAAC,MAAM,CAAC,WAAW,KAAK,IAAI,EAAE,CAAC;gBACrC,MAAM,SAAS,GAAG,YAAY,CAAC;gBAC/B,MAAM,YAAY,GAAG,GAAG,CAAC,KAAK,GAAG,IAAI,CAAC,GAAG,CACvC,IAAI,CAAC,GAAG,CAAC,SAAS,GAAG,GAAG,CAAC,KAAK,EAAE,IAAI,CAAC,MAAM,CAAC,WAAW,CAAC,EACxD,CAAC,IAAI,CAAC,MAAM,CAAC,WAAW,CACzB,CAAC;gBACF,MAAM,GAAG,GAAG,CAAC,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;gBAC3C,MAAM,GAAG,GAAG,CAAC,YAAY,GAAG,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;gBAC9C,UAAU,GAAG,IAAI,CAAC,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAClC,CAAC;iBAAM,CAAC;gBACN,UAAU,GAAG,CAAC,YAAY,GAAG,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;YACjD,CAAC;YACD,SAAS,IAAI,UAAU,CAAC;YAExB,UAAU;YACV,IAAI,QAAQ,GAAG,CAAC,CAAC;YACjB,KAAK,MAAM,CAAC,IAAI,KAAK,EAAE,CAAC;gBACtB,IAAI,CAAC,GAAG,CAAC;oBAAE,QAAQ,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;YACzC,CAAC;YACD,OAAO,IAAI,QAAQ,CAAC;YAEpB,iCAAiC;YACjC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,EAAE,UAAU,CAAC,MAAM,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;gBACvE,UAAU,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,WAAW,GAAG,IAAI,CAAC;gBACnD,SAAS,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,UAAU,GAAG,IAAI,CAAC;YACnD,CAAC;QACH,CAAC;QAED,gCAAgC;QAChC,MAAM,EAAE,GAAG,IAAI,CAAC,MAAM,CAAC,YAAY,CAAC;QACpC,MAAM,IAAI,GAAG,GAAG,CAAC;QAEjB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,aAAa,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACnD,IAAI,CAAC,c
AAc,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,UAAU,CAAC,CAAC,CAAC,CAAC;YACpF,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,IAAI,EAAE,GAAG,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC;YAErD,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC,CAAC;YACjF,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI,EAAE,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC;QACrD,CAAC;QAED,OAAO;YACL,UAAU,EAAE,UAAU,GAAG,KAAK,CAAC,MAAM;YACrC,SAAS,EAAE,SAAS,GAAG,KAAK,CAAC,MAAM;YACnC,OAAO,EAAE,OAAO,GAAG,KAAK,CAAC,MAAM;YAC/B,QAAQ,EAAE,QAAQ,GAAG,KAAK,CAAC,MAAM;YACjC,EAAE,EAAE,EAAE,GAAG,KAAK,CAAC,MAAM;SACtB,CAAC;IACJ,CAAC;CACF;AAED;;GAEG;AACH,MAAM,UAAU,SAAS,CAAC,MAA2B;IACnD,OAAO,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;AAClC,CAAC"}