prepia 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/LICENSE +21 -0
  2. package/README.md +312 -0
  3. package/bin/prepia.mjs +119 -0
  4. package/package.json +53 -0
  5. package/skill/SKILL.md +148 -0
  6. package/skill/config.json +29 -0
  7. package/src/analytics/dashboard.mjs +84 -0
  8. package/src/analytics/tracker.mjs +131 -0
  9. package/src/api/middleware.mjs +219 -0
  10. package/src/api/routes.mjs +142 -0
  11. package/src/api/server.mjs +150 -0
  12. package/src/cache/disk-store.mjs +199 -0
  13. package/src/cache/manager.mjs +142 -0
  14. package/src/cache/memory-store.mjs +205 -0
  15. package/src/chain/dag.mjs +209 -0
  16. package/src/chain/executor.mjs +103 -0
  17. package/src/chain/scheduler.mjs +89 -0
  18. package/src/client/adapters.mjs +483 -0
  19. package/src/client/connector.mjs +391 -0
  20. package/src/client/index.mjs +483 -0
  21. package/src/client/websocket.mjs +353 -0
  22. package/src/core/context-packager.mjs +169 -0
  23. package/src/core/engine.mjs +338 -0
  24. package/src/core/event-bus.mjs +84 -0
  25. package/src/core/prepimshot.mjs +120 -0
  26. package/src/core/task-decomposer.mjs +158 -0
  27. package/src/edge/lite.mjs +90 -0
  28. package/src/guard/checker.mjs +123 -0
  29. package/src/guard/fact-checker.mjs +105 -0
  30. package/src/guard/hallucination.mjs +108 -0
  31. package/src/index.mjs +67 -0
  32. package/src/models/local-model.mjs +171 -0
  33. package/src/models/provider.mjs +192 -0
  34. package/src/models/router.mjs +156 -0
  35. package/src/morph/optimizer.mjs +142 -0
  36. package/src/network/p2p.mjs +146 -0
  37. package/src/persona/detector.mjs +118 -0
  38. package/src/plugins/loader.mjs +120 -0
  39. package/src/plugins/registry.mjs +164 -0
  40. package/src/plugins/sandbox.mjs +79 -0
  41. package/src/rate/limiter.mjs +145 -0
  42. package/src/rate/shield.mjs +150 -0
  43. package/src/script/executor.mjs +164 -0
  44. package/src/script/parser.mjs +134 -0
  45. package/src/security/privacy.mjs +108 -0
  46. package/src/security/sanitizer.mjs +133 -0
  47. package/src/shadow/daemon.mjs +128 -0
  48. package/src/stream/handler.mjs +204 -0
  49. package/src/tools/calculator.mjs +312 -0
  50. package/src/tools/file-ops.mjs +138 -0
  51. package/src/tools/http-client.mjs +127 -0
  52. package/src/tools/orchestrator.mjs +205 -0
  53. package/src/tools/web-scraper.mjs +159 -0
  54. package/src/tools/web-search.mjs +129 -0
  55. package/src/vault/knowledge-base.mjs +207 -0
  56. package/src/vault/pattern-learner.mjs +192 -0
  57. package/workflows/analyze.json +32 -0
  58. package/workflows/automate.json +32 -0
  59. package/workflows/research.json +37 -0
  60. package/workflows/summarize.json +32 -0
package/src/models/provider.mjs
@@ -0,0 +1,192 @@
+ /**
+  * @fileoverview LLM provider abstraction layer.
+  * Unified interface for OpenAI, Anthropic, and Gemini style APIs.
+  * @module models/provider
+  */
+
+ /**
+  * @typedef {Object} LLMRequest
+  * @property {string} prompt - The prompt text
+  * @property {string} [system] - System message
+  * @property {number} [maxTokens=2048] - Max tokens to generate
+  * @property {number} [temperature=0.7] - Temperature
+  * @property {boolean} [stream=false] - Enable streaming
+  */
+
+ /**
+  * @typedef {Object} LLMResponse
+  * @property {string} content - Generated text
+  * @property {Object} usage - Token usage { prompt, completion, total }
+  * @property {string} model - Model used
+  * @property {string} provider - Provider name
+  * @property {number} latency - Response time in ms
+  */
+
+ /**
+  * Format a request for the OpenAI API format.
+  * @param {LLMRequest} request
+  * @param {Object} config - Provider config
+  * @returns {Object}
+  */
+ export function formatOpenAI(request, config) {
+   const messages = [];
+   if (request.system) {
+     messages.push({ role: 'system', content: request.system });
+   }
+   messages.push({ role: 'user', content: request.prompt });
+
+   return {
+     url: `${config.baseUrl || 'https://api.openai.com'}/v1/chat/completions`,
+     headers: {
+       'Content-Type': 'application/json',
+       'Authorization': `Bearer ${config.apiKey}`,
+     },
+     body: {
+       model: config.model || 'gpt-4o-mini',
+       messages,
+       max_tokens: request.maxTokens ?? 2048,
+       temperature: request.temperature ?? 0.7,
+       stream: request.stream ?? false,
+     },
+   };
+ }
+
+ /**
+  * Format a request for the Anthropic API format.
+  * @param {LLMRequest} request
+  * @param {Object} config
+  * @returns {Object}
+  */
+ export function formatAnthropic(request, config) {
+   return {
+     url: `${config.baseUrl || 'https://api.anthropic.com'}/v1/messages`,
+     headers: {
+       'Content-Type': 'application/json',
+       'x-api-key': config.apiKey,
+       'anthropic-version': '2023-06-01',
+     },
+     body: {
+       model: config.model || 'claude-3-5-sonnet-20241022',
+       max_tokens: request.maxTokens ?? 2048,
+       messages: [{ role: 'user', content: request.prompt }],
+       ...(request.system ? { system: request.system } : {}),
+     },
+   };
+ }
+
+ /**
+  * Format a request for the Gemini API format.
+  * @param {LLMRequest} request
+  * @param {Object} config
+  * @returns {Object}
+  */
+ export function formatGemini(request, config) {
+   const contents = [];
+   if (request.system) {
+     contents.push({ role: 'user', parts: [{ text: request.system }] });
+     contents.push({ role: 'model', parts: [{ text: 'Understood.' }] });
+   }
+   contents.push({ role: 'user', parts: [{ text: request.prompt }] });
+
+   return {
+     url: `${config.baseUrl || 'https://generativelanguage.googleapis.com'}/v1beta/models/${config.model || 'gemini-pro'}:generateContent?key=${config.apiKey}`,
+     headers: { 'Content-Type': 'application/json' },
+     body: {
+       contents,
+       generationConfig: {
+         maxOutputTokens: request.maxTokens ?? 2048,
+         temperature: request.temperature ?? 0.7,
+       },
+     },
+   };
+ }
+
+ /**
+  * Parse an OpenAI-format response.
+  * @param {Object} response - Raw API response
+  * @returns {Partial<LLMResponse>}
+  */
+ export function parseOpenAI(response) {
+   const choice = response.choices?.[0];
+   return {
+     content: choice?.message?.content || '',
+     usage: {
+       prompt: response.usage?.prompt_tokens ?? 0,
+       completion: response.usage?.completion_tokens ?? 0,
+       total: response.usage?.total_tokens ?? 0,
+     },
+     model: response.model || '',
+   };
+ }
+
+ /**
+  * Parse an Anthropic-format response.
+  * @param {Object} response
+  * @returns {Partial<LLMResponse>}
+  */
+ export function parseAnthropic(response) {
+   return {
+     content: response.content?.[0]?.text || '',
+     usage: {
+       prompt: response.usage?.input_tokens ?? 0,
+       completion: response.usage?.output_tokens ?? 0,
+       total: (response.usage?.input_tokens ?? 0) + (response.usage?.output_tokens ?? 0),
+     },
+     model: response.model || '',
+   };
+ }
+
+ /**
+  * Parse a Gemini-format response.
+  * @param {Object} response
+  * @returns {Partial<LLMResponse>}
+  */
+ export function parseGemini(response) {
+   const candidate = response.candidates?.[0];
+   return {
+     content: candidate?.content?.parts?.[0]?.text || '',
+     usage: {
+       prompt: response.usageMetadata?.promptTokenCount ?? 0,
+       completion: response.usageMetadata?.candidatesTokenCount ?? 0,
+       total: response.usageMetadata?.totalTokenCount ?? 0,
+     },
+     model: '',
+   };
+ }
+
+ /**
+  * Format a request for a given provider type.
+  * @param {string} type - Provider type (openai, anthropic, gemini)
+  * @param {LLMRequest} request
+  * @param {Object} config
+  * @returns {Object}
+  */
+ export function formatRequest(type, request, config) {
+   switch (type) {
+     case 'openai': return formatOpenAI(request, config);
+     case 'anthropic': return formatAnthropic(request, config);
+     case 'gemini': return formatGemini(request, config);
+     default: throw new Error(`Unknown provider type: ${type}`);
+   }
+ }
+
+ /**
+  * Parse a response from a given provider type.
+  * @param {string} type
+  * @param {Object} response
+  * @returns {Partial<LLMResponse>}
+  */
+ export function parseResponse(type, response) {
+   switch (type) {
+     case 'openai': return parseOpenAI(response);
+     case 'anthropic': return parseAnthropic(response);
+     case 'gemini': return parseGemini(response);
+     default: throw new Error(`Unknown provider type: ${type}`);
+   }
+ }
+
+ export default {
+   formatOpenAI, formatAnthropic, formatGemini,
+   parseOpenAI, parseAnthropic, parseGemini,
+   formatRequest, parseResponse,
+ };
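
Editor's note: a minimal sketch of how the format/parse helpers above pair up around a raw fetch call. The import path, environment variable, and prompt are illustrative only; the package does not document a public subpath for this module.

import { formatRequest, parseResponse } from './src/models/provider.mjs';

// Hypothetical config; apiKey and model are placeholders.
const config = { apiKey: process.env.OPENAI_API_KEY, model: 'gpt-4o-mini' };
const req = formatRequest('openai', { prompt: 'Say hi', maxTokens: 64 }, config);

// formatRequest returns { url, headers, body }; the caller owns the transport.
const res = await fetch(req.url, {
  method: 'POST',
  headers: req.headers,
  body: JSON.stringify(req.body),
});
const parsed = parseResponse('openai', await res.json());
console.log(parsed.content, parsed.usage);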
package/src/models/router.mjs
@@ -0,0 +1,156 @@
+ /**
+  * @fileoverview Multi-LLM routing with fallback and priority-based selection.
+  * @module models/router
+  */
+
+ import { EventEmitter } from 'node:events';
+ import { formatRequest, parseResponse } from './provider.mjs';
+
+ /**
+  * @typedef {Object} ProviderConfig
+  * @property {string} name - Provider name
+  * @property {string} type - Provider type (openai, anthropic, gemini)
+  * @property {string} apiKey - API key
+  * @property {string} [model] - Model name
+  * @property {string} [baseUrl] - Base URL override
+  * @property {number} [priority=0] - Higher = preferred
+  * @property {number} [maxRetries=2] - Max retries before fallback
+  */
+
+ export class ModelRouter extends EventEmitter {
+   /**
+    * @param {Object} [options]
+    * @param {ProviderConfig[]} [options.providers] - Available providers
+    */
+   constructor(options = {}) {
+     super();
+     /** @type {ProviderConfig[]} */
+     this._providers = (options.providers || []).sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
+     this._usage = new Map();
+   }
+
+   /**
+    * Add a provider.
+    * @param {ProviderConfig} config
+    */
+   addProvider(config) {
+     this._providers.push(config);
+     this._providers.sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
+   }
+
+   /**
+    * Remove a provider.
+    * @param {string} name
+    * @returns {boolean}
+    */
+   removeProvider(name) {
+     const idx = this._providers.findIndex(p => p.name === name);
+     if (idx >= 0) {
+       this._providers.splice(idx, 1);
+       return true;
+     }
+     return false;
+   }
+
+   /**
+    * Get list of provider names.
+    * @returns {string[]}
+    */
+   listProviders() {
+     return this._providers.map(p => p.name);
+   }
+
+   /**
+    * Send a request to the best available provider with fallback.
+    * @param {import('./provider.mjs').LLMRequest} request
+    * @param {Object} [options]
+    * @param {string} [options.preferredProvider] - Force a specific provider
+    * @returns {Promise<import('./provider.mjs').LLMResponse>}
+    */
+   async send(request, options = {}) {
+     const providers = options.preferredProvider
+       ? this._providers.filter(p => p.name === options.preferredProvider).concat(this._providers.filter(p => p.name !== options.preferredProvider))
+       : [...this._providers];
+
+     if (providers.length === 0) {
+       throw new Error('No providers configured');
+     }
+
+     let lastError;
+     for (const provider of providers) {
+       try {
+         const result = await this._sendToProvider(provider, request);
+         this.emit('provider:success', { provider: provider.name, usage: result.usage });
+         return result;
+       } catch (err) {
+         lastError = err;
+         this.emit('provider:error', { provider: provider.name, error: err.message });
+         continue;
+       }
+     }
+
+     throw new Error(`All providers failed. Last error: ${lastError?.message}`);
+   }
+
+   /**
+    * Send a request to a specific provider.
+    * @param {ProviderConfig} provider
+    * @param {import('./provider.mjs').LLMRequest} request
+    * @returns {Promise<import('./provider.mjs').LLMResponse>}
+    * @private
+    */
+   async _sendToProvider(provider, request) {
+     const start = Date.now();
+     const formatted = formatRequest(provider.type, request, provider);
+
+     const response = await fetch(formatted.url, {
+       method: 'POST',
+       headers: formatted.headers,
+       body: JSON.stringify(formatted.body),
+       signal: AbortSignal.timeout(60000),
+     });
+
+     if (!response.ok) {
+       const errBody = await response.text().catch(() => 'Unknown error');
+       throw new Error(`${provider.name} API error ${response.status}: ${errBody}`);
+     }
+
+     const data = await response.json();
+     const parsed = parseResponse(provider.type, data);
+
+     // Track usage
+     const prev = this._usage.get(provider.name) || { requests: 0, tokens: 0 };
+     this._usage.set(provider.name, {
+       requests: prev.requests + 1,
+       tokens: prev.tokens + (parsed.usage?.total ?? 0),
+     });
+
+     return {
+       ...parsed,
+       provider: provider.name,
+       latency: Date.now() - start,
+     };
+   }
+
+   /**
+    * Get usage stats for all providers.
+    * @returns {Object}
+    */
+   getUsage() {
+     const result = {};
+     for (const [name, usage] of this._usage) {
+       result[name] = { ...usage };
+     }
+     return result;
+   }
+
+   /**
+    * Get the number of configured providers.
+    * @returns {number}
+    */
+   get providerCount() {
+     return this._providers.length;
+   }
+ }
+
+ export default ModelRouter;
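
Editor's note: a sketch of the priority/fallback behavior, assuming local import and placeholder credentials; the provider names and prompt are invented for illustration.

import { ModelRouter } from './src/models/router.mjs';

const router = new ModelRouter({
  providers: [
    { name: 'claude', type: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY, priority: 2 },
    { name: 'gpt', type: 'openai', apiKey: process.env.OPENAI_API_KEY, priority: 1 },
  ],
});

router.on('provider:error', (e) => console.warn(`${e.provider} failed: ${e.error}`));

// Tries 'claude' first (higher priority); on any error it falls through to 'gpt'.
const reply = await router.send({ prompt: 'Summarize the plot of Hamlet.' });
console.log(reply.provider, reply.latency, reply.content);

One mismatch worth flagging: ProviderConfig documents a maxRetries field, but send() moves to the next provider on the first error; no per-provider retry logic appears in this file.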
package/src/morph/optimizer.mjs
@@ -0,0 +1,142 @@
+ /**
+  * @fileoverview Self-evolving workflow optimization.
+  * @module morph/optimizer
+  */
+
+ export class Optimizer {
+   constructor() {
+     /** @type {Map<string, Object>} Workflow performance data */
+     this._workflows = new Map();
+   }
+
+   /**
+    * Record a workflow execution.
+    * @param {string} workflowId - Workflow identifier
+    * @param {Object} result - Execution result
+    * @param {boolean} result.success - Whether it succeeded
+    * @param {number} result.duration - Duration in ms
+    * @param {number} [result.tokensUsed=0] - Tokens consumed
+    * @param {Object} [result.params] - Parameters used
+    */
+   record(workflowId, result) {
+     const existing = this._workflows.get(workflowId) || {
+       id: workflowId,
+       runs: 0,
+       successes: 0,
+       totalDuration: 0,
+       totalTokens: 0,
+       avgDuration: 0,
+       successRate: 0,
+       lastRun: null,
+     };
+
+     existing.runs++;
+     if (result.success) existing.successes++;
+     existing.totalDuration += result.duration || 0;
+     existing.totalTokens += result.tokensUsed || 0;
+     existing.avgDuration = existing.totalDuration / existing.runs;
+     existing.successRate = existing.successes / existing.runs;
+     existing.lastRun = Date.now();
+
+     this._workflows.set(workflowId, existing);
+   }
+
+   /**
+    * Get optimization suggestions for a workflow.
+    * @param {string} workflowId
+    * @returns {Object[]}
+    */
+   getOptimizations(workflowId) {
+     const data = this._workflows.get(workflowId);
+     if (!data || data.runs < 3) return [];
+
+     const suggestions = [];
+
+     if (data.successRate < 0.8) {
+       suggestions.push({
+         type: 'reliability',
+         priority: 'high',
+         message: `Low success rate (${(data.successRate * 100).toFixed(0)}%). Consider simplifying the workflow or adding error handling.`,
+       });
+     }
+
+     if (data.avgDuration > 30000) {
+       suggestions.push({
+         type: 'performance',
+         priority: 'medium',
+         message: `Average duration is ${(data.avgDuration / 1000).toFixed(1)}s. Consider caching or parallel execution.`,
+       });
+     }
+
+     if (data.totalTokens > 100000) {
+       suggestions.push({
+         type: 'cost',
+         priority: 'medium',
+         message: `High token usage (${data.totalTokens}). Consider context compression or local processing.`,
+       });
+     }
+
+     return suggestions;
+   }
+
+   /**
+    * Identify bottlenecks across all workflows.
+    * @returns {Object[]}
+    */
+   identifyBottlenecks() {
+     const bottlenecks = [];
+
+     for (const data of this._workflows.values()) {
+       if (data.runs < 2) continue;
+
+       if (data.avgDuration > 20000) {
+         bottlenecks.push({
+           workflowId: data.id,
+           issue: 'slow_execution',
+           avgDuration: data.avgDuration,
+           severity: data.avgDuration > 60000 ? 'critical' : 'warning',
+         });
+       }
+
+       if (data.successRate < 0.7) {
+         bottlenecks.push({
+           workflowId: data.id,
+           issue: 'low_success_rate',
+           successRate: data.successRate,
+           severity: data.successRate < 0.5 ? 'critical' : 'warning',
+         });
+       }
+     }
+
+     return bottlenecks.sort((a, b) => {
+       const severityOrder = { critical: 0, warning: 1 };
+       return (severityOrder[a.severity] ?? 2) - (severityOrder[b.severity] ?? 2);
+     });
+   }
+
+   /**
+    * Get all tracked workflows.
+    * @returns {Object[]}
+    */
+   getWorkflows() {
+     return Array.from(this._workflows.values());
+   }
+
+   /**
+    * Get stats for a specific workflow.
+    * @param {string} workflowId
+    * @returns {Object|undefined}
+    */
+   getStats(workflowId) {
+     return this._workflows.get(workflowId);
+   }
+
+   /**
+    * Clear all tracking data.
+    */
+   clear() {
+     this._workflows.clear();
+   }
+ }
+
+ export default Optimizer;
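
Editor's note: a sketch of the thresholds in action. The workflow id and timing numbers are invented to trip the 0.8 success-rate and 30s duration heuristics above.

import { Optimizer } from './src/morph/optimizer.mjs';

const opt = new Optimizer();

// Three observed runs (values invented for illustration).
opt.record('daily-report', { success: true, duration: 42000, tokensUsed: 1800 });
opt.record('daily-report', { success: false, duration: 35000, tokensUsed: 2100 });
opt.record('daily-report', { success: true, duration: 38000, tokensUsed: 1500 });

// runs >= 3; successRate ~0.67 < 0.8 and avgDuration ~38.3s > 30s,
// so both a 'reliability' and a 'performance' suggestion come back.
console.log(opt.getOptimizations('daily-report'));

// Same data also registers as two 'warning' bottlenecks.
console.log(opt.identifyBottlenecks());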
package/src/network/p2p.mjs
@@ -0,0 +1,146 @@
+ /**
+  * @fileoverview Distributed task sharing - PrepiNet P2P network.
+  * @module network/p2p
+  */
+
+ import { EventEmitter } from 'node:events';
+ import crypto from 'node:crypto';
+
+ export class P2PNetwork extends EventEmitter {
+   /**
+    * @param {Object} [options]
+    * @param {string} [options.nodeId] - Unique node identifier
+    * @param {number} [options.port=0] - Listening port
+    */
+   constructor(options = {}) {
+     super();
+     this._nodeId = options.nodeId || crypto.randomUUID();
+     this._port = options.port ?? 0;
+     /** @type {Map<string, Object>} Known peers */
+     this._peers = new Map();
+     /** @type {Map<string, Object>} Shared cache entries */
+     this._sharedCache = new Map();
+     this._running = false;
+   }
+
+   /**
+    * Get this node's ID.
+    * @returns {string}
+    */
+   get nodeId() {
+     return this._nodeId;
+   }
+
+   /**
+    * Add a peer.
+    * @param {Object} peer
+    * @param {string} peer.id - Peer node ID
+    * @param {string} peer.host - Peer host
+    * @param {number} peer.port - Peer port
+    */
+   addPeer(peer) {
+     if (peer.id === this._nodeId) return;
+     this._peers.set(peer.id, {
+       ...peer,
+       lastSeen: Date.now(),
+       status: 'connected',
+     });
+     this.emit('peer:added', peer);
+   }
+
+   /**
+    * Remove a peer.
+    * @param {string} peerId
+    */
+   removePeer(peerId) {
+     this._peers.delete(peerId);
+     this.emit('peer:removed', { id: peerId });
+   }
+
+   /**
+    * Get all known peers.
+    * @returns {Object[]}
+    */
+   getPeers() {
+     return Array.from(this._peers.values());
+   }
+
+   /**
+    * Share a cache entry with the network.
+    * @param {string} key - Cache key
+    * @param {*} value - Cached value
+    * @param {number} [ttl=300000] - TTL in ms
+    */
+   shareCache(key, value, ttl = 300000) {
+     this._sharedCache.set(key, {
+       key,
+       value,
+       sharedAt: Date.now(),
+       expiresAt: Date.now() + ttl,
+       source: this._nodeId,
+     });
+     this.emit('cache:shared', { key });
+   }
+
+   /**
+    * Query the shared cache.
+    * @param {string} key
+    * @returns {*}
+    */
+   queryCache(key) {
+     const entry = this._sharedCache.get(key);
+     if (!entry) return undefined;
+     if (Date.now() > entry.expiresAt) {
+       this._sharedCache.delete(key);
+       return undefined;
+     }
+     return entry.value;
+   }
+
+   /**
+    * Get the node count (including self).
+    * @returns {number}
+    */
+   get nodeCount() {
+     return this._peers.size + 1;
+   }
+
+   /**
+    * Get network stats.
+    * @returns {Object}
+    */
+   stats() {
+     return {
+       nodeId: this._nodeId,
+       peers: this._peers.size,
+       sharedEntries: this._sharedCache.size,
+       running: this._running,
+     };
+   }
+
+   /**
+    * Start the P2P network.
+    */
+   start() {
+     this._running = true;
+     this.emit('network:started', { nodeId: this._nodeId });
+   }
+
+   /**
+    * Stop the P2P network.
+    */
+   stop() {
+     this._running = false;
+     this.emit('network:stopped', { nodeId: this._nodeId });
+   }
+
+   /**
+    * Check if network is running.
+    * @returns {boolean}
+    */
+   get isRunning() {
+     return this._running;
+   }
+ }
+
+ export default P2PNetwork;
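
Editor's note: despite the "P2P network" name, this file contains no transport: start() and stop() only toggle a flag and emit events, and peers and cache entries live in local Maps. A sketch of the in-process API, with an invented peer and cache key:

import { P2PNetwork } from './src/network/p2p.mjs';

const net = new P2PNetwork({ port: 7070 });
net.on('peer:added', (p) => console.log('peer joined:', p.id));

net.start();
net.addPeer({ id: 'node-b', host: '10.0.0.2', port: 7070 }); // example peer

// Entries expire after the TTL (5 minutes by default; 60s here).
net.shareCache('search:llm-routing', { hits: 3 }, 60000);
console.log(net.queryCache('search:llm-routing')); // { hits: 3 }
console.log(net.stats()); // { nodeId, peers: 1, sharedEntries: 1, running: true }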