@props-labs/mesh-os 0.1.23 → 0.2.0

This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (40)
  1. package/dist/core/__fixtures__/mock_responses.d.ts +318 -0
  2. package/dist/core/__fixtures__/mock_responses.js +333 -0
  3. package/dist/core/__fixtures__/sample_embeddings.d.ts +33 -0
  4. package/dist/core/__fixtures__/sample_embeddings.js +12355 -0
  5. package/dist/core/agents.d.ts +51 -0
  6. package/dist/core/agents.js +170 -0
  7. package/dist/core/memories.d.ts +138 -0
  8. package/dist/core/memories.js +417 -0
  9. package/dist/core/workflows.d.ts +84 -25
  10. package/dist/core/workflows.js +224 -135
  11. package/package.json +3 -3
  12. package/src/templates/hasura/metadata/actions.yaml +6 -0
  13. package/src/templates/hasura/metadata/cron_triggers.yaml +1 -0
  14. package/src/templates/hasura/metadata/databases/databases.yaml +1 -1
  15. package/src/templates/hasura/metadata/databases/default/functions/functions.yaml +80 -0
  16. package/src/templates/hasura/metadata/databases/default/tables/tables.yaml +274 -9
  17. package/src/templates/hasura/metadata/query_collections.yaml +1 -0
  18. package/src/templates/hasura/metadata/rest_endpoints.yaml +1 -0
  19. package/src/templates/hasura/migrations/default/0_cleanup/down.sql +2 -0
  20. package/src/templates/hasura/migrations/default/0_cleanup/up.sql +59 -0
  21. package/src/templates/hasura/migrations/default/1_init/down.sql +27 -21
  22. package/src/templates/hasura/migrations/default/1_init/up.sql +446 -174
  23. package/src/templates/hasura/migrations/default/2_sample_data/down.sql +3 -0
  24. package/src/templates/hasura/migrations/default/2_sample_data/up.sql +288 -0
  25. package/src/templates/hasura/migrations/default/3_agent_relations/down.sql +76 -0
  26. package/src/templates/hasura/migrations/default/3_agent_relations/up.sql +469 -0
  27. package/src/templates/hasura/metadata/config.yaml +0 -1
  28. package/src/templates/hasura/metadata/databases/default/tables/public_agents.yaml +0 -14
  29. package/src/templates/hasura/metadata/databases/default/tables/public_memories.yaml +0 -23
  30. package/src/templates/hasura/metadata/databases/default/tables/public_memory_edges.yaml +0 -57
  31. package/src/templates/hasura/metadata/databases/default/tables/track_tables.yaml +0 -14
  32. package/src/templates/hasura/metadata/metadata.json +0 -80
  33. package/src/templates/hasura/migrations/default/2_metadata_filtering/down.sql +0 -4
  34. package/src/templates/hasura/migrations/default/2_metadata_filtering/up.sql +0 -44
  35. package/src/templates/hasura/migrations/default/3_memory_expiry/down.sql +0 -55
  36. package/src/templates/hasura/migrations/default/3_memory_expiry/up.sql +0 -108
  37. package/src/templates/hasura/migrations/default/4_remove_slug_validation/down.sql +0 -20
  38. package/src/templates/hasura/migrations/default/4_remove_slug_validation/up.sql +0 -5
  39. package/src/templates/hasura/migrations/default/5_entities/down.sql +0 -13
  40. package/src/templates/hasura/migrations/default/5_entities/up.sql +0 -155
package/dist/core/memories.js
@@ -0,0 +1,417 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MemoryManager = void 0;
+ const zod_1 = require("zod");
+ // Validation schemas
+ const memorySchema = zod_1.z.object({
+     id: zod_1.z.string().uuid(),
+     type: zod_1.z.string(),
+     status: zod_1.z.enum(['active', 'archived', 'deleted']),
+     metadata: zod_1.z.record(zod_1.z.any()),
+     content: zod_1.z.string(),
+     created_at: zod_1.z.string(),
+     updated_at: zod_1.z.string(),
+     agent_id: zod_1.z.string().uuid().nullable().optional()
+ });
+ const memoryChunkSchema = zod_1.z.object({
+     id: zod_1.z.string().uuid(),
+     memory_id: zod_1.z.string().uuid(),
+     chunk_index: zod_1.z.number(),
+     content: zod_1.z.string(),
+     embedding: zod_1.z.array(zod_1.z.number()).optional(),
+     metadata: zod_1.z.record(zod_1.z.any()),
+     created_at: zod_1.z.string(),
+     updated_at: zod_1.z.string(),
+     agent_id: zod_1.z.string().uuid().nullable().optional()
+ });
+ const typeSchemaSchema = zod_1.z.object({
+     type: zod_1.z.string(),
+     schema: zod_1.z.record(zod_1.z.any()),
+     metadata_schema: zod_1.z.record(zod_1.z.any()).nullable().optional(),
+     embedding_config: zod_1.z.object({
+         model: zod_1.z.string(),
+         dimensions: zod_1.z.number()
+     }).nullable().optional(),
+     chunking_config: zod_1.z.object({
+         chunk_size: zod_1.z.number(),
+         chunk_overlap: zod_1.z.number()
+     }).nullable().optional(),
+     validation_rules: zod_1.z.record(zod_1.z.any()).nullable().optional(),
+     behaviors: zod_1.z.record(zod_1.z.any()).nullable().optional(),
+     created_at: zod_1.z.string(),
+     updated_at: zod_1.z.string()
+ });
+ class MemoryManager {
+     constructor(url, headers, createEmbedding) {
+         this.url = url;
+         this.headers = headers;
+         this.createEmbedding = createEmbedding;
+     }
+     /**
+      * Execute a GraphQL query against Hasura
+      */
+     async executeQuery(query, variables) {
+         const response = await fetch(this.url, {
+             method: 'POST',
+             headers: {
+                 'Content-Type': 'application/json',
+                 ...this.headers
+             },
+             body: JSON.stringify({
+                 query,
+                 variables
+             })
+         });
+         if (!response.ok) {
+             throw new Error(`Failed to execute query: ${response.statusText}`);
+         }
+         const result = (await response.json());
+         if (result.errors) {
+             throw new Error(`GraphQL error: ${result.errors[0].message}`);
+         }
+         return result.data;
+     }
+     /**
+      * Get a memory by its ID
+      */
+     async get(id) {
+         const query = `
+             query GetMemory($id: uuid!) {
+                 memories_by_pk(id: $id) {
+                     id
+                     type
+                     status
+                     metadata
+                     content
+                     created_at
+                     updated_at
+                     agent_id
+                 }
+             }
+         `;
+         const result = await this.executeQuery(query, { id });
+         const memory = result.memories_by_pk;
+         if (!memory) {
+             throw new Error(`Memory not found with id: ${id}`);
+         }
+         return memorySchema.parse(memory);
+     }
+     /**
+      * Get a type schema by its type name
+      */
+     async getTypeSchema(type) {
+         const query = `
+             query GetTypeSchema($type: String!) {
+                 type_schemas_by_pk(type: $type) {
+                     type
+                     schema
+                     metadata_schema
+                     embedding_config
+                     chunking_config
+                     validation_rules
+                     behaviors
+                     created_at
+                     updated_at
+                 }
+             }
+         `;
+         const result = await this.executeQuery(query, { type });
+         const typeSchema = result.type_schemas_by_pk;
+         if (!typeSchema) {
+             throw new Error(`Type schema not found for type: ${type}`);
+         }
+         return typeSchemaSchema.parse(typeSchema);
+     }
+     /**
+      * Create chunks from content based on type schema configuration
+      */
+     createChunks(content, chunkSize, chunkOverlap) {
+         const words = content.split(' ');
+         const chunks = [];
+         let currentIndex = 0;
+         while (currentIndex < words.length) {
+             const chunkWords = words.slice(currentIndex, currentIndex + chunkSize);
+             chunks.push({
+                 content: chunkWords.join(' '),
+                 index: chunks.length
+             });
+             currentIndex += chunkSize - chunkOverlap;
+         }
+         return chunks;
+     }
+     /**
+      * Create a new memory with chunks and embeddings
+      */
+     async create(input) {
+         // First get the type schema to validate against
+         const typeSchema = await this.getTypeSchema(input.type);
+         // Create the memory
+         const createMemoryQuery = `
+             mutation CreateMemory($memory: memories_insert_input!) {
+                 insert_memories_one(object: $memory) {
+                     id
+                     type
+                     status
+                     metadata
+                     content
+                     created_at
+                     updated_at
+                     agent_id
+                 }
+             }
+         `;
+         const memoryResult = await this.executeQuery(createMemoryQuery, {
+             memory: {
+                 type: input.type,
+                 content: input.content,
+                 metadata: input.metadata || {},
+                 status: 'active',
+                 agent_id: input.agent_id
+             }
+         });
+         const memory = memoryResult.insert_memories_one;
+         // Create chunks based on type schema configuration
+         const chunkSize = typeSchema.chunking_config?.chunk_size || 1000;
+         const chunkOverlap = typeSchema.chunking_config?.chunk_overlap || 100;
+         const chunks = this.createChunks(input.content, chunkSize, chunkOverlap);
+         // Create embeddings for each chunk
+         const chunksWithEmbeddings = await Promise.all(chunks.map(async (chunk) => ({
+             ...chunk,
+             embedding: await this.createEmbedding(chunk.content)
+         })));
+         // Insert chunks
+         const createChunksQuery = `
+             mutation CreateMemoryChunks($chunks: [memory_chunks_insert_input!]!) {
+                 insert_memory_chunks(objects: $chunks) {
+                     affected_rows
+                 }
+             }
+         `;
+         await this.executeQuery(createChunksQuery, {
+             chunks: chunksWithEmbeddings.map(chunk => ({
+                 memory_id: memory.id,
+                 chunk_index: chunk.index,
+                 content: chunk.content,
+                 embedding: `[${chunk.embedding.join(',')}]`,
+                 metadata: {},
+                 agent_id: input.agent_id
+             }))
+         });
+         return {
+             memory: memorySchema.parse(memory),
+             chunkCount: chunks.length
+         };
+     }
+     /**
+      * Search memories by semantic similarity using chunk embeddings
+      */
+     async search(options) {
+         const { query, threshold = 0.7, limit = 10, agentId, metadataFilter, createdAtFilter } = options;
+         // Create embedding for the query
+         const embedding = await this.createEmbedding(query);
+         // Ensure the embedding is normalized
+         const magnitude = Math.sqrt(embedding.reduce((acc, val) => acc + val * val, 0));
+         const normalizedEmbedding = embedding.map(val => val / magnitude);
+         const embeddingStr = `[${normalizedEmbedding.join(',')}]`;
+         const gqlQuery = `
+             query SearchMemoryChunks($args: search_memory_chunks_args!) {
+                 search_memory_chunks(args: $args) {
+                     chunk_id
+                     memory_id
+                     chunk_index
+                     chunk_content
+                     chunk_metadata
+                     chunk_created_at
+                     chunk_updated_at
+                     memory_content
+                     memory_type
+                     memory_status
+                     memory_metadata
+                     memory_created_at
+                     memory_updated_at
+                     agent_id
+                     similarity
+                 }
+             }
+         `;
+         const result = await this.executeQuery(gqlQuery, {
+             args: {
+                 query_embedding: embeddingStr,
+                 match_threshold: threshold,
+                 match_count: limit,
+                 filter_agent_id: agentId,
+                 memory_metadata_filter: metadataFilter,
+                 chunk_metadata_filter: null,
+                 created_at_filter: createdAtFilter
+             }
+         });
+         // Convert to SearchMemoryResult format and deduplicate by memory_id,
+         // keeping the highest similarity score for each memory
+         const memoryMap = new Map();
+         for (const chunk of result.search_memory_chunks) {
+             const existing = memoryMap.get(chunk.memory_id);
+             if (!existing || chunk.similarity > existing.similarity) {
+                 memoryMap.set(chunk.memory_id, {
+                     id: chunk.memory_id,
+                     type: chunk.memory_type,
+                     status: chunk.memory_status,
+                     content: chunk.memory_content,
+                     metadata: chunk.memory_metadata,
+                     created_at: chunk.memory_created_at,
+                     updated_at: chunk.memory_updated_at,
+                     agent_id: chunk.agent_id,
+                     similarity: chunk.similarity
+                 });
+             }
+         }
+         // Sort by similarity and return results
+         return Array.from(memoryMap.values())
+             .sort((a, b) => b.similarity - a.similarity)
+             .slice(0, limit);
+     }
+     /**
+      * Clean up test memories and their chunks
+      */
+     async cleanup(memoryIds) {
+         if (memoryIds.length === 0)
+             return;
+         const cleanupQuery = `
+             mutation CleanupTestMemories($ids: [uuid!]!) {
+                 delete_memory_chunks(where: { memory_id: { _in: $ids }}) {
+                     affected_rows
+                 }
+                 delete_memories(where: { id: { _in: $ids }}) {
+                     affected_rows
+                 }
+             }
+         `;
+         await this.executeQuery(cleanupQuery, { ids: memoryIds });
+     }
+     /**
+      * Update an existing memory and optionally its chunks
+      */
+     async update(input) {
+         // First get the existing memory to validate it exists
+         const existingMemory = await this.get(input.id);
+         // Prepare update object with only changed fields
+         const updateFields = {};
+         if (input.status !== undefined)
+             updateFields.status = input.status;
+         if (input.metadata !== undefined)
+             updateFields.metadata = input.metadata;
+         if (input.content !== undefined)
+             updateFields.content = input.content;
+         // If no fields to update, return existing memory
+         if (Object.keys(updateFields).length === 0) {
+             return { memory: existingMemory };
+         }
+         // Update the memory
+         const updateMemoryQuery = `
+             mutation UpdateMemory($id: uuid!, $updates: memories_set_input!) {
+                 update_memories_by_pk(pk_columns: {id: $id}, _set: $updates) {
+                     id
+                     type
+                     status
+                     metadata
+                     content
+                     created_at
+                     updated_at
+                     agent_id
+                 }
+             }
+         `;
+         const memoryResult = await this.executeQuery(updateMemoryQuery, {
+             id: input.id,
+             updates: updateFields
+         });
+         const updatedMemory = memoryResult.update_memories_by_pk;
+         // If content was updated, we need to update chunks
+         let chunkCount;
+         if (input.content !== undefined) {
+             // Get the type schema for chunking config
+             const typeSchema = await this.getTypeSchema(updatedMemory.type);
+             const chunkSize = typeSchema.chunking_config?.chunk_size || 1000;
+             const chunkOverlap = typeSchema.chunking_config?.chunk_overlap || 100;
+             // Create new chunks
+             const chunks = this.createChunks(input.content, chunkSize, chunkOverlap);
+             // Generate embeddings for new chunks
+             const chunksWithEmbeddings = await Promise.all(chunks.map(async (chunk) => ({
+                 ...chunk,
+                 embedding: await this.createEmbedding(chunk.content)
+             })));
+             // Delete existing chunks
+             const deleteChunksQuery = `
+                 mutation DeleteMemoryChunks($memory_id: uuid!) {
+                     delete_memory_chunks(where: {memory_id: {_eq: $memory_id}}) {
+                         affected_rows
+                     }
+                 }
+             `;
+             await this.executeQuery(deleteChunksQuery, { memory_id: input.id });
+             // Insert new chunks
+             const createChunksQuery = `
+                 mutation CreateMemoryChunks($chunks: [memory_chunks_insert_input!]!) {
+                     insert_memory_chunks(objects: $chunks) {
+                         affected_rows
+                     }
+                 }
+             `;
+             await this.executeQuery(createChunksQuery, {
+                 chunks: chunksWithEmbeddings.map(chunk => ({
+                     memory_id: updatedMemory.id,
+                     chunk_index: chunk.index,
+                     content: chunk.content,
+                     embedding: `[${chunk.embedding.join(',')}]`,
+                     metadata: {},
+                     agent_id: updatedMemory.agent_id
+                 }))
+             });
+             chunkCount = chunks.length;
+         }
+         return {
+             memory: memorySchema.parse(updatedMemory),
+             chunkCount
+         };
+     }
+     /**
+      * Mark a memory as deleted
+      */
+     async delete(id) {
+         return (await this.update({
+             id,
+             status: 'deleted'
+         })).memory;
+     }
+     /**
+      * Mark a memory as archived
+      */
+     async archive(id) {
+         return (await this.update({
+             id,
+             status: 'archived'
+         })).memory;
+     }
+     /**
+      * List all available type schemas
+      */
+     async listSchemas() {
+         const query = `
+             query ListTypeSchemas {
+                 type_schemas {
+                     type
+                     schema
+                     metadata_schema
+                     embedding_config
+                     chunking_config
+                     validation_rules
+                     behaviors
+                     created_at
+                     updated_at
+                 }
+             }
+         `;
+         const result = await this.executeQuery(query, {});
+         return result.type_schemas.map(schema => typeSchemaSchema.parse(schema));
+     }
+ }
+ exports.MemoryManager = MemoryManager;
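
For orientation, a minimal usage sketch of the MemoryManager API added above. This is a sketch, not part of the package: it assumes the class is re-exported from the package root, a local Hasura endpoint with an admin secret, and a hypothetical embed() helper standing in for a real embeddings call.

import { MemoryManager } from '@props-labs/mesh-os';

// Hypothetical embeddings helper; substitute your provider's API call.
declare function embed(text: string): Promise<number[]>;

async function demo() {
    // Placeholder endpoint and secret for a local MeshOS deployment.
    const memories = new MemoryManager(
        'http://localhost:8080/v1/graphql',
        { 'x-hasura-admin-secret': 'meshos' },
        (text) => embed(text)
    );

    // create() loads the type schema, splits the content into word-based
    // chunks (defaults: 1000 words per chunk, 100 words of overlap), embeds
    // each chunk, and inserts the memory plus its chunks via Hasura.
    const { memory, chunkCount } = await memories.create({
        type: 'knowledge', // hypothetical registered type schema
        content: 'MeshOS stores agent memories as embedded chunks...',
        metadata: { source: 'docs' }
    });

    // search() embeds and normalizes the query, calls the
    // search_memory_chunks function, then deduplicates chunk hits to one
    // result per memory, keeping the highest similarity score.
    const results = await memories.search({
        query: 'How are memories stored?',
        threshold: 0.7,
        limit: 5
    });
    console.log(memory.id, chunkCount, results.length);
}
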
package/dist/core/workflows.d.ts
@@ -1,45 +1,104 @@
- /**
-  * Workflow management functionality for MeshOS.
-  */
- import { Workflow } from './taxonomy';
+ export interface WorkflowSchema {
+     type: string;
+     input_schema: Record<string, any>;
+     output_schema: Record<string, any>;
+     metadata_schema?: Record<string, any> | null;
+     validation_rules?: Record<string, any> | null;
+     behaviors?: Record<string, any> | null;
+     created_at: string;
+     updated_at: string;
+ }
+ export interface WorkflowRun {
+     id: string;
+     type: string;
+     status: 'pending' | 'running' | 'completed' | 'failed' | 'cancelled';
+     input: Record<string, any>;
+     metadata: Record<string, any>;
+     created_at: string;
+     updated_at: string;
+     agent_id?: string | null;
+ }
+ export interface WorkflowResult {
+     id: string;
+     workflow_id: string;
+     type: 'interim' | 'final';
+     result: Record<string, any>;
+     metadata: Record<string, any>;
+     created_at: string;
+     updated_at: string;
+     agent_id?: string | null;
+ }
+ export interface CreateWorkflowRunInput {
+     type: string;
+     input: Record<string, any>;
+     metadata?: Record<string, any>;
+     agent_id?: string;
+ }
+ export interface UpdateWorkflowStatusInput {
+     id: string;
+     status: WorkflowRun['status'];
+     metadata?: Record<string, any>;
+ }
+ export interface CreateWorkflowResultInput {
+     workflow_id: string;
+     type: WorkflowResult['type'];
+     result: Record<string, any>;
+     metadata?: Record<string, any>;
+     agent_id?: string;
+ }
+ export interface ListWorkflowRunsOptions {
+     type?: string;
+     status?: WorkflowRun['status'];
+     agent_id?: string;
+     limit?: number;
+     offset?: number;
+     order_by?: Array<{
+         field: string;
+         direction: 'asc' | 'desc';
+     }>;
+ }
  export declare class WorkflowManager {
      private url;
      private headers;
      constructor(url: string, headers: Record<string, string>);
      /**
-      * Execute a GraphQL query.
+      * Execute a GraphQL query against Hasura
       */
      private executeQuery;
      /**
-      * Get a workflow by job ID.
+      * List available workflow schemas
+      */
+     listSchemas(): Promise<WorkflowSchema[]>;
+     /**
+      * Get a workflow schema by type
+      */
+     getSchema(type: string): Promise<WorkflowSchema>;
+     /**
+      * Create a new workflow run
+      */
+     createRun(input: CreateWorkflowRunInput): Promise<WorkflowRun>;
+     /**
+      * Update a workflow run's status
       */
-     getByJobId(jobId: string): Promise<Workflow | null>;
+     updateStatus(input: UpdateWorkflowStatusInput): Promise<WorkflowRun>;
      /**
-      * Create a new workflow.
+      * Create a workflow result
       */
-     create(workflow: Omit<Workflow, 'id' | 'createdAt' | 'updatedAt'>): Promise<Workflow>;
+     createResult(input: CreateWorkflowResultInput): Promise<WorkflowResult>;
      /**
-      * Get a workflow by ID.
+      * Get a workflow run by ID
       */
-     get(id: string): Promise<Workflow | null>;
+     getRun(id: string): Promise<WorkflowRun>;
      /**
-      * Get workflows with optional filtering and sorting.
+      * Get all results for a workflow run
       */
-     list(options?: {
-         where?: Record<string, unknown>;
-         orderBy?: Array<{
-             column: string;
-             order: 'asc' | 'desc';
-         }>;
-         limit?: number;
-         offset?: number;
-     }): Promise<Workflow[]>;
+     getResults(workflowId: string): Promise<WorkflowResult[]>;
      /**
-      * Update a workflow.
+      * Get the final result for a workflow run
       */
-     update(id: string, updates: Partial<Omit<Workflow, 'id' | 'createdAt' | 'updatedAt'>>): Promise<Workflow>;
+     getFinalResult(workflowId: string): Promise<WorkflowResult | null>;
      /**
-      * Delete a workflow.
+      * List workflow runs with optional filtering and sorting
       */
-     delete(id: string): Promise<boolean>;
+     listRuns(options?: ListWorkflowRunsOptions): Promise<WorkflowRun[]>;
  }
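
The reworked declarations replace the generic CRUD surface (getByJobId, create, get, list, update, delete) with a run/result lifecycle. A minimal sketch of that lifecycle, assuming WorkflowManager is re-exported from the package root and that a 'summarize' workflow schema has been registered; endpoint, secret, and type name are illustrative:

import { WorkflowManager } from '@props-labs/mesh-os';

async function runDemo() {
    const workflows = new WorkflowManager(
        'http://localhost:8080/v1/graphql',
        { 'x-hasura-admin-secret': 'meshos' } // placeholder credentials
    );

    // Start a run for a registered workflow type.
    const run = await workflows.createRun({
        type: 'summarize',
        input: { url: 'https://example.com/article' }
    });

    // Advance the run through its status lifecycle.
    await workflows.updateStatus({ id: run.id, status: 'running' });

    // Record a result (type may be 'interim' or 'final').
    await workflows.createResult({
        workflow_id: run.id,
        type: 'final',
        result: { summary: '...' }
    });
    await workflows.updateStatus({ id: run.id, status: 'completed' });

    // Read the final result back; null if none was recorded.
    const final = await workflows.getFinalResult(run.id);
    console.log(final?.result);
}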