@props-labs/mesh-os 0.1.20 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/core/__fixtures__/mock_responses.d.ts +318 -0
  2. package/dist/core/__fixtures__/mock_responses.js +333 -0
  3. package/dist/core/__fixtures__/sample_embeddings.d.ts +33 -0
  4. package/dist/core/__fixtures__/sample_embeddings.js +12355 -0
  5. package/dist/core/agents.d.ts +51 -0
  6. package/dist/core/agents.js +170 -0
  7. package/dist/core/client.d.ts +3 -1
  8. package/dist/core/client.js +10 -34
  9. package/dist/core/memories.d.ts +138 -0
  10. package/dist/core/memories.js +417 -0
  11. package/dist/core/taxonomy.d.ts +44 -0
  12. package/dist/core/taxonomy.js +25 -1
  13. package/dist/core/workflows.d.ts +104 -0
  14. package/dist/core/workflows.js +332 -0
  15. package/package.json +3 -3
  16. package/src/templates/hasura/metadata/actions.yaml +6 -0
  17. package/src/templates/hasura/metadata/cron_triggers.yaml +1 -0
  18. package/src/templates/hasura/metadata/databases/databases.yaml +1 -1
  19. package/src/templates/hasura/metadata/databases/default/functions/functions.yaml +80 -0
  20. package/src/templates/hasura/metadata/databases/default/tables/tables.yaml +274 -9
  21. package/src/templates/hasura/metadata/query_collections.yaml +1 -0
  22. package/src/templates/hasura/metadata/rest_endpoints.yaml +1 -0
  23. package/src/templates/hasura/migrations/default/0_cleanup/down.sql +2 -0
  24. package/src/templates/hasura/migrations/default/0_cleanup/up.sql +59 -0
  25. package/src/templates/hasura/migrations/default/1_init/down.sql +27 -21
  26. package/src/templates/hasura/migrations/default/1_init/up.sql +446 -174
  27. package/src/templates/hasura/migrations/default/2_sample_data/down.sql +3 -0
  28. package/src/templates/hasura/migrations/default/2_sample_data/up.sql +288 -0
  29. package/src/templates/hasura/migrations/default/3_agent_relations/down.sql +76 -0
  30. package/src/templates/hasura/migrations/default/3_agent_relations/up.sql +469 -0
  31. package/src/templates/hasura/metadata/config.yaml +0 -1
  32. package/src/templates/hasura/metadata/databases/default/tables/public_agents.yaml +0 -14
  33. package/src/templates/hasura/metadata/databases/default/tables/public_memories.yaml +0 -23
  34. package/src/templates/hasura/metadata/databases/default/tables/public_memory_edges.yaml +0 -57
  35. package/src/templates/hasura/metadata/databases/default/tables/track_tables.yaml +0 -14
  36. package/src/templates/hasura/metadata/metadata.json +0 -80
  37. package/src/templates/hasura/migrations/default/2_metadata_filtering/down.sql +0 -4
  38. package/src/templates/hasura/migrations/default/2_metadata_filtering/up.sql +0 -44
  39. package/src/templates/hasura/migrations/default/3_memory_expiry/down.sql +0 -55
  40. package/src/templates/hasura/migrations/default/3_memory_expiry/up.sql +0 -108
  41. package/src/templates/hasura/migrations/default/4_remove_slug_validation/down.sql +0 -20
  42. package/src/templates/hasura/migrations/default/4_remove_slug_validation/up.sql +0 -5
  43. package/src/templates/hasura/migrations/default/5_entities/down.sql +0 -13
  44. package/src/templates/hasura/migrations/default/5_entities/up.sql +0 -155
package/dist/core/memories.js
@@ -0,0 +1,417 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MemoryManager = void 0;
+ const zod_1 = require("zod");
+ // Validation schemas
+ const memorySchema = zod_1.z.object({
+     id: zod_1.z.string().uuid(),
+     type: zod_1.z.string(),
+     status: zod_1.z.enum(['active', 'archived', 'deleted']),
+     metadata: zod_1.z.record(zod_1.z.any()),
+     content: zod_1.z.string(),
+     created_at: zod_1.z.string(),
+     updated_at: zod_1.z.string(),
+     agent_id: zod_1.z.string().uuid().nullable().optional()
+ });
+ const memoryChunkSchema = zod_1.z.object({
+     id: zod_1.z.string().uuid(),
+     memory_id: zod_1.z.string().uuid(),
+     chunk_index: zod_1.z.number(),
+     content: zod_1.z.string(),
+     embedding: zod_1.z.array(zod_1.z.number()).optional(),
+     metadata: zod_1.z.record(zod_1.z.any()),
+     created_at: zod_1.z.string(),
+     updated_at: zod_1.z.string(),
+     agent_id: zod_1.z.string().uuid().nullable().optional()
+ });
+ const typeSchemaSchema = zod_1.z.object({
+     type: zod_1.z.string(),
+     schema: zod_1.z.record(zod_1.z.any()),
+     metadata_schema: zod_1.z.record(zod_1.z.any()).nullable().optional(),
+     embedding_config: zod_1.z.object({
+         model: zod_1.z.string(),
+         dimensions: zod_1.z.number()
+     }).nullable().optional(),
+     chunking_config: zod_1.z.object({
+         chunk_size: zod_1.z.number(),
+         chunk_overlap: zod_1.z.number()
+     }).nullable().optional(),
+     validation_rules: zod_1.z.record(zod_1.z.any()).nullable().optional(),
+     behaviors: zod_1.z.record(zod_1.z.any()).nullable().optional(),
+     created_at: zod_1.z.string(),
+     updated_at: zod_1.z.string()
+ });
+ class MemoryManager {
+     constructor(url, headers, createEmbedding) {
+         this.url = url;
+         this.headers = headers;
+         this.createEmbedding = createEmbedding;
+     }
+     /**
+      * Execute a GraphQL query against Hasura
+      */
+     async executeQuery(query, variables) {
+         const response = await fetch(this.url, {
+             method: 'POST',
+             headers: {
+                 'Content-Type': 'application/json',
+                 ...this.headers
+             },
+             body: JSON.stringify({
+                 query,
+                 variables
+             })
+         });
+         if (!response.ok) {
+             throw new Error(`Failed to execute query: ${response.statusText}`);
+         }
+         const result = (await response.json());
+         if (result.errors) {
+             throw new Error(`GraphQL error: ${result.errors[0].message}`);
+         }
+         return result.data;
+     }
+     /**
+      * Get a memory by its ID
+      */
+     async get(id) {
+         const query = `
+             query GetMemory($id: uuid!) {
+                 memories_by_pk(id: $id) {
+                     id
+                     type
+                     status
+                     metadata
+                     content
+                     created_at
+                     updated_at
+                     agent_id
+                 }
+             }
+         `;
+         const result = await this.executeQuery(query, { id });
+         const memory = result.memories_by_pk;
+         if (!memory) {
+             throw new Error(`Memory not found with id: ${id}`);
+         }
+         return memorySchema.parse(memory);
+     }
+     /**
+      * Get a type schema by its type name
+      */
+     async getTypeSchema(type) {
+         const query = `
+             query GetTypeSchema($type: String!) {
+                 type_schemas_by_pk(type: $type) {
+                     type
+                     schema
+                     metadata_schema
+                     embedding_config
+                     chunking_config
+                     validation_rules
+                     behaviors
+                     created_at
+                     updated_at
+                 }
+             }
+         `;
+         const result = await this.executeQuery(query, { type });
+         const typeSchema = result.type_schemas_by_pk;
+         if (!typeSchema) {
+             throw new Error(`Type schema not found for type: ${type}`);
+         }
+         return typeSchemaSchema.parse(typeSchema);
+     }
+     /**
+      * Create chunks from content based on type schema configuration
+      */
+     createChunks(content, chunkSize, chunkOverlap) {
+         const words = content.split(' ');
+         const chunks = [];
+         let currentIndex = 0;
+         while (currentIndex < words.length) {
+             const chunkWords = words.slice(currentIndex, currentIndex + chunkSize);
+             chunks.push({
+                 content: chunkWords.join(' '),
+                 index: chunks.length
+             });
+             currentIndex += chunkSize - chunkOverlap;
+         }
+         return chunks;
+     }
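
The chunker above is a word-based sliding window: each iteration advances by `chunkSize - chunkOverlap` words, so consecutive chunks share `chunkOverlap` words, and the config must keep `chunk_overlap < chunk_size` or the loop never advances. Note that with the defaults used later (1000 / 100), sizes are counted in whitespace-separated words, not characters or tokens. A minimal standalone sketch of the same logic:

```ts
// Standalone sketch of the word-based sliding-window chunker above.
function createChunks(content: string, chunkSize: number, chunkOverlap: number) {
    const words = content.split(' ');
    const chunks: { content: string; index: number }[] = [];
    let currentIndex = 0;
    while (currentIndex < words.length) {
        chunks.push({
            content: words.slice(currentIndex, currentIndex + chunkSize).join(' '),
            index: chunks.length,
        });
        // Step by chunkSize - chunkOverlap so adjacent chunks overlap.
        currentIndex += chunkSize - chunkOverlap;
    }
    return chunks;
}

createChunks('a b c d e f g', 4, 1);
// => [ { content: 'a b c d', index: 0 },
//      { content: 'd e f g', index: 1 },
//      { content: 'g',       index: 2 } ]
```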
+     /**
+      * Create a new memory with chunks and embeddings
+      */
+     async create(input) {
+         // First get the type schema to validate against
+         const typeSchema = await this.getTypeSchema(input.type);
+         // Create the memory
+         const createMemoryQuery = `
+             mutation CreateMemory($memory: memories_insert_input!) {
+                 insert_memories_one(object: $memory) {
+                     id
+                     type
+                     status
+                     metadata
+                     content
+                     created_at
+                     updated_at
+                     agent_id
+                 }
+             }
+         `;
+         const memoryResult = await this.executeQuery(createMemoryQuery, {
+             memory: {
+                 type: input.type,
+                 content: input.content,
+                 metadata: input.metadata || {},
+                 status: 'active',
+                 agent_id: input.agent_id
+             }
+         });
+         const memory = memoryResult.insert_memories_one;
+         // Create chunks based on type schema configuration
+         const chunkSize = typeSchema.chunking_config?.chunk_size || 1000;
+         const chunkOverlap = typeSchema.chunking_config?.chunk_overlap || 100;
+         const chunks = this.createChunks(input.content, chunkSize, chunkOverlap);
+         // Create embeddings for each chunk
+         const chunksWithEmbeddings = await Promise.all(chunks.map(async (chunk) => ({
+             ...chunk,
+             embedding: await this.createEmbedding(chunk.content)
+         })));
+         // Insert chunks
+         const createChunksQuery = `
+             mutation CreateMemoryChunks($chunks: [memory_chunks_insert_input!]!) {
+                 insert_memory_chunks(objects: $chunks) {
+                     affected_rows
+                 }
+             }
+         `;
+         await this.executeQuery(createChunksQuery, {
+             chunks: chunksWithEmbeddings.map(chunk => ({
+                 memory_id: memory.id,
+                 chunk_index: chunk.index,
+                 content: chunk.content,
+                 embedding: `[${chunk.embedding.join(',')}]`,
+                 metadata: {},
+                 agent_id: input.agent_id
+             }))
+         });
+         return {
+             memory: memorySchema.parse(memory),
+             chunkCount: chunks.length
+         };
+     }
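
Note that `create()` serializes each embedding as a bracketed string rather than a JSON array, presumably because the `embedding` column is a pgvector type, whose text input format is `[x1,x2,...]`. The serialization in isolation (helper name introduced here for illustration):

```ts
// Mirrors the inline `[${chunk.embedding.join(',')}]` serialization above;
// pgvector parses '[x1,x2,...]' text literals into vectors on insert.
const toVectorLiteral = (v: number[]): string => `[${v.join(',')}]`;

toVectorLiteral([0.25, -0.5, 1]); // => '[0.25,-0.5,1]'
```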
+     /**
+      * Search memories by semantic similarity using chunk embeddings
+      */
+     async search(options) {
+         const { query, threshold = 0.7, limit = 10, agentId, metadataFilter, createdAtFilter } = options;
+         // Create embedding for the query
+         const embedding = await this.createEmbedding(query);
+         // Ensure the embedding is normalized
+         const magnitude = Math.sqrt(embedding.reduce((acc, val) => acc + val * val, 0));
+         const normalizedEmbedding = embedding.map(val => val / magnitude);
+         const embeddingStr = `[${normalizedEmbedding.join(',')}]`;
+         const gqlQuery = `
+             query SearchMemoryChunks($args: search_memory_chunks_args!) {
+                 search_memory_chunks(args: $args) {
+                     chunk_id
+                     memory_id
+                     chunk_index
+                     chunk_content
+                     chunk_metadata
+                     chunk_created_at
+                     chunk_updated_at
+                     memory_content
+                     memory_type
+                     memory_status
+                     memory_metadata
+                     memory_created_at
+                     memory_updated_at
+                     agent_id
+                     similarity
+                 }
+             }
+         `;
+         const result = await this.executeQuery(gqlQuery, {
+             args: {
+                 query_embedding: embeddingStr,
+                 match_threshold: threshold,
+                 match_count: limit,
+                 filter_agent_id: agentId,
+                 memory_metadata_filter: metadataFilter,
+                 chunk_metadata_filter: null,
+                 created_at_filter: createdAtFilter
+             }
+         });
+         // Convert to SearchMemoryResult format and deduplicate by memory_id,
+         // keeping the highest similarity score for each memory
+         const memoryMap = new Map();
+         for (const chunk of result.search_memory_chunks) {
+             const existing = memoryMap.get(chunk.memory_id);
+             if (!existing || chunk.similarity > existing.similarity) {
+                 memoryMap.set(chunk.memory_id, {
+                     id: chunk.memory_id,
+                     type: chunk.memory_type,
+                     status: chunk.memory_status,
+                     content: chunk.memory_content,
+                     metadata: chunk.memory_metadata,
+                     created_at: chunk.memory_created_at,
+                     updated_at: chunk.memory_updated_at,
+                     agent_id: chunk.agent_id,
+                     similarity: chunk.similarity
+                 });
+             }
+         }
+         // Sort by similarity and return results
+         return Array.from(memoryMap.values())
+             .sort((a, b) => b.similarity - a.similarity)
+             .slice(0, limit);
+     }
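
Before querying, `search()` L2-normalizes the query embedding; if the stored chunk embeddings are also unit length (many embedding models return normalized vectors), cosine similarity reduces to a plain dot product, which keeps the `match_threshold` consistent. Results are then deduplicated per memory, keeping each memory's best-scoring chunk. The normalization step in isolation (note a zero vector would divide by zero here):

```ts
// L2-normalization as performed in search(): scale the vector so its
// magnitude is 1.
function normalize(v: number[]): number[] {
    const magnitude = Math.sqrt(v.reduce((acc, x) => acc + x * x, 0));
    return v.map(x => x / magnitude);
}

normalize([3, 4]); // => [0.6, 0.8], since sqrt(3² + 4²) = 5
```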
+     /**
+      * Clean up test memories and their chunks
+      */
+     async cleanup(memoryIds) {
+         if (memoryIds.length === 0)
+             return;
+         const cleanupQuery = `
+             mutation CleanupTestMemories($ids: [uuid!]!) {
+                 delete_memory_chunks(where: { memory_id: { _in: $ids }}) {
+                     affected_rows
+                 }
+                 delete_memories(where: { id: { _in: $ids }}) {
+                     affected_rows
+                 }
+             }
+         `;
+         await this.executeQuery(cleanupQuery, { ids: memoryIds });
+     }
+     /**
+      * Update an existing memory and optionally its chunks
+      */
+     async update(input) {
+         // First get the existing memory to validate it exists
+         const existingMemory = await this.get(input.id);
+         // Prepare update object with only changed fields
+         const updateFields = {};
+         if (input.status !== undefined)
+             updateFields.status = input.status;
+         if (input.metadata !== undefined)
+             updateFields.metadata = input.metadata;
+         if (input.content !== undefined)
+             updateFields.content = input.content;
+         // If no fields to update, return existing memory
+         if (Object.keys(updateFields).length === 0) {
+             return { memory: existingMemory };
+         }
+         // Update the memory
+         const updateMemoryQuery = `
+             mutation UpdateMemory($id: uuid!, $updates: memories_set_input!) {
+                 update_memories_by_pk(pk_columns: {id: $id}, _set: $updates) {
+                     id
+                     type
+                     status
+                     metadata
+                     content
+                     created_at
+                     updated_at
+                     agent_id
+                 }
+             }
+         `;
+         const memoryResult = await this.executeQuery(updateMemoryQuery, {
+             id: input.id,
+             updates: updateFields
+         });
+         const updatedMemory = memoryResult.update_memories_by_pk;
+         // If content was updated, we need to update chunks
+         let chunkCount;
+         if (input.content !== undefined) {
+             // Get the type schema for chunking config
+             const typeSchema = await this.getTypeSchema(updatedMemory.type);
+             const chunkSize = typeSchema.chunking_config?.chunk_size || 1000;
+             const chunkOverlap = typeSchema.chunking_config?.chunk_overlap || 100;
+             // Create new chunks
+             const chunks = this.createChunks(input.content, chunkSize, chunkOverlap);
+             // Generate embeddings for new chunks
+             const chunksWithEmbeddings = await Promise.all(chunks.map(async (chunk) => ({
+                 ...chunk,
+                 embedding: await this.createEmbedding(chunk.content)
+             })));
+             // Delete existing chunks
+             const deleteChunksQuery = `
+                 mutation DeleteMemoryChunks($memory_id: uuid!) {
+                     delete_memory_chunks(where: {memory_id: {_eq: $memory_id}}) {
+                         affected_rows
+                     }
+                 }
+             `;
+             await this.executeQuery(deleteChunksQuery, { memory_id: input.id });
+             // Insert new chunks
+             const createChunksQuery = `
+                 mutation CreateMemoryChunks($chunks: [memory_chunks_insert_input!]!) {
+                     insert_memory_chunks(objects: $chunks) {
+                         affected_rows
+                     }
+                 }
+             `;
+             await this.executeQuery(createChunksQuery, {
+                 chunks: chunksWithEmbeddings.map(chunk => ({
+                     memory_id: updatedMemory.id,
+                     chunk_index: chunk.index,
+                     content: chunk.content,
+                     embedding: `[${chunk.embedding.join(',')}]`,
+                     metadata: {},
+                     agent_id: updatedMemory.agent_id
+                 }))
+             });
+             chunkCount = chunks.length;
+         }
+         return {
+             memory: memorySchema.parse(updatedMemory),
+             chunkCount
+         };
+     }
+     /**
+      * Mark a memory as deleted
+      */
+     async delete(id) {
+         return (await this.update({
+             id,
+             status: 'deleted'
+         })).memory;
+     }
+     /**
+      * Mark a memory as archived
+      */
+     async archive(id) {
+         return (await this.update({
+             id,
+             status: 'archived'
+         })).memory;
+     }
+     /**
+      * List all available type schemas
+      */
+     async listSchemas() {
+         const query = `
+             query ListTypeSchemas {
+                 type_schemas {
+                     type
+                     schema
+                     metadata_schema
+                     embedding_config
+                     chunking_config
+                     validation_rules
+                     behaviors
+                     created_at
+                     updated_at
+                 }
+             }
+         `;
+         const result = await this.executeQuery(query, {});
+         return result.type_schemas.map(schema => typeSchemaSchema.parse(schema));
+     }
+ }
+ exports.MemoryManager = MemoryManager;
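
End-to-end, the new class can be driven as below. This is a hedged sketch: the deep import path mirrors the compiled file location above, while the endpoint URL, admin-secret header, `embed` callback, and 'knowledge' type are illustrative assumptions rather than package defaults.

```ts
const { MemoryManager } = require('@props-labs/mesh-os/dist/core/memories');

// Stand-in embedding callback: any (text) => Promise<number[]> whose length
// matches the type schema's embedding_config.dimensions should work.
const embed = async (_text: string): Promise<number[]> => new Array(1536).fill(0);

async function demo() {
    const memories = new MemoryManager(
        'http://localhost:8080/v1/graphql',    // assumed local Hasura endpoint
        { 'x-hasura-admin-secret': 'meshos' }, // assumed auth header
        embed,
    );
    // 'knowledge' must match an existing type_schemas row (see getTypeSchema).
    const { memory, chunkCount } = await memories.create({
        type: 'knowledge',
        content: 'MeshOS chunks and embeds memory content on write.',
        metadata: { source: 'example' },
    });
    const hits = await memories.search({ query: 'how are memories stored?', limit: 5 });
    await memories.archive(memory.id);
    return { chunkCount, hits };
}
```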
package/dist/core/taxonomy.d.ts
@@ -168,6 +168,18 @@ export declare const CoreAgentStatus: {
      readonly PAUSED: "paused";
  };
  export type AgentStatus = typeof CoreAgentStatus[keyof typeof CoreAgentStatus] | string;
+ /**
+  * Core workflow status types.
+  * The type allows for both predefined and custom status strings.
+  */
+ export declare const CoreWorkflowStatus: {
+     readonly PENDING: "pending";
+     readonly RUNNING: "running";
+     readonly COMPLETED: "completed";
+     readonly FAILED: "failed";
+     readonly CANCELLED: "cancelled";
+ };
+ export type WorkflowStatus = typeof CoreWorkflowStatus[keyof typeof CoreWorkflowStatus] | string;
  /**
   * Version history entry.
   */
@@ -320,3 +332,35 @@ export interface TimestampFilter {
      _lte?: string;
      _eq?: string;
  }
+ /**
+  * Workflow schema definition.
+  */
+ export declare const workflowSchema: z.ZodObject<{
+     id: z.ZodOptional<z.ZodString>;
+     jobId: z.ZodString;
+     type: z.ZodString;
+     status: z.ZodDefault<z.ZodString>;
+     result: z.ZodOptional<z.ZodUnknown>;
+     metadata: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
+     createdAt: z.ZodOptional<z.ZodString>;
+     updatedAt: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+     type: string;
+     status: string;
+     metadata: Record<string, unknown>;
+     jobId: string;
+     id?: string | undefined;
+     createdAt?: string | undefined;
+     updatedAt?: string | undefined;
+     result?: unknown;
+ }, {
+     type: string;
+     jobId: string;
+     id?: string | undefined;
+     status?: string | undefined;
+     metadata?: Record<string, unknown> | undefined;
+     createdAt?: string | undefined;
+     updatedAt?: string | undefined;
+     result?: unknown;
+ }>;
+ export type Workflow = z.infer<typeof workflowSchema>;
package/dist/core/taxonomy.js
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.edgeMetadataSchema = exports.memoryMetadataSchema = exports.CoreAgentStatus = exports.RelevanceTag = exports.EdgeType = exports.MediaSubtype = exports.DecisionSubtype = exports.KnowledgeSubtype = exports.ActivitySubtype = exports.entityMemoryLinkSchema = exports.entitySchema = exports.EntityStatus = exports.EntityRelationshipType = exports.DataType = void 0;
+ exports.workflowSchema = exports.edgeMetadataSchema = exports.memoryMetadataSchema = exports.CoreWorkflowStatus = exports.CoreAgentStatus = exports.RelevanceTag = exports.EdgeType = exports.MediaSubtype = exports.DecisionSubtype = exports.KnowledgeSubtype = exports.ActivitySubtype = exports.entityMemoryLinkSchema = exports.entitySchema = exports.EntityStatus = exports.EntityRelationshipType = exports.DataType = void 0;
  /**
   * Taxonomy and classification models for MeshOS.
   */
@@ -143,6 +143,17 @@ exports.CoreAgentStatus = {
      ERROR: 'error',
      PAUSED: 'paused'
  };
+ /**
+  * Core workflow status types.
+  * The type allows for both predefined and custom status strings.
+  */
+ exports.CoreWorkflowStatus = {
+     PENDING: 'pending',
+     RUNNING: 'running',
+     COMPLETED: 'completed',
+     FAILED: 'failed',
+     CANCELLED: 'cancelled'
+ };
  /**
   * Standardized metadata structure for memories.
   */
@@ -187,3 +198,16 @@ exports.edgeMetadataSchema = zod_1.z.object({
      bidirectional: zod_1.z.boolean().default(false),
      additional: zod_1.z.record(zod_1.z.unknown()).default({}),
  });
+ /**
+  * Workflow schema definition.
+  */
+ exports.workflowSchema = zod_1.z.object({
+     id: zod_1.z.string().uuid().optional(),
+     jobId: zod_1.z.string(),
+     type: zod_1.z.string(),
+     status: zod_1.z.string().default(exports.CoreWorkflowStatus.PENDING),
+     result: zod_1.z.unknown().optional(),
+     metadata: zod_1.z.record(zod_1.z.unknown()).default({}),
+     createdAt: zod_1.z.string().optional(),
+     updatedAt: zod_1.z.string().optional(),
+ });
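
A small sketch of how the new constants and schema compose; `parse` fills the declared defaults (the deep import path is an assumption, mirroring the compiled file above):

```ts
const { workflowSchema, CoreWorkflowStatus } =
    require('@props-labs/mesh-os/dist/core/taxonomy');

const wf = workflowSchema.parse({ jobId: 'job-123', type: 'ingest' });
// Defaults applied by zod:
// wf.status   === CoreWorkflowStatus.PENDING   // 'pending'
// wf.metadata  => {}
```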
package/dist/core/workflows.d.ts
@@ -0,0 +1,104 @@
+ export interface WorkflowSchema {
+     type: string;
+     input_schema: Record<string, any>;
+     output_schema: Record<string, any>;
+     metadata_schema?: Record<string, any> | null;
+     validation_rules?: Record<string, any> | null;
+     behaviors?: Record<string, any> | null;
+     created_at: string;
+     updated_at: string;
+ }
+ export interface WorkflowRun {
+     id: string;
+     type: string;
+     status: 'pending' | 'running' | 'completed' | 'failed' | 'cancelled';
+     input: Record<string, any>;
+     metadata: Record<string, any>;
+     created_at: string;
+     updated_at: string;
+     agent_id?: string | null;
+ }
+ export interface WorkflowResult {
+     id: string;
+     workflow_id: string;
+     type: 'interim' | 'final';
+     result: Record<string, any>;
+     metadata: Record<string, any>;
+     created_at: string;
+     updated_at: string;
+     agent_id?: string | null;
+ }
+ export interface CreateWorkflowRunInput {
+     type: string;
+     input: Record<string, any>;
+     metadata?: Record<string, any>;
+     agent_id?: string;
+ }
+ export interface UpdateWorkflowStatusInput {
+     id: string;
+     status: WorkflowRun['status'];
+     metadata?: Record<string, any>;
+ }
+ export interface CreateWorkflowResultInput {
+     workflow_id: string;
+     type: WorkflowResult['type'];
+     result: Record<string, any>;
+     metadata?: Record<string, any>;
+     agent_id?: string;
+ }
+ export interface ListWorkflowRunsOptions {
+     type?: string;
+     status?: WorkflowRun['status'];
+     agent_id?: string;
+     limit?: number;
+     offset?: number;
+     order_by?: Array<{
+         field: string;
+         direction: 'asc' | 'desc';
+     }>;
+ }
+ export declare class WorkflowManager {
+     private url;
+     private headers;
+     constructor(url: string, headers: Record<string, string>);
+     /**
+      * Execute a GraphQL query against Hasura
+      */
+     private executeQuery;
+     /**
+      * List available workflow schemas
+      */
+     listSchemas(): Promise<WorkflowSchema[]>;
+     /**
+      * Get a workflow schema by type
+      */
+     getSchema(type: string): Promise<WorkflowSchema>;
+     /**
+      * Create a new workflow run
+      */
+     createRun(input: CreateWorkflowRunInput): Promise<WorkflowRun>;
+     /**
+      * Update a workflow run's status
+      */
+     updateStatus(input: UpdateWorkflowStatusInput): Promise<WorkflowRun>;
+     /**
+      * Create a workflow result
+      */
+     createResult(input: CreateWorkflowResultInput): Promise<WorkflowResult>;
+     /**
+      * Get a workflow run by ID
+      */
+     getRun(id: string): Promise<WorkflowRun>;
+     /**
+      * Get all results for a workflow run
+      */
+     getResults(workflowId: string): Promise<WorkflowResult[]>;
+     /**
+      * Get the final result for a workflow run
+      */
+     getFinalResult(workflowId: string): Promise<WorkflowResult | null>;
+     /**
+      * List workflow runs with optional filtering and sorting
+      */
+     listRuns(options?: ListWorkflowRunsOptions): Promise<WorkflowRun[]>;
+ }
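
The declarations trace a run/result lifecycle: create a run, move it through statuses, attach interim or final results, then read the final result back. A hedged usage sketch (the import path, endpoint, header, and 'ingest' type are assumptions):

```ts
const { WorkflowManager } = require('@props-labs/mesh-os/dist/core/workflows');

async function runWorkflow() {
    const workflows = new WorkflowManager(
        'http://localhost:8080/v1/graphql',    // assumed local Hasura endpoint
        { 'x-hasura-admin-secret': 'meshos' }, // assumed auth header
    );

    const run = await workflows.createRun({
        type: 'ingest',                        // must match a registered workflow schema
        input: { url: 'https://example.com' },
    });

    await workflows.updateStatus({ id: run.id, status: 'running' });
    await workflows.createResult({
        workflow_id: run.id,
        type: 'final',
        result: { pages: 3 },
    });
    await workflows.updateStatus({ id: run.id, status: 'completed' });

    // Returns null until a result with type 'final' exists.
    return workflows.getFinalResult(run.id);
}
```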