griptape-nodes 0.44.0__py3-none-any.whl → 0.45.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. griptape_nodes/__init__.py +5 -1
  2. griptape_nodes/app/api.py +2 -35
  3. griptape_nodes/app/app.py +70 -3
  4. griptape_nodes/app/watch.py +5 -2
  5. griptape_nodes/drivers/storage/base_storage_driver.py +37 -0
  6. griptape_nodes/drivers/storage/griptape_cloud_storage_driver.py +2 -1
  7. griptape_nodes/exe_types/core_types.py +109 -9
  8. griptape_nodes/exe_types/node_types.py +19 -5
  9. griptape_nodes/node_library/workflow_registry.py +29 -0
  10. griptape_nodes/retained_mode/events/app_events.py +3 -2
  11. griptape_nodes/retained_mode/events/base_events.py +9 -0
  12. griptape_nodes/retained_mode/events/sync_events.py +60 -0
  13. griptape_nodes/retained_mode/events/workflow_events.py +231 -0
  14. griptape_nodes/retained_mode/griptape_nodes.py +8 -0
  15. griptape_nodes/retained_mode/managers/library_manager.py +6 -18
  16. griptape_nodes/retained_mode/managers/node_manager.py +2 -2
  17. griptape_nodes/retained_mode/managers/operation_manager.py +7 -0
  18. griptape_nodes/retained_mode/managers/settings.py +5 -0
  19. griptape_nodes/retained_mode/managers/sync_manager.py +498 -0
  20. griptape_nodes/retained_mode/managers/workflow_manager.py +682 -28
  21. griptape_nodes/retained_mode/retained_mode.py +23 -0
  22. griptape_nodes/updater/__init__.py +4 -2
  23. griptape_nodes/utils/uv_utils.py +18 -0
  24. {griptape_nodes-0.44.0.dist-info → griptape_nodes-0.45.0.dist-info}/METADATA +2 -1
  25. {griptape_nodes-0.44.0.dist-info → griptape_nodes-0.45.0.dist-info}/RECORD +27 -24
  26. {griptape_nodes-0.44.0.dist-info → griptape_nodes-0.45.0.dist-info}/WHEEL +0 -0
  27. {griptape_nodes-0.44.0.dist-info → griptape_nodes-0.45.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,498 @@
1
+ from __future__ import annotations
2
+
3
+ import hashlib
4
+ import logging
5
+ import threading
6
+ import uuid
7
+ from collections import defaultdict
8
+ from concurrent.futures import ThreadPoolExecutor
9
+ from pathlib import Path
10
+ from typing import TYPE_CHECKING
11
+
12
+ from watchfiles import Change, PythonFilter, watch
13
+
14
+ from griptape_nodes.drivers.storage.griptape_cloud_storage_driver import GriptapeCloudStorageDriver
15
+ from griptape_nodes.retained_mode.events.app_events import AppInitializationComplete
16
+ from griptape_nodes.retained_mode.events.base_events import AppEvent
17
+ from griptape_nodes.retained_mode.events.sync_events import (
18
+ StartSyncAllCloudWorkflowsRequest,
19
+ StartSyncAllCloudWorkflowsResultFailure,
20
+ StartSyncAllCloudWorkflowsResultSuccess,
21
+ SyncComplete,
22
+ )
23
+ from griptape_nodes.retained_mode.events.workflow_events import (
24
+ RegisterWorkflowsFromConfigRequest,
25
+ RegisterWorkflowsFromConfigResultSuccess,
26
+ )
27
+
28
+ if TYPE_CHECKING:
29
+ from griptape_nodes.retained_mode.events.base_events import ResultPayload
30
+ from griptape_nodes.retained_mode.managers.config_manager import ConfigManager
31
+ from griptape_nodes.retained_mode.managers.event_manager import EventManager
32
+
33
+
34
+ logger = logging.getLogger("griptape_nodes")
35
+
36
+
37
+ class SyncManager:
38
+ """Manager for syncing workflows with cloud storage."""
39
+
40
+ def __init__(self, event_manager: EventManager, config_manager: ConfigManager) -> None:
41
+ self._active_sync_tasks: dict[str, threading.Thread] = {}
42
+ self._watch_task: threading.Thread | None = None
43
+ self._watching_stopped = threading.Event()
44
+ self._config_manager = config_manager
45
+ self._event_manager = event_manager
46
+
47
+ # Hash tracking to prevent sync loops
48
+ self._file_hashes = defaultdict(str)
49
+ self._hash_lock = threading.Lock()
50
+
51
+ # Initialize sync directory
52
+ self._sync_dir = self._config_manager.workspace_path / self._config_manager.get_config_value(
53
+ "synced_workflows_directory"
54
+ )
55
+ self._sync_dir.mkdir(parents=True, exist_ok=True)
56
+
57
+ event_manager.assign_manager_to_request_type(
58
+ StartSyncAllCloudWorkflowsRequest,
59
+ self.on_start_sync_all_cloud_workflows_request,
60
+ )
61
+
62
+ event_manager.add_listener_to_app_event(
63
+ AppInitializationComplete,
64
+ self.on_app_initialization_complete,
65
+ )
66
+
67
+ def _set_expected_hash(self, path: str | Path, content: bytes) -> None:
68
+ """Set the expected SHA256 hash for a file we're about to write.
69
+
70
+ This should be called before writing a file to establish what content
71
+ we expect to see, allowing us to distinguish our own writes from
72
+ external modifications.
73
+
74
+ Args:
75
+ path: Path to the file that will be written
76
+ content: The exact bytes that will be written to the file
77
+ """
78
+ path_str = str(Path(path).resolve())
79
+ file_hash = hashlib.sha256(content).hexdigest()
80
+ with self._hash_lock:
81
+ self._file_hashes[path_str] = file_hash
82
+ logger.debug("Set expected hash for %s: %s", path_str, file_hash[:8])
83
+
84
+ def _is_expected_content(self, path: str) -> bool:
85
+ """Check if file content matches what we wrote (indicating a self-triggered event).
86
+
87
+ This prevents sync loops by identifying when a file system event was caused
88
+ by our own write operation rather than an external modification.
89
+
90
+ Args:
91
+ path: Path to the file to check
92
+
93
+ Returns:
94
+ True if the file content matches our expected hash (self-triggered event),
95
+ False if it doesn't match or no expected hash exists (external change)
96
+ """
97
+ path_str = str(Path(path).resolve())
98
+ with self._hash_lock:
99
+ expected_hash = self._file_hashes.get(path_str)
100
+
101
+ if not expected_hash:
102
+ # No expected hash means this wasn't a self-triggered event
103
+ return False
104
+
105
+ try:
106
+ actual_hash = hashlib.sha256(Path(path).read_bytes()).hexdigest()
107
+
108
+ if actual_hash == expected_hash:
109
+ # Content matches - this was our own write, clean up and ignore
110
+ with self._hash_lock:
111
+ self._file_hashes.pop(path_str, None)
112
+ logger.debug("File content matches expected hash for %s", path_str)
113
+ return True
114
+ # Content doesn't match - this is an external modification
115
+ logger.debug("File content does not match expected hash for %s", path_str)
116
+ except Exception as e:
117
+ logger.debug("Error checking file hash for %s: %s", path_str, str(e))
118
+
119
+ return False
120
+
121
+ def on_start_sync_all_cloud_workflows_request(self, _request: StartSyncAllCloudWorkflowsRequest) -> ResultPayload:
122
+ """Start syncing all cloud workflows to local synced_workflows directory."""
123
+ try:
124
+ storage_driver = self._get_cloud_storage_driver()
125
+ sync_dir = self._sync_dir
126
+
127
+ # List all assets in the bucket to get count
128
+ files = storage_driver.list_files()
129
+ workflow_files = [file for file in files if file.endswith(".py")]
130
+
131
+ if not workflow_files:
132
+ logger.info("No workflow files found in cloud storage")
133
+ return StartSyncAllCloudWorkflowsResultSuccess(sync_directory=str(sync_dir), total_workflows=0)
134
+
135
+ # Start background sync with unique ID
136
+ sync_task_id = str(uuid.uuid4())
137
+ sync_thread = threading.Thread(
138
+ target=self._sync_workflows_background,
139
+ args=(sync_task_id, workflow_files, storage_driver, sync_dir),
140
+ name=f"SyncWorkflows-{sync_task_id}",
141
+ daemon=True,
142
+ )
143
+
144
+ self._active_sync_tasks[sync_task_id] = sync_thread
145
+ sync_thread.start()
146
+ except Exception as e:
147
+ logger.error("Failed to start cloud workflow sync: %s", str(e))
148
+ return StartSyncAllCloudWorkflowsResultFailure()
149
+ else:
150
+ logger.info("Started background sync for %d workflow files", len(workflow_files))
151
+ return StartSyncAllCloudWorkflowsResultSuccess(
152
+ sync_directory=str(sync_dir), total_workflows=len(workflow_files)
153
+ )
154
+
155
+ def on_app_initialization_complete(self, _payload: AppInitializationComplete) -> None:
156
+ """Automatically start syncing cloud workflows when the app initializes."""
157
+ try:
158
+ # Check if cloud storage is configured before attempting sync
159
+ self._get_cloud_storage_driver()
160
+ # Start file watching after successful sync
161
+ self._start_file_watching()
162
+
163
+ logger.info("App initialization complete - starting automatic cloud workflow sync")
164
+
165
+ # Create and handle the sync request
166
+ sync_request = StartSyncAllCloudWorkflowsRequest()
167
+
168
+ # Use handle_request to process through normal event system
169
+ from griptape_nodes.retained_mode.griptape_nodes import GriptapeNodes
170
+
171
+ result = GriptapeNodes.handle_request(sync_request)
172
+
173
+ if isinstance(result, StartSyncAllCloudWorkflowsResultSuccess):
174
+ logger.info(
175
+ "Automatic cloud workflow sync started successfully - %d workflows will be synced to %s",
176
+ result.total_workflows,
177
+ result.sync_directory,
178
+ )
179
+
180
+ else:
181
+ logger.debug("Automatic cloud workflow sync failed to start (likely cloud not configured)")
182
+
183
+ except Exception as e:
184
+ logger.debug("Automatic cloud workflow sync skipped: %s", str(e))
185
+
186
+ def _get_cloud_storage_driver(self) -> GriptapeCloudStorageDriver:
187
+ """Get configured cloud storage driver.
188
+
189
+ Returns:
190
+ Configured GriptapeCloudStorageDriver instance.
191
+
192
+ Raises:
193
+ RuntimeError: If required cloud configuration is missing.
194
+ """
195
+ from griptape_nodes.retained_mode.griptape_nodes import GriptapeNodes
196
+
197
+ secrets_manager = GriptapeNodes.SecretsManager()
198
+
199
+ # Get cloud storage configuration from secrets
200
+ bucket_id = secrets_manager.get_secret("GT_CLOUD_BUCKET_ID", should_error_on_not_found=False)
201
+ base_url = secrets_manager.get_secret("GT_CLOUD_BASE_URL", should_error_on_not_found=False)
202
+ api_key = secrets_manager.get_secret("GT_CLOUD_API_KEY")
203
+
204
+ if not bucket_id:
205
+ msg = "Cloud storage bucket_id not configured. Set GT_CLOUD_BUCKET_ID secret."
206
+ raise RuntimeError(msg)
207
+ if not api_key:
208
+ msg = "Cloud storage api_key not configured. Set GT_CLOUD_API_KEY secret."
209
+ raise RuntimeError(msg)
210
+
211
+ return GriptapeCloudStorageDriver(
212
+ bucket_id=bucket_id,
213
+ base_url=base_url,
214
+ api_key=api_key,
215
+ static_files_directory=self._config_manager.get_config_value(
216
+ "synced_workflows_directory", default="synced_workflows"
217
+ ),
218
+ )
219
+
220
+ def _download_cloud_workflow_to_sync_dir(self, filename: str) -> bool:
221
+ """Download a workflow file from cloud storage to the sync directory.
222
+
223
+ Args:
224
+ filename: Name of the workflow file to download from cloud
225
+
226
+ Returns:
227
+ True if download was successful, False otherwise
228
+ """
229
+ try:
230
+ storage_driver = self._get_cloud_storage_driver()
231
+ sync_dir = self._sync_dir
232
+
233
+ # Download file content from cloud
234
+ file_content = storage_driver.download_file(filename)
235
+
236
+ # Write to local sync directory
237
+ local_file_path = sync_dir / filename
238
+
239
+ # Check if file exists and has same content hash
240
+ if local_file_path.exists():
241
+ try:
242
+ existing_content = local_file_path.read_bytes()
243
+
244
+ existing_hash = hashlib.sha256(existing_content).hexdigest()
245
+ new_hash = hashlib.sha256(file_content).hexdigest()
246
+
247
+ if existing_hash == new_hash:
248
+ logger.debug("Skipping write - file already has same content hash: %s", filename)
249
+ return True
250
+ except Exception as e:
251
+ logger.debug("Error checking existing file hash for %s: %s", filename, str(e))
252
+
253
+ # Set expected hash before writing to prevent sync loops
254
+ self._set_expected_hash(local_file_path, file_content)
255
+
256
+ local_file_path.write_bytes(file_content)
257
+
258
+ logger.info("Successfully downloaded cloud workflow to sync directory: %s", filename)
259
+ except Exception as e:
260
+ logger.error("Failed to download cloud workflow '%s': %s", filename, str(e))
261
+ return False
262
+ else:
263
+ return True
264
+
265
+ def _upload_workflow_file(self, file_path: Path) -> None:
266
+ """Upload a single workflow file to cloud storage.
267
+
268
+ Args:
269
+ file_path: Path to the workflow file to upload.
270
+ """
271
+ try:
272
+ # Check if valid workflow file
273
+ if not file_path.name.endswith(".py"):
274
+ logger.error("Invalid workflow file path: %s", file_path)
275
+ return
276
+
277
+ # Proceed with upload
278
+ storage_driver = self._get_cloud_storage_driver()
279
+
280
+ # Read file content
281
+ file_content = file_path.read_bytes()
282
+
283
+ # Upload to cloud storage using the upload_file method
284
+ filename = file_path.name
285
+ storage_driver.upload_file(filename, file_content)
286
+
287
+ logger.info("Successfully uploaded workflow file to cloud: %s", filename)
288
+
289
+ except Exception as e:
290
+ logger.error("Failed to upload workflow file '%s': %s", file_path.name, str(e))
291
+
292
+ def _delete_workflow_file(self, file_path: Path) -> None:
293
+ """Delete a workflow file from cloud storage.
294
+
295
+ Args:
296
+ file_path: Path to the workflow file that was deleted locally.
297
+ """
298
+ try:
299
+ storage_driver = self._get_cloud_storage_driver()
300
+ filename = file_path.name
301
+
302
+ # Use the storage driver's delete method
303
+ storage_driver.delete_file(filename)
304
+ logger.info("Successfully deleted workflow file from cloud: %s", filename)
305
+
306
+ except Exception as e:
307
+ logger.error("Failed to delete workflow file '%s' from cloud: %s", file_path.name, str(e))
308
+
309
+ def _start_file_watching(self) -> None:
310
+ """Start watching the synced_workflows directory for changes."""
311
+ try:
312
+ sync_dir = self._sync_dir
313
+
314
+ # Stop any existing watching
315
+ if self._watch_task and self._watch_task.is_alive():
316
+ self._watching_stopped.set()
317
+ self._watch_task.join(timeout=2.0)
318
+
319
+ # Reset the stop event for new watching
320
+ self._watching_stopped.clear()
321
+
322
+ # Start new watching thread
323
+ self._watch_task = threading.Thread(
324
+ target=self._watch_files_thread,
325
+ args=(str(sync_dir),),
326
+ name="WatchFiles-SyncManager",
327
+ daemon=True,
328
+ )
329
+ self._watch_task.start()
330
+
331
+ logger.info("Started watching synced workflows directory: %s", sync_dir)
332
+
333
+ except Exception as e:
334
+ logger.error("Failed to start file watching: %s", str(e))
335
+
336
+ def _watch_files_thread(self, sync_dir: str) -> None:
337
+ """Background thread that watches for file changes using watchfiles."""
338
+ try:
339
+ logger.debug("File watching thread started for directory: %s", sync_dir)
340
+
341
+ # Watch for changes in the sync directory using PythonFilter
342
+ for changes in watch(sync_dir, watch_filter=PythonFilter(), stop_event=self._watching_stopped):
343
+ if self._watching_stopped.is_set():
344
+ break
345
+
346
+ for change, path_str in changes:
347
+ path = Path(path_str)
348
+
349
+ # Check if this was a self-triggered event for add/modify
350
+ if change in (Change.added, Change.modified) and self._is_expected_content(path_str):
351
+ logger.debug("Ignoring self-triggered %s event for: %s", change.name, path_str)
352
+ continue
353
+
354
+ # Handle the file change
355
+ self._handle_file_change(change, path, path_str)
356
+
357
+ except Exception as e:
358
+ if not self._watching_stopped.is_set():
359
+ logger.error("Error in file watching thread: %s", str(e))
360
+ finally:
361
+ logger.debug("File watching thread stopped")
362
+
363
+ def _handle_file_change(self, change: Change, path: Path, path_str: str) -> None:
364
+ """Handle a file system change event."""
365
+ if change == Change.added:
366
+ logger.info("Detected external creation of workflow file: %s", path_str)
367
+ self._upload_workflow_file(path)
368
+ elif change == Change.modified:
369
+ logger.info("Detected external modification of workflow file: %s", path_str)
370
+ self._upload_workflow_file(path)
371
+ elif change == Change.deleted:
372
+ logger.info("Detected deletion of workflow file: %s", path_str)
373
+ self._delete_workflow_file(path)
374
+
375
+ def _download_single_workflow(
376
+ self, file_name: str, storage_driver: GriptapeCloudStorageDriver, sync_dir: Path
377
+ ) -> tuple[str, bool, str | None]:
378
+ """Download a single workflow file.
379
+
380
+ Args:
381
+ file_name: Name of the workflow file to download from cloud
382
+ storage_driver: Griptape Cloud storage driver instance for downloading
383
+ sync_dir: Local directory path where the workflow file will be saved
384
+
385
+ Returns:
386
+ (filename, success, error_message)
387
+ """
388
+ try:
389
+ # Download file content
390
+ file_content = storage_driver.download_file(file_name)
391
+
392
+ # Extract just the filename (remove any directory prefixes)
393
+ local_filename = Path(file_name).name
394
+ local_file_path = sync_dir / local_filename
395
+
396
+ # Check if file exists and has same content hash
397
+ should_write = True
398
+ if local_file_path.exists():
399
+ try:
400
+ existing_content = local_file_path.read_bytes()
401
+
402
+ existing_hash = hashlib.sha256(existing_content).hexdigest()
403
+ new_hash = hashlib.sha256(file_content).hexdigest()
404
+
405
+ if existing_hash == new_hash:
406
+ logger.debug("Skipping write - file already has same content hash: %s", local_filename)
407
+ should_write = False
408
+ except Exception as e:
409
+ logger.debug("Error checking existing file hash for %s: %s", local_filename, str(e))
410
+
411
+ if should_write:
412
+ # Set expected hash before writing to prevent sync loops
413
+ self._set_expected_hash(local_file_path, file_content)
414
+
415
+ # Write to local file
416
+ local_file_path.write_bytes(file_content)
417
+
418
+ except Exception as e:
419
+ error_msg = str(e)
420
+ logger.warning("Failed to sync workflow '%s': %s", file_name, error_msg)
421
+ return file_name, False, error_msg
422
+ else:
423
+ logger.debug("Successfully synced workflow: %s", local_filename)
424
+ return local_filename, True, None
425
+
426
+ def _sync_workflows_background(
427
+ self, sync_id: str, workflow_files: list[str], storage_driver: GriptapeCloudStorageDriver, sync_dir: Path
428
+ ) -> None:
429
+ """Background thread function to sync workflows."""
430
+ from griptape_nodes.app.app import event_queue
431
+
432
+ synced_workflows = []
433
+ failed_downloads = []
434
+ total_workflows = len(workflow_files)
435
+
436
+ logger.info("Starting background sync of %d workflows (sync_id: %s)", total_workflows, sync_id)
437
+
438
+ # Use thread pool for concurrent downloads
439
+ with ThreadPoolExecutor() as executor:
440
+ # Submit all download tasks
441
+ future_to_filename = {
442
+ executor.submit(self._download_single_workflow, filename, storage_driver, sync_dir): filename
443
+ for filename in workflow_files
444
+ }
445
+
446
+ # Collect results as they complete
447
+ for future in future_to_filename:
448
+ filename, success, error = future.result()
449
+ if success:
450
+ synced_workflows.append(filename)
451
+ else:
452
+ failed_downloads.append(filename)
453
+
454
+ if failed_downloads:
455
+ logger.warning("Failed to sync %d workflows: %s", len(failed_downloads), failed_downloads)
456
+
457
+ logger.info(
458
+ "Background sync completed: %d of %d workflows synced to %s (sync_id: %s)",
459
+ len(synced_workflows),
460
+ len(workflow_files),
461
+ sync_dir,
462
+ sync_id,
463
+ )
464
+
465
+ # Emit sync complete event
466
+ sync_complete_event = SyncComplete(
467
+ sync_directory=str(sync_dir),
468
+ synced_workflows=synced_workflows,
469
+ failed_workflows=failed_downloads,
470
+ total_workflows=total_workflows,
471
+ )
472
+ event_queue.put(AppEvent(payload=sync_complete_event))
473
+
474
+ # Register workflows from the synced directory
475
+ if synced_workflows:
476
+ logger.info("Registering %d synced workflows from configuration", len(synced_workflows))
477
+ try:
478
+ from griptape_nodes.retained_mode.griptape_nodes import GriptapeNodes
479
+
480
+ register_request = RegisterWorkflowsFromConfigRequest(
481
+ config_section="app_events.on_app_initialization_complete.workflows_to_register"
482
+ )
483
+ register_result = GriptapeNodes.handle_request(register_request)
484
+
485
+ if isinstance(register_result, RegisterWorkflowsFromConfigResultSuccess):
486
+ logger.info(
487
+ "Successfully registered %d workflows after sync completion: %s",
488
+ len(register_result.succeeded_workflows),
489
+ register_result.succeeded_workflows,
490
+ )
491
+ else:
492
+ logger.warning("Failed to register workflows after sync completion")
493
+ except Exception as e:
494
+ logger.error("Error registering workflows after sync: %s", str(e))
495
+
496
+ # Clean up task tracking
497
+ if sync_id in self._active_sync_tasks:
498
+ del self._active_sync_tasks[sync_id]