ralphx 0.3.4__py3-none-any.whl → 0.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ralphx/__init__.py +1 -1
- ralphx/api/routes/auth.py +703 -94
- ralphx/api/routes/config.py +3 -56
- ralphx/api/routes/export_import.py +6 -9
- ralphx/api/routes/loops.py +4 -4
- ralphx/api/routes/planning.py +19 -5
- ralphx/api/routes/templates.py +2 -2
- ralphx/api/routes/workflows.py +1 -22
- ralphx/cli.py +4 -1
- ralphx/core/auth.py +346 -171
- ralphx/core/database.py +588 -164
- ralphx/core/executor.py +0 -3
- ralphx/core/loop.py +15 -2
- ralphx/core/loop_templates.py +3 -3
- ralphx/core/planning_service.py +109 -21
- ralphx/core/preview.py +9 -25
- ralphx/core/project_db.py +124 -72
- ralphx/core/project_export.py +1 -5
- ralphx/core/project_import.py +14 -29
- ralphx/core/sample_project.py +1 -5
- ralphx/core/templates.py +9 -9
- ralphx/core/workflow_export.py +4 -7
- ralphx/core/workflow_import.py +3 -27
- ralphx/mcp/__init__.py +6 -2
- ralphx/mcp/registry.py +3 -3
- ralphx/mcp/tools/workflows.py +114 -32
- ralphx/mcp_server.py +6 -2
- ralphx/static/assets/index-0ovNnfOq.css +1 -0
- ralphx/static/assets/index-CY9s08ZB.js +251 -0
- ralphx/static/assets/index-CY9s08ZB.js.map +1 -0
- ralphx/static/index.html +2 -2
- {ralphx-0.3.4.dist-info → ralphx-0.3.5.dist-info}/METADATA +33 -12
- {ralphx-0.3.4.dist-info → ralphx-0.3.5.dist-info}/RECORD +35 -35
- ralphx/static/assets/index-CcRDyY3b.css +0 -1
- ralphx/static/assets/index-CcxfTosc.js +0 -251
- ralphx/static/assets/index-CcxfTosc.js.map +0 -1
- {ralphx-0.3.4.dist-info → ralphx-0.3.5.dist-info}/WHEEL +0 -0
- {ralphx-0.3.4.dist-info → ralphx-0.3.5.dist-info}/entry_points.txt +0 -0
ralphx/core/project_db.py
CHANGED
@@ -14,7 +14,8 @@ This makes projects portable - clone a repo with .ralphx/ and all data comes wit
 """
 
 import json
-import re
+import logging
+import shutil
 import sqlite3
 import threading
 from contextlib import contextmanager
@@ -22,30 +23,11 @@ from datetime import datetime
 from pathlib import Path
 from typing import Any, Iterator, Optional
 
+logger = logging.getLogger(__name__)
 
-# Schema version for project DB
-PROJECT_SCHEMA_VERSION = 15
-
-# Namespace validation pattern: lowercase, alphanumeric, underscores, dashes, max 64 chars
-# Must start with a letter
-NAMESPACE_PATTERN = re.compile(r'^[a-z][a-z0-9_-]{0,63}$')
-
-
-def validate_namespace(namespace: str) -> bool:
-    """Validate namespace format.
-
-    Namespaces must:
-    - Start with a lowercase letter
-    - Contain only lowercase letters, digits, underscores, and dashes
-    - Be 1-64 characters long
-
-    Args:
-        namespace: The namespace string to validate.
 
-
-
-    """
-    return bool(NAMESPACE_PATTERN.match(namespace))
+# Schema version for project DB
+PROJECT_SCHEMA_VERSION = 16
 
 # Project database schema - all project-specific data
 PROJECT_SCHEMA_SQL = """
@@ -297,7 +279,6 @@ CREATE TABLE IF NOT EXISTS workflows (
     id TEXT PRIMARY KEY,
     template_id TEXT, -- Optional reference to template
     name TEXT NOT NULL,
-    namespace TEXT NOT NULL, -- Workflow identifier
     status TEXT DEFAULT 'draft', -- draft, active, paused, completed
     current_step INTEGER DEFAULT 1,
     created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
@@ -467,7 +448,6 @@ CREATE INDEX IF NOT EXISTS idx_loop_resources_type ON loop_resources(resource_ty
 
 -- Workflow indexes
 CREATE INDEX IF NOT EXISTS idx_workflows_status ON workflows(status);
-CREATE INDEX IF NOT EXISTS idx_workflows_namespace ON workflows(namespace);
 CREATE INDEX IF NOT EXISTS idx_workflow_steps_workflow ON workflow_steps(workflow_id, step_number);
 CREATE INDEX IF NOT EXISTS idx_workflow_steps_status ON workflow_steps(status);
 CREATE INDEX IF NOT EXISTS idx_planning_sessions_workflow ON planning_sessions(workflow_id);
@@ -598,9 +578,29 @@ class ProjectDatabase:
                 (PROJECT_SCHEMA_VERSION,),
             )
         elif current_version < PROJECT_SCHEMA_VERSION:
+            # Create backup before running migrations
+            self._backup_before_migration(current_version)
             # Run migrations (for future versions > 6)
             self._run_migrations(conn, current_version)
 
+    def _backup_before_migration(self, from_version: int) -> None:
+        """Create a backup of the database before running migrations.
+
+        Creates a timestamped backup file in the same directory as the database.
+        This allows recovery if a migration fails or causes data loss.
+
+        Args:
+            from_version: Current schema version before migration.
+        """
+        try:
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            backup_path = self.db_path.with_suffix(f".v{from_version}.{timestamp}.bak")
+            shutil.copy2(self.db_path, backup_path)
+            logger.info(f"Created database backup before migration: {backup_path}")
+        except Exception as e:
+            logger.warning(f"Failed to create backup before migration: {e}")
+            # Don't fail the migration if backup fails - just warn
+
     def _run_migrations(self, conn: sqlite3.Connection, from_version: int) -> None:
         """Run schema migrations from a version to the latest.
 
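The new `_backup_before_migration` hook copies the database to a sibling file named like `<stem>.v<old-version>.<timestamp>.bak` before any migration runs. A minimal sketch of restoring the most recent backup by hand, assuming a project database at `.ralphx/project.db` (both the path and the `restore_latest_backup` helper are illustrative, not part of ralphx):

```python
import shutil
from pathlib import Path

def restore_latest_backup(db_path: Path) -> Path:
    """Copy the newest pre-migration .bak file back over the database."""
    backups = list(db_path.parent.glob(f"{db_path.stem}.v*.bak"))
    if not backups:
        raise FileNotFoundError(f"no migration backups found next to {db_path}")
    latest = max(backups, key=lambda p: p.stat().st_mtime)  # newest by modification time
    shutil.copy2(latest, db_path)
    return latest

# Hypothetical usage:
# restore_latest_backup(Path(".ralphx/project.db"))
```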
@@ -658,6 +658,11 @@ class ProjectDatabase:
         # Migration from v14 to v15: Add workflow_resource_versions table
         if from_version == 14:
             self._migrate_v14_to_v15(conn)
+            from_version = 15  # Continue to next migration
+
+        # Migration from v15 to v16: Remove namespace from workflows table
+        if from_version == 15:
+            self._migrate_v15_to_v16(conn)
 
         # Seed workflow templates for fresh databases
         self._seed_workflow_templates(conn)
@@ -960,6 +965,55 @@ class ProjectDatabase:
             ON workflow_resource_versions(workflow_resource_id, version_number DESC)
         """)
 
+    def _migrate_v15_to_v16(self, conn: sqlite3.Connection) -> None:
+        """Migrate from schema v15 to v16.
+
+        Removes:
+        - namespace column from workflows table (deprecated, replaced by workflow_id)
+        - idx_workflows_namespace index
+
+        SQLite doesn't support DROP COLUMN directly, so we recreate the table.
+
+        IMPORTANT: We must disable foreign keys before dropping the old table,
+        otherwise the ON DELETE CASCADE on workflow_steps will delete all steps!
+        """
+        # 0. Disable foreign keys to prevent CASCADE deletes during table swap
+        conn.execute("PRAGMA foreign_keys=OFF")
+
+        # 1. Create new table without namespace
+        conn.execute("""
+            CREATE TABLE workflows_new (
+                id TEXT PRIMARY KEY,
+                template_id TEXT,
+                name TEXT NOT NULL,
+                status TEXT DEFAULT 'draft',
+                current_step INTEGER DEFAULT 1,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                archived_at TIMESTAMP
+            )
+        """)
+
+        # 2. Copy data (excluding namespace)
+        conn.execute("""
+            INSERT INTO workflows_new (id, template_id, name, status, current_step, created_at, updated_at, archived_at)
+            SELECT id, template_id, name, status, current_step, created_at, updated_at, archived_at
+            FROM workflows
+        """)
+
+        # 3. Drop old table and index
+        conn.execute("DROP INDEX IF EXISTS idx_workflows_namespace")
+        conn.execute("DROP TABLE workflows")
+
+        # 4. Rename new table
+        conn.execute("ALTER TABLE workflows_new RENAME TO workflows")
+
+        # 5. Recreate the status index on the new table
+        conn.execute("CREATE INDEX IF NOT EXISTS idx_workflows_status ON workflows(status)")
+
+        # 6. Re-enable foreign keys
+        conn.execute("PRAGMA foreign_keys=ON")
+
     # ========== Loops ==========
 
     def create_loop(
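The foreign-key caveat in `_migrate_v15_to_v16` is the heart of this hunk: with `PRAGMA foreign_keys=ON`, SQLite performs an implicit DELETE as part of `DROP TABLE`, so rows guarded by `ON DELETE CASCADE` vanish along with the parent. A standalone sketch of the failure mode the migration avoids (schemas trimmed to the relevant columns):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys=ON")  # the migration turns this OFF before the swap
conn.execute("CREATE TABLE workflows (id TEXT PRIMARY KEY)")
conn.execute("""CREATE TABLE workflow_steps (
    id INTEGER PRIMARY KEY,
    workflow_id TEXT REFERENCES workflows(id) ON DELETE CASCADE
)""")
conn.execute("INSERT INTO workflows VALUES ('wf-1')")
conn.execute("INSERT INTO workflow_steps VALUES (1, 'wf-1')")

# Recreate-and-swap WITHOUT disabling foreign keys:
conn.execute("CREATE TABLE workflows_new (id TEXT PRIMARY KEY)")
conn.execute("INSERT INTO workflows_new SELECT id FROM workflows")
conn.execute("DROP TABLE workflows")  # implicit DELETE fires the CASCADE
conn.execute("ALTER TABLE workflows_new RENAME TO workflows")

print(conn.execute("SELECT COUNT(*) FROM workflow_steps").fetchone()[0])  # 0, the steps were lost
```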
@@ -1808,9 +1862,8 @@
         """Release all claims held by a specific loop.
 
         Used when deleting a loop to prevent orphaned claims.
-
-
-        Items without a namespace are restored to 'pending'.
+        Released items are restored to 'pending' status so they can be
+        picked up by other loops.
 
         Args:
             loop_name: Name of the loop whose claims should be released.
@@ -1826,7 +1879,7 @@
                 UPDATE work_items
                 SET claimed_by = NULL,
                     claimed_at = NULL,
-                    status =
+                    status = 'pending',
                     updated_at = ?
                 WHERE claimed_by = ? AND status = 'claimed'
                 """,
@@ -1840,9 +1893,8 @@
         This is an atomic operation that checks ownership and releases in one step
         to prevent TOCTOU race conditions.
 
-
-
-        Items without a namespace are restored to 'pending'.
+        Released items are restored to 'pending' status so they can be
+        picked up by other loops.
 
         Args:
             id: Work item ID.
@@ -1858,7 +1910,7 @@
                 UPDATE work_items
                 SET claimed_by = NULL,
                     claimed_at = NULL,
-                    status =
+                    status = 'pending',
                     updated_at = ?
                 WHERE id = ? AND claimed_by = ? AND status = 'claimed'
                 """,
@@ -3093,11 +3145,12 @@
         now = datetime.utcnow().isoformat()
 
         # Build Product workflow template
+        # Uses processing_type to reference PROCESSING_TYPES in mcp/tools/workflows.py
         build_product_phases = json.dumps([
             {
                 "number": 1,
-                "name": "
-                "
+                "name": "Design Document",
+                "processing_type": "design_doc",
                 "description": "Describe what you want to build. Claude will help create a design document.",
                 "outputs": ["design_doc", "guardrails"],
                 "skippable": True,
@@ -3105,10 +3158,9 @@
             },
             {
                 "number": 2,
-                "name": "Story Generation",
-                "
-                "
-                "description": "Claude generates detailed user stories from the design document.",
+                "name": "Story Generation (Extract)",
+                "processing_type": "extractgen_requirements",
+                "description": "Claude extracts user stories from the design document.",
                 "inputs": ["design_doc", "guardrails"],
                 "outputs": ["stories"],
                 "skippable": True,
@@ -3116,9 +3168,18 @@
             },
             {
                 "number": 3,
+                "name": "Story Generation (Web)",
+                "processing_type": "webgen_requirements",
+                "description": "Claude discovers additional requirements via web research.",
+                "inputs": ["design_doc", "guardrails", "stories"],
+                "outputs": ["stories"],
+                "skippable": True,
+                "skipCondition": "Skip web research"
+            },
+            {
+                "number": 4,
                 "name": "Implementation",
-                "
-                "loopType": "consumer",
+                "processing_type": "implementation",
                 "description": "Claude implements each story, committing code to git.",
                 "inputs": ["stories", "design_doc", "guardrails"],
                 "outputs": ["code"],
@@ -3138,23 +3199,31 @@
             ),
         )
 
-        # From Design Doc workflow - skips
+        # From Design Doc workflow - skips design doc, starts with story generation
         from_design_doc_phases = json.dumps([
             {
                 "number": 1,
-                "name": "Story Generation",
-                "
-                "
-                "description": "Claude generates detailed user stories from your design document.",
+                "name": "Story Generation (Extract)",
+                "processing_type": "extractgen_requirements",
+                "description": "Claude extracts user stories from your design document.",
                 "inputs": ["design_doc"],
                 "outputs": ["stories"],
                 "skippable": False
             },
             {
                 "number": 2,
+                "name": "Story Generation (Web)",
+                "processing_type": "webgen_requirements",
+                "description": "Claude discovers additional requirements via web research.",
+                "inputs": ["design_doc", "stories"],
+                "outputs": ["stories"],
+                "skippable": True,
+                "skipCondition": "Skip web research"
+            },
+            {
+                "number": 3,
                 "name": "Implementation",
-                "
-                "loopType": "consumer",
+                "processing_type": "implementation",
                 "description": "Claude implements each story, committing code to git.",
                 "inputs": ["stories", "design_doc"],
                 "outputs": ["code"],
@@ -3179,8 +3248,7 @@
             {
                 "number": 1,
                 "name": "Implementation",
-                "
-                "loopType": "consumer",
+                "processing_type": "implementation",
                 "description": "Claude implements each story, committing code to git.",
                 "inputs": ["stories"],
                 "outputs": ["code"],
@@ -3200,12 +3268,12 @@
             ),
         )
 
-        #
+        # Design Doc Only workflow - just the interactive design doc step
        planning_only_phases = json.dumps([
             {
                 "number": 1,
-                "name": "
-                "
+                "name": "Design Document",
+                "processing_type": "design_doc",
                 "description": "Collaborate with Claude to create a comprehensive design document.",
                 "outputs": ["design_doc", "guardrails"],
                 "skippable": False
@@ -3273,7 +3341,6 @@
         self,
         id: str,
         name: str,
-        namespace: str,
         template_id: Optional[str] = None,
         status: str = "draft",
     ) -> dict:
@@ -3282,29 +3349,19 @@
         Args:
             id: Unique workflow identifier.
             name: User-facing workflow name.
-            namespace: Namespace to link all phases.
             template_id: Optional template ID this workflow is based on.
             status: Initial status (default: draft).
 
         Returns:
             The created workflow dict.
-
-        Raises:
-            ValueError: If namespace is invalid.
         """
-        if not validate_namespace(namespace):
-            raise ValueError(
-                f"Invalid namespace '{namespace}'. Must match pattern: "
-                "lowercase letter followed by up to 63 lowercase letters, digits, underscores, or dashes."
-            )
-
         with self._writer() as conn:
             now = datetime.utcnow().isoformat()
             conn.execute(
                 """INSERT INTO workflows
-                (id, template_id, name,
-                VALUES (?, ?, ?, ?,
-                (id, template_id, name,
+                (id, template_id, name, status, current_step, created_at, updated_at)
+                VALUES (?, ?, ?, ?, 1, ?, ?)""",
+                (id, template_id, name, status, now, now),
             )
             return self.get_workflow(id)
 
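With the namespace parameter and its validation removed, a workflow is created from just an id, a name, and an optional template. A sketch of the v16 call shape (`db` stands for an open `ProjectDatabase`; the id and template values are illustrative):

```python
workflow = db.create_workflow(
    id="wf-a1b2c3",               # illustrative unique id
    name="Build Product",
    template_id="build-product",  # optional, may be None
    status="draft",
)
print(workflow["id"], workflow["status"])  # returned as a dict via get_workflow()
```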
@@ -3318,7 +3375,6 @@
     def list_workflows(
         self,
         status: Optional[str] = None,
-        namespace: Optional[str] = None,
         include_archived: bool = False,
         archived_only: bool = False,
     ) -> list[dict]:
@@ -3326,7 +3382,6 @@
 
         Args:
             status: Filter by workflow status.
-            namespace: Filter by namespace.
             include_archived: If True, include archived workflows.
             archived_only: If True, only return archived workflows.
         """
@@ -3337,9 +3392,6 @@
         if status:
             conditions.append("status = ?")
             params.append(status)
-        if namespace:
-            conditions.append("namespace = ?")
-            params.append(namespace)
 
         # Handle archived filtering
         if archived_only:
ralphx/core/project_export.py
CHANGED
@@ -34,7 +34,6 @@ class WorkflowSummary:
     """Summary of a workflow in the project."""
     id: str
     name: str
-    namespace: str
     steps_count: int
     items_count: int
     resources_count: int
@@ -137,7 +136,6 @@ class ProjectExporter:
             summaries.append(WorkflowSummary(
                 id=wf['id'],
                 name=wf['name'],
-                namespace=wf['namespace'],
                 steps_count=wf_preview.steps_count,
                 items_count=wf_preview.items_total,
                 resources_count=wf_preview.resources_count,
@@ -231,7 +229,7 @@ class ProjectExporter:
         )
 
         for wf in workflows:
-            wf_prefix = f"workflows/{wf['namespace']}/"
+            wf_prefix = f"workflows/{wf['id']}/"
 
             # Get workflow data
             steps = self.db.list_workflow_steps(wf['id'])
@@ -261,7 +259,6 @@
                 'id': wf['id'],
                 'template_id': wf.get('template_id'),
                 'name': wf['name'],
-                'namespace': wf['namespace'],
                 'status': 'draft',
                 'current_step': 1,
                 'created_at': wf.get('created_at'),
@@ -432,7 +429,6 @@
                 {
                     'id': w['id'],
                     'name': w['name'],
-                    'namespace': w['namespace'],
                 }
                 for w in workflows
             ],
ralphx/core/project_import.py
CHANGED
@@ -30,7 +30,6 @@ class WorkflowPreviewInfo:
     """Preview info for a workflow in the project export."""
     id: str
     name: str
-    namespace: str
     steps_count: int
     items_count: int
     resources_count: int
@@ -263,8 +262,13 @@ class ProjectImporter:
 
         # Get workflow info from manifest
        for wf_info in manifest.get('contents', {}).get('workflows', []):
-
-
+            wf_id = wf_info['id']
+            # Support both new (workflow_id) and old (namespace) path formats
+            wf_prefix = f"workflows/{wf_id}/"
+            # Check for old namespace-based paths for backward compatibility
+            old_namespace = wf_info.get('namespace')
+            if old_namespace and f"workflows/{old_namespace}/workflow.json" in zf.namelist():
+                wf_prefix = f"workflows/{old_namespace}/"
 
             # Count items
             items_count = 0
@@ -304,7 +308,6 @@
             workflows_info.append(WorkflowPreviewInfo(
                 id=wf_info['id'],
                 name=wf_info['name'],
-                namespace=wf_namespace,
                 steps_count=steps_count,
                 items_count=items_count,
                 resources_count=resources_count,
@@ -354,7 +357,6 @@
             workflows=[WorkflowPreviewInfo(
                 id=wf_info.get('id', ''),
                 name=wf_info.get('name', ''),
-                namespace=wf_info.get('namespace', ''),
                 steps_count=contents.get('steps', 0),
                 items_count=contents.get('items_total', 0),
                 resources_count=contents.get('resources', 0),
@@ -484,8 +486,13 @@
         import hashlib
         import uuid
 
-
-
+        wf_id = wf_info['id']
+        # Support both new (workflow_id) and old (namespace) path formats
+        wf_prefix = f"workflows/{wf_id}/"
+        # Check for old namespace-based paths for backward compatibility
+        old_namespace = wf_info.get('namespace')
+        if old_namespace and f"workflows/{old_namespace}/workflow.json" in zf.namelist():
+            wf_prefix = f"workflows/{old_namespace}/"
 
         # Read workflow data
         workflow_data = json.loads(zf.read(f"{wf_prefix}workflow.json").decode('utf-8'))
@@ -522,15 +529,10 @@
             hash_suffix = hashlib.md5(f"{old_id}-{uuid.uuid4().hex}".encode()).hexdigest()[:8]
             id_mapping[old_id] = f"{old_id}-{hash_suffix}"
 
-        # Generate unique namespace
-        base_namespace = workflow_data['workflow']['namespace']
-        namespace = self._generate_unique_namespace(base_namespace)
-
         # Create workflow
         workflow = self.db.create_workflow(
             id=new_wf_id,
             name=workflow_data['workflow']['name'],
-            namespace=namespace,
             template_id=workflow_data['workflow'].get('template_id'),
             status='draft',
         )
@@ -666,20 +668,3 @@
             id_mapping=id_mapping,
             warnings=warnings,
         )
-
-    def _generate_unique_namespace(self, base_namespace: str) -> str:
-        """Generate a unique namespace."""
-        import uuid
-
-        existing_workflows = self.db.list_workflows()
-        existing_namespaces = {w['namespace'] for w in existing_workflows}
-
-        if base_namespace not in existing_namespaces:
-            return base_namespace
-
-        for i in range(1, 1000):
-            candidate = f"{base_namespace[:56]}-{i}"
-            if candidate not in existing_namespaces:
-                return candidate
-
-        return f"{base_namespace[:50]}-{uuid.uuid4().hex[:8]}"
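The id-then-namespace fallback above appears twice in the importer (preview and apply paths). A sketch of the same logic factored into one helper, assuming archives store each workflow under either `workflows/<id>/` or the legacy `workflows/<namespace>/` layout (the helper name is hypothetical):

```python
import zipfile

def resolve_workflow_prefix(zf: zipfile.ZipFile, wf_info: dict) -> str:
    """Prefer the new id-based directory, fall back to the legacy namespace layout."""
    prefix = f"workflows/{wf_info['id']}/"
    old_namespace = wf_info.get("namespace")
    if old_namespace and f"workflows/{old_namespace}/workflow.json" in zf.namelist():
        prefix = f"workflows/{old_namespace}/"
    return prefix
```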
ralphx/core/sample_project.py
CHANGED
@@ -124,14 +124,10 @@ def _create_workflow_with_stories(
         logger.warning(f"Template '{template_id}' not found")
         return None
 
-    #
-    namespace = f"excuse-gen-{uuid.uuid4().hex[:7]}"
-
-    # Create workflow
+    # Create workflow (namespace parameter removed in schema v16)
     workflow = project_db.create_workflow(
         id=workflow_id,
         name="Build Excuse Generator",
-        namespace=namespace,
         template_id=template_id,
         status="draft",
     )
ralphx/core/templates.py
CHANGED
@@ -9,15 +9,15 @@ from typing import Optional
 
 # Base loop templates
 TEMPLATES: dict[str, dict] = {
-    "
-    "name": "
-    "display_name": "
+    "extractgen_requirements": {
+        "name": "extractgen_requirements",
+        "display_name": "Extract Requirements Loop",
         "description": "Discover and document user stories from design documents or web research",
         "type": "generator",
         "category": "discovery",
         "config": {
-        "name": "
-        "display_name": "
+            "name": "extractgen_requirements",
+            "display_name": "Extract Requirements Loop",
             "type": "generator",
             "description": "Discover and document user stories from design documents",
             "item_types": {
@@ -34,7 +34,7 @@ TEMPLATES: dict[str, dict] = {
                     "model": "claude-sonnet-4-20250514",
                     "timeout": 180,
                     "tools": [],
-                    "prompt_template": "prompts/
+                    "prompt_template": "prompts/extractgen_requirements_turbo.md",
                 },
                 {
                     "name": "deep",
@@ -42,7 +42,7 @@ TEMPLATES: dict[str, dict] = {
                     "model": "claude-sonnet-4-20250514",
                     "timeout": 900,
                     "tools": ["web_search"],
-                    "prompt_template": "prompts/
+                    "prompt_template": "prompts/extractgen_requirements_deep.md",
                 },
             ],
             "mode_selection": {
@@ -109,7 +109,7 @@ TEMPLATES: dict[str, dict] = {
             "input": {
                 "singular": "story",
                 "plural": "stories",
-                "source": "
+                "source": "extractgen_requirements",
                 "description": "Stories to implement",
             },
             "output": {
@@ -219,7 +219,7 @@ def get_template(name: str) -> Optional[dict]:
     """Get a template by name.
 
     Args:
-        name: Template name (e.g., '
+        name: Template name (e.g., 'extractgen_requirements', 'implementation')
 
     Returns:
         Template dict or None if not found
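Template lookups now use the renamed key. A small usage sketch of `get_template` with the identifiers shown above:

```python
from ralphx.core.templates import get_template

tmpl = get_template("extractgen_requirements")
if tmpl is not None:
    print(tmpl["display_name"])  # "Extract Requirements Loop"
    print(tmpl["type"])          # "generator"

print(get_template("does-not-exist"))  # None
```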
ralphx/core/workflow_export.py
CHANGED
@@ -63,7 +63,7 @@ class SecretMatch:
 class ExportPreview:
     """Preview of what will be exported."""
     workflow_name: str
-
+    workflow_id: str
     steps_count: int
     items_total: int
     items_by_step: dict[int, int] # step_id -> count
@@ -155,7 +155,7 @@ class WorkflowExporter:
 
         return ExportPreview(
             workflow_name=workflow['name'],
-
+            workflow_id=workflow['id'],
             steps_count=len(steps),
             items_total=total_items, # Show real count, not truncated count
             items_by_step=items_by_step,
@@ -247,8 +247,7 @@
 
         # Generate filename
         timestamp = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
-
-        filename = f"workflow-{namespace}-{timestamp}.ralphx.zip"
+        filename = f"workflow-{workflow['id']}-{timestamp}.ralphx.zip"
 
         zip_bytes = zip_buffer.getvalue()
 
@@ -342,7 +341,7 @@
                 snippet=redacted[:50] + '...' if len(redacted) > 50 else redacted,
             ))
 
-        # Scan workflow name
+        # Scan workflow name (unlikely but check)
         scan_text(workflow.get('name', ''), 'workflow.name')
 
         # Scan resources
@@ -383,7 +382,6 @@
             'workflow': {
                 'id': workflow['id'],
                 'name': workflow['name'],
-                'namespace': workflow['namespace'],
                 'template_id': workflow.get('template_id'),
             },
             'contents': {
@@ -447,7 +445,6 @@
             'id': workflow['id'],
             'template_id': workflow.get('template_id'),
             'name': workflow['name'],
-            'namespace': workflow['namespace'],
             'status': 'draft', # Reset status on export
             'current_step': 1, # Reset to beginning
             'created_at': workflow.get('created_at'),
|