ralphx 0.2.2__py3-none-any.whl → 0.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. ralphx/__init__.py +1 -1
  2. ralphx/api/main.py +9 -1
  3. ralphx/api/routes/auth.py +730 -65
  4. ralphx/api/routes/config.py +3 -56
  5. ralphx/api/routes/export_import.py +795 -0
  6. ralphx/api/routes/loops.py +4 -4
  7. ralphx/api/routes/planning.py +19 -5
  8. ralphx/api/routes/projects.py +84 -2
  9. ralphx/api/routes/templates.py +115 -2
  10. ralphx/api/routes/workflows.py +22 -22
  11. ralphx/cli.py +21 -6
  12. ralphx/core/auth.py +346 -171
  13. ralphx/core/database.py +615 -167
  14. ralphx/core/executor.py +0 -3
  15. ralphx/core/loop.py +15 -2
  16. ralphx/core/loop_templates.py +69 -3
  17. ralphx/core/planning_service.py +109 -21
  18. ralphx/core/preview.py +9 -25
  19. ralphx/core/project_db.py +175 -75
  20. ralphx/core/project_export.py +469 -0
  21. ralphx/core/project_import.py +670 -0
  22. ralphx/core/sample_project.py +430 -0
  23. ralphx/core/templates.py +46 -9
  24. ralphx/core/workflow_executor.py +35 -5
  25. ralphx/core/workflow_export.py +606 -0
  26. ralphx/core/workflow_import.py +1149 -0
  27. ralphx/examples/sample_project/DESIGN.md +345 -0
  28. ralphx/examples/sample_project/README.md +37 -0
  29. ralphx/examples/sample_project/guardrails.md +57 -0
  30. ralphx/examples/sample_project/stories.jsonl +10 -0
  31. ralphx/mcp/__init__.py +6 -2
  32. ralphx/mcp/registry.py +3 -3
  33. ralphx/mcp/server.py +99 -29
  34. ralphx/mcp/tools/__init__.py +4 -0
  35. ralphx/mcp/tools/help.py +204 -0
  36. ralphx/mcp/tools/workflows.py +114 -32
  37. ralphx/mcp_server.py +6 -2
  38. ralphx/static/assets/index-0ovNnfOq.css +1 -0
  39. ralphx/static/assets/index-CY9s08ZB.js +251 -0
  40. ralphx/static/assets/index-CY9s08ZB.js.map +1 -0
  41. ralphx/static/index.html +14 -0
  42. {ralphx-0.2.2.dist-info → ralphx-0.3.5.dist-info}/METADATA +34 -12
  43. {ralphx-0.2.2.dist-info → ralphx-0.3.5.dist-info}/RECORD +45 -30
  44. {ralphx-0.2.2.dist-info → ralphx-0.3.5.dist-info}/WHEEL +0 -0
  45. {ralphx-0.2.2.dist-info → ralphx-0.3.5.dist-info}/entry_points.txt +0 -0
ralphx/core/project_db.py CHANGED
@@ -14,7 +14,8 @@ This makes projects portable - clone a repo with .ralphx/ and all data comes wit
 """
 
 import json
-import re
+import logging
+import shutil
 import sqlite3
 import threading
 from contextlib import contextmanager
@@ -22,30 +23,11 @@ from datetime import datetime
 from pathlib import Path
 from typing import Any, Iterator, Optional
 
-
-# Schema version for project DB
-PROJECT_SCHEMA_VERSION = 15
-
-# Namespace validation pattern: lowercase, alphanumeric, underscores, dashes, max 64 chars
-# Must start with a letter
-NAMESPACE_PATTERN = re.compile(r'^[a-z][a-z0-9_-]{0,63}$')
+logger = logging.getLogger(__name__)
 
 
-def validate_namespace(namespace: str) -> bool:
-    """Validate namespace format.
-
-    Namespaces must:
-    - Start with a lowercase letter
-    - Contain only lowercase letters, digits, underscores, and dashes
-    - Be 1-64 characters long
-
-    Args:
-        namespace: The namespace string to validate.
-
-    Returns:
-        True if valid, False otherwise.
-    """
-    return bool(NAMESPACE_PATTERN.match(namespace))
+# Schema version for project DB
+PROJECT_SCHEMA_VERSION = 16
 
 # Project database schema - all project-specific data
 PROJECT_SCHEMA_SQL = """
@@ -297,7 +279,6 @@ CREATE TABLE IF NOT EXISTS workflows (
     id TEXT PRIMARY KEY,
     template_id TEXT, -- Optional reference to template
     name TEXT NOT NULL,
-    namespace TEXT NOT NULL, -- Workflow identifier
     status TEXT DEFAULT 'draft', -- draft, active, paused, completed
     current_step INTEGER DEFAULT 1,
     created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
@@ -467,7 +448,6 @@ CREATE INDEX IF NOT EXISTS idx_loop_resources_type ON loop_resources(resource_ty
 
 -- Workflow indexes
 CREATE INDEX IF NOT EXISTS idx_workflows_status ON workflows(status);
-CREATE INDEX IF NOT EXISTS idx_workflows_namespace ON workflows(namespace);
 CREATE INDEX IF NOT EXISTS idx_workflow_steps_workflow ON workflow_steps(workflow_id, step_number);
 CREATE INDEX IF NOT EXISTS idx_workflow_steps_status ON workflow_steps(status);
 CREATE INDEX IF NOT EXISTS idx_planning_sessions_workflow ON planning_sessions(workflow_id);
@@ -598,9 +578,29 @@ class ProjectDatabase:
                 (PROJECT_SCHEMA_VERSION,),
             )
         elif current_version < PROJECT_SCHEMA_VERSION:
+            # Create backup before running migrations
+            self._backup_before_migration(current_version)
             # Run migrations (for future versions > 6)
             self._run_migrations(conn, current_version)
 
+    def _backup_before_migration(self, from_version: int) -> None:
+        """Create a backup of the database before running migrations.
+
+        Creates a timestamped backup file in the same directory as the database.
+        This allows recovery if a migration fails or causes data loss.
+
+        Args:
+            from_version: Current schema version before migration.
+        """
+        try:
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            backup_path = self.db_path.with_suffix(f".v{from_version}.{timestamp}.bak")
+            shutil.copy2(self.db_path, backup_path)
+            logger.info(f"Created database backup before migration: {backup_path}")
+        except Exception as e:
+            logger.warning(f"Failed to create backup before migration: {e}")
+            # Don't fail the migration if backup fails - just warn
+
     def _run_migrations(self, conn: sqlite3.Connection, from_version: int) -> None:
         """Run schema migrations from a version to the latest.
 
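The backup naming above runs through `Path.with_suffix`, so a database file such as `project.db` at schema v15 is copied to something like `project.v15.20240115_093000.bak` in the same directory before any migration touches it. Below is a minimal recovery sketch that assumes only that naming convention; the helper, the example filenames, and the `.ralphx/project.db` path are illustrative and not part of the ralphx API.

```python
import shutil
from pathlib import Path


def restore_latest_migration_backup(db_path: Path) -> Path:
    """Illustrative helper (not part of ralphx): copy the newest
    pre-migration .bak file back over the live project database."""
    # Backups are named <stem>.v<schema>.<YYYYMMDD_HHMMSS>.bak and sit next to the DB.
    backups = sorted(
        db_path.parent.glob(f"{db_path.stem}.v*.bak"),
        key=lambda p: p.stat().st_mtime,  # newest backup last, regardless of schema number
    )
    if not backups:
        raise FileNotFoundError(f"no migration backups found next to {db_path}")
    shutil.copy2(backups[-1], db_path)  # overwrite the migrated DB with the backup copy
    return backups[-1]


# Example call (the path is a guess at the layout described in the module docstring):
# restore_latest_migration_backup(Path(".ralphx/project.db"))
```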
@@ -658,6 +658,11 @@ class ProjectDatabase:
         # Migration from v14 to v15: Add workflow_resource_versions table
         if from_version == 14:
             self._migrate_v14_to_v15(conn)
+            from_version = 15  # Continue to next migration
+
+        # Migration from v15 to v16: Remove namespace from workflows table
+        if from_version == 15:
+            self._migrate_v15_to_v16(conn)
 
         # Seed workflow templates for fresh databases
         self._seed_workflow_templates(conn)
@@ -960,6 +965,55 @@ class ProjectDatabase:
             ON workflow_resource_versions(workflow_resource_id, version_number DESC)
         """)
 
+    def _migrate_v15_to_v16(self, conn: sqlite3.Connection) -> None:
+        """Migrate from schema v15 to v16.
+
+        Removes:
+        - namespace column from workflows table (deprecated, replaced by workflow_id)
+        - idx_workflows_namespace index
+
+        SQLite doesn't support DROP COLUMN directly, so we recreate the table.
+
+        IMPORTANT: We must disable foreign keys before dropping the old table,
+        otherwise the ON DELETE CASCADE on workflow_steps will delete all steps!
+        """
+        # 0. Disable foreign keys to prevent CASCADE deletes during table swap
+        conn.execute("PRAGMA foreign_keys=OFF")
+
+        # 1. Create new table without namespace
+        conn.execute("""
+            CREATE TABLE workflows_new (
+                id TEXT PRIMARY KEY,
+                template_id TEXT,
+                name TEXT NOT NULL,
+                status TEXT DEFAULT 'draft',
+                current_step INTEGER DEFAULT 1,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                archived_at TIMESTAMP
+            )
+        """)
+
+        # 2. Copy data (excluding namespace)
+        conn.execute("""
+            INSERT INTO workflows_new (id, template_id, name, status, current_step, created_at, updated_at, archived_at)
+            SELECT id, template_id, name, status, current_step, created_at, updated_at, archived_at
+            FROM workflows
+        """)
+
+        # 3. Drop old table and index
+        conn.execute("DROP INDEX IF EXISTS idx_workflows_namespace")
+        conn.execute("DROP TABLE workflows")
+
+        # 4. Rename new table
+        conn.execute("ALTER TABLE workflows_new RENAME TO workflows")
+
+        # 5. Recreate the status index on the new table
+        conn.execute("CREATE INDEX IF NOT EXISTS idx_workflows_status ON workflows(status)")
+
+        # 6. Re-enable foreign keys
+        conn.execute("PRAGMA foreign_keys=ON")
+
     # ========== Loops ==========
 
     def create_loop(
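The docstring above calls out the one real hazard in SQLite's create-copy-drop-rename workaround for DROP COLUMN: with foreign key enforcement left on, `DROP TABLE workflows` performs an implicit delete of its rows, and `ON DELETE CASCADE` on child tables such as `workflow_steps` would wipe the steps. A standalone sketch with toy tables (not the ralphx schema) shows the same `PRAGMA foreign_keys=OFF`/`ON` bracket keeping child rows intact:

```python
import sqlite3

# Toy schema mirroring the parent/child relationship described above.
conn = sqlite3.connect(":memory:", isolation_level=None)  # autocommit so PRAGMAs apply immediately
conn.executescript("""
    PRAGMA foreign_keys=ON;
    CREATE TABLE parent (id TEXT PRIMARY KEY, legacy_col TEXT);
    CREATE TABLE child (
        id INTEGER PRIMARY KEY,
        parent_id TEXT REFERENCES parent(id) ON DELETE CASCADE
    );
    INSERT INTO parent VALUES ('p1', 'drop-me');
    INSERT INTO child (parent_id) VALUES ('p1');
""")

# Rebuild `parent` without legacy_col, using the same steps as _migrate_v15_to_v16.
conn.execute("PRAGMA foreign_keys=OFF")  # skip this and the DROP below cascades into child
conn.execute("CREATE TABLE parent_new (id TEXT PRIMARY KEY)")
conn.execute("INSERT INTO parent_new SELECT id FROM parent")
conn.execute("DROP TABLE parent")
conn.execute("ALTER TABLE parent_new RENAME TO parent")
conn.execute("PRAGMA foreign_keys=ON")

# The child row survives the table swap.
assert conn.execute("SELECT COUNT(*) FROM child").fetchone()[0] == 1
```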
@@ -1808,9 +1862,8 @@ class ProjectDatabase:
         """Release all claims held by a specific loop.
 
         Used when deleting a loop to prevent orphaned claims.
-        Items with a namespace set are restored to 'completed' status so they
-        can be picked up by consumer loops again.
-        Items without a namespace are restored to 'pending'.
+        Released items are restored to 'pending' status so they can be
+        picked up by other loops.
 
         Args:
             loop_name: Name of the loop whose claims should be released.
@@ -1826,7 +1879,7 @@ class ProjectDatabase:
                 UPDATE work_items
                 SET claimed_by = NULL,
                     claimed_at = NULL,
-                    status = CASE WHEN namespace IS NOT NULL THEN 'completed' ELSE 'pending' END,
+                    status = 'pending',
                     updated_at = ?
                 WHERE claimed_by = ? AND status = 'claimed'
                 """,
@@ -1840,9 +1893,8 @@ class ProjectDatabase:
         This is an atomic operation that checks ownership and releases in one step
         to prevent TOCTOU race conditions.
 
-        Items with a namespace set are restored to 'completed' status so they
-        can be picked up by consumer loops again.
-        Items without a namespace are restored to 'pending'.
+        Released items are restored to 'pending' status so they can be
+        picked up by other loops.
 
         Args:
             id: Work item ID.
@@ -1858,7 +1910,7 @@ class ProjectDatabase:
                 UPDATE work_items
                 SET claimed_by = NULL,
                     claimed_at = NULL,
-                    status = CASE WHEN namespace IS NOT NULL THEN 'completed' ELSE 'pending' END,
+                    status = 'pending',
                     updated_at = ?
                 WHERE id = ? AND claimed_by = ? AND status = 'claimed'
                 """,
@@ -3093,11 +3145,12 @@ class ProjectDatabase:
         now = datetime.utcnow().isoformat()
 
         # Build Product workflow template
+        # Uses processing_type to reference PROCESSING_TYPES in mcp/tools/workflows.py
         build_product_phases = json.dumps([
             {
                 "number": 1,
-                "name": "Planning",
-                "type": "interactive",
+                "name": "Design Document",
+                "processing_type": "design_doc",
                 "description": "Describe what you want to build. Claude will help create a design document.",
                 "outputs": ["design_doc", "guardrails"],
                 "skippable": True,
@@ -3105,10 +3158,9 @@ class ProjectDatabase:
             },
             {
                 "number": 2,
-                "name": "Story Generation",
-                "type": "autonomous",
-                "loopType": "generator",
-                "description": "Claude generates detailed user stories from the design document.",
+                "name": "Story Generation (Extract)",
+                "processing_type": "extractgen_requirements",
+                "description": "Claude extracts user stories from the design document.",
                 "inputs": ["design_doc", "guardrails"],
                 "outputs": ["stories"],
                 "skippable": True,
@@ -3116,9 +3168,18 @@ class ProjectDatabase:
             },
             {
                 "number": 3,
+                "name": "Story Generation (Web)",
+                "processing_type": "webgen_requirements",
+                "description": "Claude discovers additional requirements via web research.",
+                "inputs": ["design_doc", "guardrails", "stories"],
+                "outputs": ["stories"],
+                "skippable": True,
+                "skipCondition": "Skip web research"
+            },
+            {
+                "number": 4,
                 "name": "Implementation",
-                "type": "autonomous",
-                "loopType": "consumer",
+                "processing_type": "implementation",
                 "description": "Claude implements each story, committing code to git.",
                 "inputs": ["stories", "design_doc", "guardrails"],
                 "outputs": ["code"],
@@ -3138,23 +3199,31 @@ class ProjectDatabase:
             ),
         )
 
-        # From Design Doc workflow - skips planning, starts with story generation
+        # From Design Doc workflow - skips design doc, starts with story generation
         from_design_doc_phases = json.dumps([
             {
                 "number": 1,
-                "name": "Story Generation",
-                "type": "autonomous",
-                "loopType": "generator",
-                "description": "Claude generates detailed user stories from your design document.",
+                "name": "Story Generation (Extract)",
+                "processing_type": "extractgen_requirements",
+                "description": "Claude extracts user stories from your design document.",
                 "inputs": ["design_doc"],
                 "outputs": ["stories"],
                 "skippable": False
             },
             {
                 "number": 2,
+                "name": "Story Generation (Web)",
+                "processing_type": "webgen_requirements",
+                "description": "Claude discovers additional requirements via web research.",
+                "inputs": ["design_doc", "stories"],
+                "outputs": ["stories"],
+                "skippable": True,
+                "skipCondition": "Skip web research"
+            },
+            {
+                "number": 3,
                 "name": "Implementation",
-                "type": "autonomous",
-                "loopType": "consumer",
+                "processing_type": "implementation",
                 "description": "Claude implements each story, committing code to git.",
                 "inputs": ["stories", "design_doc"],
                 "outputs": ["code"],
@@ -3179,8 +3248,7 @@ class ProjectDatabase:
             {
                 "number": 1,
                 "name": "Implementation",
-                "type": "autonomous",
-                "loopType": "consumer",
+                "processing_type": "implementation",
                 "description": "Claude implements each story, committing code to git.",
                 "inputs": ["stories"],
                 "outputs": ["code"],
@@ -3200,12 +3268,12 @@ class ProjectDatabase:
             ),
         )
 
-        # Planning Only workflow - just the interactive planning step
+        # Design Doc Only workflow - just the interactive design doc step
         planning_only_phases = json.dumps([
             {
                 "number": 1,
-                "name": "Planning",
-                "type": "interactive",
+                "name": "Design Document",
+                "processing_type": "design_doc",
                 "description": "Collaborate with Claude to create a comprehensive design document.",
                 "outputs": ["design_doc", "guardrails"],
                 "skippable": False
@@ -3273,7 +3341,6 @@ class ProjectDatabase:
         self,
         id: str,
         name: str,
-        namespace: str,
         template_id: Optional[str] = None,
         status: str = "draft",
     ) -> dict:
@@ -3282,29 +3349,19 @@ class ProjectDatabase:
         Args:
             id: Unique workflow identifier.
             name: User-facing workflow name.
-            namespace: Namespace to link all phases.
             template_id: Optional template ID this workflow is based on.
             status: Initial status (default: draft).
 
         Returns:
             The created workflow dict.
-
-        Raises:
-            ValueError: If namespace is invalid.
         """
-        if not validate_namespace(namespace):
-            raise ValueError(
-                f"Invalid namespace '{namespace}'. Must match pattern: "
-                "lowercase letter followed by up to 63 lowercase letters, digits, underscores, or dashes."
-            )
-
         with self._writer() as conn:
             now = datetime.utcnow().isoformat()
             conn.execute(
                 """INSERT INTO workflows
-                (id, template_id, name, namespace, status, current_step, created_at, updated_at)
-                VALUES (?, ?, ?, ?, ?, 1, ?, ?)""",
-                (id, template_id, name, namespace, status, now, now),
+                (id, template_id, name, status, current_step, created_at, updated_at)
+                VALUES (?, ?, ?, ?, 1, ?, ?)""",
+                (id, template_id, name, status, now, now),
             )
             return self.get_workflow(id)
 
@@ -3318,7 +3375,6 @@ class ProjectDatabase:
     def list_workflows(
         self,
         status: Optional[str] = None,
-        namespace: Optional[str] = None,
         include_archived: bool = False,
         archived_only: bool = False,
     ) -> list[dict]:
@@ -3326,7 +3382,6 @@ class ProjectDatabase:
 
         Args:
             status: Filter by workflow status.
-            namespace: Filter by namespace.
             include_archived: If True, include archived workflows.
             archived_only: If True, only return archived workflows.
         """
@@ -3337,9 +3392,6 @@ class ProjectDatabase:
             if status:
                 conditions.append("status = ?")
                 params.append(status)
-            if namespace:
-                conditions.append("namespace = ?")
-                params.append(namespace)
 
             # Handle archived filtering
             if archived_only:
@@ -4779,6 +4831,8 @@ class ProjectDatabase:
         workflow_id: str,
         step_id: int,
         messages: Optional[list] = None,
+        artifacts: Optional[dict] = None,
+        status: str = "active",
     ) -> dict:
         """Create a planning session for an interactive step.
 
@@ -4787,6 +4841,8 @@ class ProjectDatabase:
             workflow_id: Parent workflow ID.
             step_id: Parent step ID.
             messages: Initial messages (default: empty list).
+            artifacts: Optional artifacts dict.
+            status: Session status (default: 'active').
 
         Returns:
             The created session dict.
@@ -4794,11 +4850,12 @@ class ProjectDatabase:
         with self._writer() as conn:
             now = datetime.utcnow().isoformat()
             messages_json = json.dumps(messages or [])
+            artifacts_json = json.dumps(artifacts) if artifacts else None
             conn.execute(
                 """INSERT INTO planning_sessions
-                (id, workflow_id, step_id, messages, status, created_at, updated_at)
-                VALUES (?, ?, ?, ?, 'active', ?, ?)""",
-                (id, workflow_id, step_id, messages_json, now, now),
+                (id, workflow_id, step_id, messages, artifacts, status, created_at, updated_at)
+                VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
+                (id, workflow_id, step_id, messages_json, artifacts_json, status, now, now),
             )
             return self.get_planning_session(id)
 
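With the two new parameters, a planning session can be created already carrying artifacts and a non-default status, which is what an import path needs when it restores a session that finished elsewhere. A rough usage sketch; `db` is assumed to be an existing ProjectDatabase instance, and the IDs, message shape, and artifact keys are made up for illustration:

```python
# `db` is an open ProjectDatabase for the current project (construction not shown here).
session = db.create_planning_session(
    id="sess-demo",
    workflow_id="wf-demo",
    step_id=1,
    messages=[{"role": "user", "content": "Draft a design doc for the sample app"}],
    artifacts={"design_doc": "# Sample Design\n..."},  # serialized to JSON; stored as NULL when omitted
    status="completed",                                # earlier versions hard-coded 'active'
)
```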
@@ -4853,6 +4910,49 @@ class ProjectDatabase:
                 return result
             return None
 
+    def list_planning_sessions(
+        self,
+        workflow_id: Optional[str] = None,
+        status: Optional[str] = None,
+    ) -> list[dict]:
+        """List planning sessions with optional filtering.
+
+        Args:
+            workflow_id: Filter by workflow ID.
+            status: Filter by status ('active', 'completed').
+
+        Returns:
+            List of planning session dicts.
+        """
+        with self._reader() as conn:
+            conditions = []
+            params: list[Any] = []
+
+            if workflow_id:
+                conditions.append("workflow_id = ?")
+                params.append(workflow_id)
+            if status:
+                conditions.append("status = ?")
+                params.append(status)
+
+            where_clause = " AND ".join(conditions) if conditions else "1=1"
+            cursor = conn.execute(
+                f"""SELECT * FROM planning_sessions
+                WHERE {where_clause}
+                ORDER BY created_at DESC""",
+                params,
+            )
+
+            results = []
+            for row in cursor.fetchall():
+                result = dict(row)
+                if result.get("messages"):
+                    result["messages"] = json.loads(result["messages"])
+                if result.get("artifacts"):
+                    result["artifacts"] = json.loads(result["artifacts"])
+                results.append(result)
+            return results
+
     def add_planning_message(
         self,
         session_id: str,