@heytherevibin/skillforge 0.2.1 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,128 @@
1
+ """Best-effort redaction of secrets and user home paths in exported context (defense in depth)."""
2
+ from __future__ import annotations
3
+
4
+ import os
5
+ import re
6
+ from pathlib import Path
7
+
8
+ _HOME_RESOLVED: str | None = None
9
+
10
+
11
def redaction_enabled() -> bool:
    """Whether context redaction is active (``SKILLFORGE_REDACT_CONTEXT``, on by default).

    Any of ``0`` / ``false`` / ``no`` / empty (case-insensitive, whitespace ignored)
    disables redaction; everything else — including the default ``1`` — enables it.
    """
    flag = os.getenv("SKILLFORGE_REDACT_CONTEXT", "1").strip().lower()
    return flag not in ("0", "false", "no", "")
13
+
14
+
15
def redact_home_in_paths_enabled() -> bool:
    """Whether home-directory prefixes are masked in paths (``SKILLFORGE_REDACT_HOME_IN_PATHS``).

    Same truthiness convention as :func:`redaction_enabled`: ``0`` / ``false`` /
    ``no`` / empty disables it; the default is enabled.
    """
    flag = os.getenv("SKILLFORGE_REDACT_HOME_IN_PATHS", "1").strip().lower()
    return flag not in ("0", "false", "no", "")
17
+
18
+
19
def _home_prefix() -> str | None:
    """Resolve and memoize the user's home directory.

    Returns ``None`` when the home directory cannot be determined; the failure
    is cached (as ``""``) so the lookup is attempted only once per process.
    """
    global _HOME_RESOLVED
    if _HOME_RESOLVED is None:
        try:
            _HOME_RESOLVED = str(Path.home().resolve())
        except Exception:
            # Remember the failure so every later call short-circuits.
            _HOME_RESOLVED = ""
    return _HOME_RESOLVED or None
28
+
29
+
30
# Ordered (pattern, replacement) pairs applied in sequence by redact_secret_patterns().
COMPILED: list[tuple[re.Pattern[str], str]] = [
    # Anthropic API keys: "sk-ant-apiNN-" followed by a long token.
    (re.compile(r"sk-ant-api\d\d-[A-Za-z0-9_\-]{20,}"), "[REDACTED_ANTHROPIC_KEY]"),
    # Google API keys: "AIza" + exactly 35 key characters.
    (re.compile(r"\bAIza[0-9A-Za-z\-_]{35}\b"), "[REDACTED_GOOGLE_API_KEY]"),
    # Slack tokens (bot/app/personal/refresh/session variants).
    (re.compile(r"xox[baprs]-[0-9A-Za-z\-]{10,}"), "[REDACTED_SLACK_TOKEN]"),
    # GitHub classic personal access tokens ("ghp_"/"ghP_").
    (re.compile(r"gh[pP]_[0-9A-Za-z]{36,}"), "[REDACTED_GITHUB_TOKEN]"),
    # GitHub fine-grained personal access tokens.
    (re.compile(r"github_pat_[0-9A-Za-z_]{20,}"), "[REDACTED_GITHUB_PAT]"),
    # Whole PEM private-key blocks; non-greedy [\s\S] spans newlines.
    (re.compile(
        r"-----BEGIN [A-Z ]*PRIVATE KEY-----[\s\S]*?-----END [A-Z ]*PRIVATE KEY-----",
        re.MULTILINE,
    ), "[REDACTED_PRIVATE_KEY]"),
    # AWS long-term access key ids.
    (re.compile(r"\bAKIA[0-9A-Z]{16}\b"), "[REDACTED_AWS_ACCESS_KEY_ID]"),
    # AWS temporary (STS) access key ids.
    (re.compile(r"\bASIA[0-9A-Z]{16}\b"), "[REDACTED_AWS_TEMP_KEY_ID]"),
    # OAuth / Bearer-style (avoid eating normal words — require length)
    (re.compile(r"\bBearer\s+[A-Za-z0-9\-._~+/]{16,}={0,2}\b", re.IGNORECASE), "Bearer [REDACTED]"),
    (re.compile(r"\bBasic\s+[A-Za-z0-9+/]{16,}={0,2}\b", re.IGNORECASE), "Basic [REDACTED]"),
    # Env assignment leaks in pasted logs
    (re.compile(
        r"\b(ANTHROPIC_API_KEY|OPENAI_API_KEY|"
        r"AWS_SECRET_ACCESS_KEY|AWS_SESSION_TOKEN|GITHUB_TOKEN|"
        r"HF_TOKEN|HUGGINGFACE_TOKEN|SLACK_BOT_TOKEN|DATABASE_URL|"
        r"SUPABASE_SERVICE_ROLE_KEY|SUPABASE_JWT_SECRET)\s*=\s*(\S+)",
        re.IGNORECASE,
    # Replacement keeps the variable name (group 1) and drops the value.
    ), r"\1=[REDACTED]"),
]
54
+
55
+
56
def redact_secret_patterns(text: str) -> tuple[str, int]:
    """Replace known secret shapes; returns ``(new_text, number_of_pattern_matches)``.

    Applies every ``(pattern, replacement)`` pair in ``COMPILED`` in order and
    sums the match counts across all patterns. Empty input is returned as-is
    with a count of 0.
    """
    if not text:
        return text, 0
    hits = 0
    out = text
    for pat, repl in COMPILED:
        # subn substitutes and counts in a single scan; the original
        # findall-then-sub walked the text twice per pattern, and subn's
        # count matches findall's length regardless of capture groups.
        out, n = pat.subn(repl, out)
        hits += n
    return out, hits
68
+
69
+
70
def redact_home_path_prefix(path: str) -> tuple[str, int]:
    """If ``path`` starts with the resolved home directory, replace that prefix with ``[HOME]``.

    Returns ``(possibly_redacted_path, hit_count)`` where hit_count is 1 when a
    replacement happened and 0 otherwise. No-op when the feature flag is off or
    the home directory could not be resolved.
    """
    if not path or not redact_home_in_paths_enabled():
        return path, 0
    home = _home_prefix()
    if not home:
        return path, 0
    # Normalize slashes for comparison
    norm = path.replace("\\", "/")
    home_n = home.replace("\\", "/")
    # Exact home directory (with or without a trailing slash) collapses entirely.
    if norm == home_n or norm.rstrip("/") == home_n.rstrip("/"):
        return "[HOME]", 1
    # NOTE(review): after the replace() above, `norm` contains no backslashes,
    # so the `home_n + "\\"` alternative can never match — confirm before removing.
    if norm.startswith(home_n + "/") or norm.startswith(home_n + "\\"):
        # Slice the ORIGINAL path so the remainder keeps its real characters,
        # then present it forward-slashed.
        rest = path[len(home) :].lstrip("/\\")
        return "[HOME]/" + rest.replace("\\", "/"), 1
    # Windows-style profile (best effort when HOME is /Users/x but path is C:\Users\x)
    if len(path) > 3 and path[1] == ":":
        try:
            from os.path import expanduser

            eu = expanduser("~")
            # Case-insensitive prefix match against the backslashed profile dir.
            if eu and path.lower().startswith(eu.lower().replace("/", "\\")):
                return "[HOME]/" + path[len(eu) :].lstrip("\\/").replace("\\", "/"), 1
        except Exception:
            # Best-effort only: never let redaction raise on odd platforms.
            pass
    return path, 0
96
+
97
+
98
def redact_context_path_field(path: str | None) -> tuple[str | None, int]:
    """Home-prefix redaction for an optional path field; returns ``(path, hit_count)``."""
    if not path:
        # None or empty string: nothing to redact.
        return path, 0
    return redact_home_path_prefix(path)
103
+
104
+
105
def sanitize_context_items(items: list[dict]) -> tuple[int, int]:
    """Mutate each item's ``text`` / ``path`` in place. Returns ``(secret_hits, path_hits)``."""
    secret_hits = 0
    path_hits = 0
    for item in items:
        # Scrub secret patterns from the chunk text; only write back on a hit.
        redacted_text, text_hits = redact_secret_patterns(item.get("text") or "")
        if text_hits:
            secret_hits += text_hits
            item["text"] = redacted_text
        # Mask the home prefix in the path field when one is present.
        raw_path = item.get("path")
        if raw_path is not None:
            new_path, prefix_hits = redact_context_path_field(str(raw_path))
            if prefix_hits:
                path_hits += prefix_hits
                item["path"] = new_path
    return secret_hits, path_hits
121
+
122
+
123
def redact_display_path(p: str | Path) -> str:
    """Single path string safe for logs / ``_meta`` (home prefix only + pattern redaction)."""
    text = str(p)
    # Home prefix first, then secret patterns; each returns (text, hit_count)
    # and the counts are not needed here.
    for transform in (redact_home_path_prefix, redact_secret_patterns):
        text, _ = transform(text)
    return text
@@ -9,7 +9,14 @@ import time
9
9
  from pathlib import Path
10
10
 
11
11
  from app.db_paths import resolve_orchestrator_db
12
- from app.main import build_router_and_skills, init_db, run_route_turn
12
+ from app.main import (
13
+ build_router_and_skills,
14
+ format_context_items_markdown,
15
+ init_db,
16
+ run_route_turn,
17
+ )
18
+ from app.mcp_contract import MCP_RESPONSE_SCHEMA_VERSION, build_route_skills_meta
19
+ from app.redaction import redaction_enabled, redact_display_path
13
20
 
14
21
 
15
22
  def _parse_args(argv: list[str] | None) -> argparse.Namespace:
@@ -28,6 +35,11 @@ def _parse_args(argv: list[str] | None) -> argparse.Namespace:
28
35
  p.add_argument("--session-id", default="", help="Stable session id (reuse across turns for reroute stats).")
29
36
  p.add_argument("--user-id", default="", help="Logical user id for weights/sessions/events.")
30
37
  p.add_argument("--json-meta", action="store_true", help="Print routing metadata as JSON on stderr after output.")
38
+ p.add_argument(
39
+ "--include-project-rag",
40
+ action="store_true",
41
+ help="Append chunks from `skillforge index` (same DB as --project-root). Requires --project-root.",
42
+ )
31
43
  return p.parse_args(argv)
32
44
 
33
45
 
@@ -38,6 +50,9 @@ async def _run(args: argparse.Namespace) -> int:
38
50
  return 2
39
51
 
40
52
  pr = (args.project_root or "").strip() or None
53
+ if args.include_project_rag and not pr:
54
+ print("skillforge route: --include-project-rag requires --project-root.", file=sys.stderr)
55
+ return 2
41
56
  db_path = resolve_orchestrator_db(pr)
42
57
  con = init_db(db_path)
43
58
 
@@ -53,6 +68,8 @@ async def _run(args: argparse.Namespace) -> int:
53
68
  conversation=[],
54
69
  user_id=user_id,
55
70
  session_id=session_id,
71
+ project_root=pr,
72
+ include_project_rag=bool(args.include_project_rag),
56
73
  )
57
74
  finally:
58
75
  con.close()
@@ -60,6 +77,7 @@ async def _run(args: argparse.Namespace) -> int:
60
77
  picked_names = result["picked_names"]
61
78
  reasoning = result["reasoning"]
62
79
  sid = result["session_id"]
80
+ context_items = result.get("context_items") or []
63
81
 
64
82
  if pr:
65
83
  try:
@@ -73,36 +91,41 @@ async def _run(args: argparse.Namespace) -> int:
73
91
  "route_ms": round(result["route_ms"], 1),
74
92
  "user_id": user_id,
75
93
  "source": "cli_route",
94
+ "schema_version": MCP_RESPONSE_SCHEMA_VERSION,
95
+ "context_mode": router.context_mode,
96
+ "context_items_count": len(context_items),
97
+ "project_rag_items_count": (result.get("event") or {}).get("project_rag_items_count", 0),
76
98
  }
77
99
  (d / "last_route.json").write_text(json.dumps(snap, indent=2), encoding="utf-8")
78
100
  except OSError:
79
101
  pass
80
102
 
103
+ db_disp = redact_display_path(db_path) if redaction_enabled() else str(db_path)
81
104
  blocks = [
82
- f"# Skillforge — routed {len(picked_names)} skill(s)",
83
- f"_DB:_ `{db_path}`",
105
+ f"# Skillforge — routed {len(picked_names)} skill(s); context=`{router.context_mode}`",
106
+ f"_DB:_ `{db_disp}`",
84
107
  f"_Reasoning: {reasoning}_" if reasoning else "",
85
108
  "",
86
109
  ]
87
- for n in picked_names:
88
- s = skills.get(n)
89
- if s:
90
- blocks.append(f"---\n## Skill: {s.name}\n\n{s.body}\n")
91
- if not picked_names:
110
+ if context_items:
111
+ blocks.append(format_context_items_markdown(context_items))
112
+ elif not picked_names:
92
113
  blocks.append("_No skills matched this prompt closely enough to load._")
93
- print("\n".join(b for b in blocks if b is not None))
114
+ response_text = "\n".join(b for b in blocks if b is not None)
115
+ print(response_text)
94
116
 
95
117
  if args.json_meta:
96
- meta = {
97
- "picked": picked_names,
98
- "reasoning": reasoning,
99
- "session_id": sid,
100
- "user_id": user_id,
101
- "rerouted": result["rerouted"],
102
- "change_pct": round(result["change"] * 100, 1),
103
- "route_ms": round(result["route_ms"], 1),
104
- "orchestrator_db": str(db_path),
105
- }
118
+ meta = build_route_skills_meta(
119
+ result=result,
120
+ picked_names=picked_names,
121
+ user_id=user_id,
122
+ db_path=db_path,
123
+ skills_map=skills,
124
+ response_text=response_text,
125
+ context_items=context_items,
126
+ fusion=(result.get("event") or {}).get("context_fusion"),
127
+ context_redaction=(result.get("event") or {}).get("context_redaction"),
128
+ )
106
129
  print(json.dumps(meta, indent=2), file=sys.stderr)
107
130
 
108
131
  return 0
@@ -0,0 +1,133 @@
1
+ """Pluggable route policies: regex on prompt → force-include skill names.
2
+
3
+ Load order (first file that exists / first successful parse wins for env):
4
+
5
+ 1. ``SKILLFORGE_ROUTE_POLICIES`` — JSON object inline (e.g. ``{\"rules\":[...]}``).
6
+ 2. ``SKILLFORGE_ROUTE_POLICIES_FILE`` — path to a JSON file.
7
+ 3. ``<project_root>/.skillforge/policies.json``
8
+ 4. ``<project_root>/skillforge-policies.json``
9
+
10
+ Rule shape::
11
+
12
+ {
13
+ "rules": [
14
+ {
15
+ "if_text_matches": "(?i)(auth|oauth|jwt|password)",
16
+ "include": ["security-review"]
17
+ }
18
+ ]
19
+ }
20
+
21
+ ``if_text_matches`` is passed to ``re.search`` (``re.DOTALL``). ``include`` is a skill
22
+ name or list of names. Forced skills are appended after router picks until
23
+ ``MAX_ACTIVE_SKILLS`` is reached.
24
+ """
25
+ from __future__ import annotations
26
+
27
+ import json
28
+ import os
29
+ import re
30
+ import sqlite3
31
+ from pathlib import Path
32
+ from typing import Any
33
+
34
+
35
+ def load_route_policies_config(project_root: str | None) -> dict[str, Any]:
36
+ """Return a dict with key ``rules`` (list). Empty rules if nothing configured."""
37
+ raw_env = os.getenv("SKILLFORGE_ROUTE_POLICIES", "").strip()
38
+ if raw_env:
39
+ try:
40
+ data = json.loads(raw_env)
41
+ return data if isinstance(data, dict) else {"rules": []}
42
+ except json.JSONDecodeError:
43
+ return {"rules": []}
44
+
45
+ paths: list[Path] = []
46
+ path_env = os.getenv("SKILLFORGE_ROUTE_POLICIES_FILE", "").strip()
47
+ if path_env:
48
+ paths.append(Path(path_env).expanduser())
49
+ if project_root:
50
+ pr = Path(project_root).expanduser().resolve()
51
+ paths.append(pr / ".skillforge" / "policies.json")
52
+ paths.append(pr / "skillforge-policies.json")
53
+
54
+ for p in paths:
55
+ if p.is_file():
56
+ try:
57
+ data = json.loads(p.read_text(encoding="utf-8"))
58
+ return data if isinstance(data, dict) else {"rules": []}
59
+ except (OSError, json.JSONDecodeError):
60
+ continue
61
+ return {"rules": []}
62
+
63
+
64
def merge_policy_includes(
    prompt: str,
    picked_names: list[str],
    policies: dict[str, Any],
    by_name: dict[str, Any],
    con: sqlite3.Connection,
    user_id: str,
    *,
    max_active: int,
) -> tuple[list[str], list[dict[str, Any]]]:
    """Append policy-driven skills after ``picked_names`` without duplicates.

    Returns (merged_pick_list, audit_rows for events / explain_route).
    """
    # Local import avoids circular import at module load time.
    from app.main import get_skill_weight

    raw_rules = policies.get("rules") if isinstance(policies, dict) else None
    rule_list = raw_rules if isinstance(raw_rules, list) else []

    audit: list[dict[str, Any]] = []
    merged = list(picked_names)
    forced: list[str] = []

    for rule in rule_list:
        if not isinstance(rule, dict):
            continue
        # Accept either key for the regex; skip rules with no usable pattern.
        pattern = rule.get("if_text_matches") or rule.get("pattern") or ""
        if not isinstance(pattern, str) or not pattern.strip():
            continue
        try:
            if not re.search(pattern, prompt, flags=re.DOTALL):
                continue
        except re.error:
            audit.append({"pattern": pattern, "effect": "invalid_regex"})
            continue

        includes = rule.get("include")
        if isinstance(includes, str):
            includes = [includes]
        if not isinstance(includes, list):
            continue

        for raw_name in includes:
            if not isinstance(raw_name, str) or not raw_name.strip():
                continue
            skill = raw_name.strip()
            if skill not in by_name:
                audit.append({"pattern": pattern, "skill": skill, "effect": "unknown_skill"})
            elif get_skill_weight(con, skill, user_id=user_id)[1]:
                # Second element of the weight tuple is the disabled flag.
                audit.append({"pattern": pattern, "skill": skill, "effect": "disabled"})
            elif skill in merged or skill in forced:
                audit.append({"pattern": pattern, "skill": skill, "effect": "already_in_list"})
            else:
                forced.append(skill)
                audit.append({"pattern": pattern, "skill": skill, "effect": "added"})

    # Apply the forced additions, honoring the active-skill cap.
    for skill in forced:
        if len(merged) >= max_active:
            audit.append({"skill": skill, "effect": "skipped_max_active", "max": max_active})
            break
        if skill not in merged:
            merged.append(skill)

    return merged, audit
@@ -0,0 +1,95 @@
1
+ """Conversation-aware routing text, skill routing cards, and sparse retrieval signals."""
2
+ from __future__ import annotations
3
+
4
+ import os
5
+ import re
6
+ from typing import Any, Protocol
7
+
8
+ import numpy as np
9
+
10
+ _TOKEN_RE = re.compile(r"[a-z0-9][a-z0-9_\-./]{2,}", re.I)
11
+
12
+
13
class _SkillCard(Protocol):
    """Structural type for the skill objects consumed by ``skill_routing_card``."""

    # Skill display name used as the card's leading label.
    title: str
    # Short description of what the skill does.
    description: str
    # Free-text phrases that should route toward this skill — presumably
    # author-curated; confirm against the skill model definition.
    triggers: str
    # Free-text phrases that should route away from this skill.
    anti_triggers: str
19
+
20
+ def build_route_query_text(
21
+ prompt: str,
22
+ conversation: list[Any] | None,
23
+ *,
24
+ max_turns: int | None = None,
25
+ max_chars_per_msg: int | None = None,
26
+ ) -> str:
27
+ """Merge recent turns with the current user message for embedding shortlist / hybrid scores.
28
+
29
+ When ``SKILLFORGE_ROUTER_CONV_MAX_TURNS`` is 0 (default), returns ``prompt`` only (legacy behavior).
30
+ """
31
+ conv = conversation or []
32
+ mt = max_turns
33
+ if mt is None:
34
+ mt = int(os.getenv("SKILLFORGE_ROUTER_CONV_MAX_TURNS", "0"))
35
+ mc = max_chars_per_msg
36
+ if mc is None:
37
+ mc = int(os.getenv("SKILLFORGE_ROUTER_CONV_MSG_CHARS", "320"))
38
+ prompt = (prompt or "").strip()
39
+ if mt <= 0 or not conv:
40
+ return prompt
41
+ tail = conv[-mt:]
42
+ parts: list[str] = []
43
+ for m in tail:
44
+ if not isinstance(m, dict):
45
+ continue
46
+ role = str(m.get("role") or "user")
47
+ content = str(m.get("content") or "").strip()
48
+ if not content:
49
+ continue
50
+ if len(content) > mc:
51
+ content = content[:mc] + "…"
52
+ parts.append(f"{role}: {content}")
53
+ if not parts:
54
+ return prompt
55
+ return "Conversation context:\n" + "\n".join(parts) + "\n\nCurrent user message:\n" + prompt
56
+
57
+
58
def skill_routing_card(s: _SkillCard) -> str:
    """Text embedded for each skill + used in hybrid / router prompts."""
    lines = [f"{(s.title or '').strip()}: {(s.description or '').strip()}"]
    # triggers / anti_triggers are read via getattr so objects missing the
    # attributes still work; blank values are omitted from the card.
    triggers = (getattr(s, "triggers", None) or "").strip()
    if triggers:
        lines.append(f"Triggers: {triggers}")
    anti = (getattr(s, "anti_triggers", None) or "").strip()
    if anti:
        lines.append(f"Anti-triggers: {anti}")
    return "\n".join(lines)
70
+
71
+
72
def tokenize_skills_query(text: str) -> list[str]:
    """Lower-cased sparse tokens (3+ chars, word-ish) extracted from *text*."""
    matches = _TOKEN_RE.findall(text or "")
    return [token.lower() for token in matches]
74
+
75
+
76
def normalize_minmax(arr: np.ndarray) -> np.ndarray:
    """Min-max scale *arr* (flattened) into [0, 1].

    A constant input maps to all zeros; an empty input is returned empty.
    """
    flat = np.asarray(arr, dtype=np.float64).reshape(-1)
    if flat.size == 0:
        return flat
    lo = float(flat.min())
    span = float(flat.max()) - lo
    if span <= 0:
        # All values identical: define the normalization as zero everywhere.
        return np.zeros_like(flat)
    return (flat - lo) / span
84
+
85
+
86
def keyword_overlap_scores(route_query: str, skill_cards: list[str]) -> np.ndarray:
    """Per-skill overlap counts (unnormalized); combine with dense via hybrid alpha."""
    query_tokens = set(tokenize_skills_query(route_query))
    if not query_tokens:
        # No usable query tokens: every skill scores zero.
        return np.zeros(len(skill_cards), dtype=np.float64)
    counts = [
        float(len(query_tokens.intersection(tokenize_skills_query(card))))
        for card in skill_cards
    ]
    return np.array(counts, dtype=np.float64)
@@ -1,7 +1,4 @@
1
- fastapi>=0.110
2
- uvicorn[standard]>=0.27
3
1
  anthropic>=0.39
4
2
  sentence-transformers>=2.7
5
3
  numpy>=1.26
6
- pydantic>=2.6
7
- httpx>=0.27
4
+ rank-bm25>=0.2.2
@@ -0,0 +1,34 @@
1
+ """Unit tests for skill body chunking (no ML)."""
2
+ from __future__ import annotations
3
+
4
+ from app.chunking import chunk_raw_document, chunk_skill_body
5
+
6
+
7
def test_chunk_respects_headings() -> None:
    """Heading-delimited sections should surface in separate chunks."""
    body = "# Title\n\nintro\n\n## A\n\none\n\n## B\n\ntwo three"
    chunks = chunk_skill_body(body, max_chars=500, overlap=50)
    assert len(chunks) >= 2
    texts = [chunk.text for chunk in chunks]
    assert any("one" in text for text in texts)
    assert any("two three" in text for text in texts)
14
+
15
+
16
def test_chunk_line_numbers_monotonic() -> None:
    """Every chunk's line span is 1-based and never inverted."""
    chunks = chunk_skill_body("a\nb\nc\nd", max_chars=5, overlap=0)
    assert chunks
    for chunk in chunks:
        # Combined check: line_start >= 1 and line_start <= line_end.
        assert 1 <= chunk.line_start <= chunk.line_end
23
+
24
+
25
def test_empty_body() -> None:
    """An empty body produces no chunks at all."""
    assert chunk_skill_body("", max_chars=100, overlap=0) == []
27
+
28
+
29
def test_chunk_raw_document_small_file() -> None:
    """A document under the char budget comes back as one chunk starting at line 1."""
    chunks = chunk_raw_document("line1\nline2\nline3", max_chars=100, overlap=0)
    assert len(chunks) == 1
    only = chunks[0]
    assert only.line_start == 1
    assert "line1" in only.text
@@ -0,0 +1,45 @@
1
+ """Tests for MMR context fusion (numpy only)."""
2
+ from __future__ import annotations
3
+
4
+ import numpy as np
5
+
6
+ from app.context_fusion import mmr_select
7
+
8
+
9
+ def test_mmr_prefers_diverse_second_item() -> None:
10
+ """Two near-duplicate high-rel docs: second pick should favor the orthogonal one when lambda < 1."""
11
+ # query-aligned
12
+ e0 = np.array([1.0, 0.0, 0.0], dtype=np.float32)
13
+ e1 = np.array([0.99, 0.14, 0.0], dtype=np.float32) # almost same as e0
14
+ e2 = np.array([0.0, 1.0, 0.0], dtype=np.float32) # different direction
15
+ emb = np.stack([e0, e1, e2], axis=0)
16
+ rel = np.array([1.0, 0.98, 0.5], dtype=np.float64)
17
+ lens = np.array([10, 10, 10], dtype=np.int64)
18
+ ovh = np.full(3, 8, dtype=np.int64)
19
+ order, trace = mmr_select(
20
+ emb,
21
+ rel,
22
+ lens,
23
+ char_budget=500,
24
+ overhead_per_chunk=ovh,
25
+ lambda_mult=0.5,
26
+ )
27
+ assert order[0] == 0
28
+ assert order[1] == 2
29
+ assert len(trace) == len(order)
30
+
31
+
32
+ def test_mmr_respects_char_budget() -> None:
33
+ emb = np.eye(3, dtype=np.float32)
34
+ rel = np.array([1.0, 0.9, 0.8])
35
+ lens = np.array([100, 100, 100], dtype=np.int64)
36
+ ovh = np.array([10, 10, 10], dtype=np.int64)
37
+ order, _ = mmr_select(
38
+ emb,
39
+ rel,
40
+ lens,
41
+ char_budget=150,
42
+ overhead_per_chunk=ovh,
43
+ lambda_mult=1.0,
44
+ )
45
+ assert len(order) == 1