instructvault 0.2.8__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
instructvault/cli.py CHANGED
@@ -8,8 +8,10 @@ from rich import print as rprint
  from .bundle import write_bundle
  from .diff import unified_diff
  from .eval import run_dataset, run_inline_tests
- from .io import load_dataset_jsonl, load_prompt_spec
+ from .io import load_dataset_jsonl, load_prompt_spec, load_prompt_dict
+ from .policy import load_policy_module, run_spec_policy
  from .junit import write_junit_xml
+ import yaml
  from .render import check_required_vars, render_messages
  from .scaffold import init_repo
  from .store import PromptStore
@@ -30,16 +32,25 @@ def init(repo: Path = typer.Option(Path("."), "--repo")):
  @app.command()
  def validate(path: Path = typer.Argument(...),
               repo: Path = typer.Option(Path("."), "--repo"),
-              json_out: bool = typer.Option(False, "--json")):
+              json_out: bool = typer.Option(False, "--json"),
+              policy: Optional[str] = typer.Option(None, "--policy")):
      base = path if path.is_absolute() else repo / path
      files = _gather_prompt_files(base)
      if not files:
          raise typer.BadParameter("No prompt files found")
      ok = True
      results = []
+     pol = load_policy_module(policy)
      for f in files:
          try:
              spec = load_prompt_spec(f.read_text(encoding="utf-8"), allow_no_tests=False)
+             errors = run_spec_policy(pol, load_prompt_dict(f.read_text(encoding="utf-8")))
+             if errors:
+                 ok = False
+                 results.append({"path": str(f), "ok": False, "error": "; ".join(errors)})
+                 if not json_out:
+                     rprint(f"[red]FAIL[/red] {f} {errors}")
+                 continue
              try:
                  rel_path = f.relative_to(repo).as_posix()
              except ValueError:
@@ -66,15 +77,18 @@ def render(prompt_path: str = typer.Argument(...),
             ref: Optional[str] = typer.Option(None, "--ref"),
             repo: Path = typer.Option(Path("."), "--repo"),
             json_out: bool = typer.Option(False, "--json"),
-            allow_no_tests: bool = typer.Option(False, "--allow-no-tests")):
+            allow_no_tests: bool = typer.Option(False, "--allow-no-tests"),
+            safe: bool = typer.Option(False, "--safe"),
+            strict_vars: bool = typer.Option(False, "--strict-vars"),
+            redact: bool = typer.Option(False, "--redact")):
      store = PromptStore(repo_root=repo)
      spec = load_prompt_spec(store.read_text(prompt_path, ref=ref), allow_no_tests=allow_no_tests)
      try:
          vars_dict = json.loads(vars_json)
      except Exception:
          raise typer.BadParameter("Invalid JSON for --vars")
-     check_required_vars(spec, vars_dict)
-     msgs = render_messages(spec, vars_dict)
+     check_required_vars(spec, vars_dict, safe=safe, strict_vars=strict_vars, redact=redact)
+     msgs = render_messages(spec, vars_dict, safe=safe, strict_vars=strict_vars, redact=redact)
      if json_out:
          rprint(json.dumps([{"role": m.role, "content": m.content} for m in msgs]))
      else:
@@ -107,6 +121,32 @@ def resolve(ref: str = typer.Argument(...),
      else:
          rprint(sha)

+ @app.command()
+ def migrate(path: Path = typer.Argument(...),
+             repo: Path = typer.Option(Path("."), "--repo"),
+             apply: bool = typer.Option(False, "--apply")):
+     base = path if path.is_absolute() else repo / path
+     files = _gather_prompt_files(base)
+     if not files:
+         raise typer.BadParameter("No prompt files found")
+     needs = []
+     for f in files:
+         data = load_prompt_dict(f.read_text(encoding="utf-8"))
+         if "spec_version" not in data:
+             needs.append(f)
+     if not needs:
+         rprint("[green]No migration needed[/green]")
+         raise typer.Exit(code=0)
+     for f in needs:
+         if apply:
+             data = load_prompt_dict(f.read_text(encoding="utf-8"))
+             data["spec_version"] = "1.0"
+             f.write_text(yaml.safe_dump(data, sort_keys=False), encoding="utf-8")
+             rprint(f"[green]Updated:[/green] {f}")
+         else:
+             rprint(f"[yellow]Missing spec_version:[/yellow] {f}")
+     raise typer.Exit(code=0 if apply else 1)
+
  @app.command()
  def bundle(prompts: Path = typer.Option(Path("prompts"), "--prompts"),
             out: Path = typer.Option(Path("out/ivault.bundle.json"), "--out"),
@@ -123,17 +163,22 @@ def eval(prompt_path: str = typer.Argument(...),
           report: Optional[Path] = typer.Option(None, "--report"),
           junit: Optional[Path] = typer.Option(None, "--junit"),
           repo: Path = typer.Option(Path("."), "--repo"),
-          json_out: bool = typer.Option(False, "--json")):
+          json_out: bool = typer.Option(False, "--json"),
+          safe: bool = typer.Option(False, "--safe"),
+          strict_vars: bool = typer.Option(False, "--strict-vars"),
+          redact: bool = typer.Option(False, "--redact"),
+          policy: Optional[str] = typer.Option(None, "--policy")):
      store = PromptStore(repo_root=repo)
      spec = load_prompt_spec(store.read_text(prompt_path, ref=ref), allow_no_tests=False)
+     pol = load_policy_module(policy)

-     ok1, r1 = run_inline_tests(spec)
+     ok1, r1 = run_inline_tests(spec, safe=safe, strict_vars=strict_vars, redact=redact, policy=pol)
      results = list(r1)
      ok = ok1

      if dataset is not None:
          rows = load_dataset_jsonl(dataset.read_text(encoding="utf-8"))
-         ok2, r2 = run_dataset(spec, rows)
+         ok2, r2 = run_dataset(spec, rows, safe=safe, strict_vars=strict_vars, redact=redact, policy=pol)
          ok = ok and ok2
          results.extend(r2)

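At the CLI level, these changes mean `validate` and `eval` gain a `--policy` hook, `render` and `eval` gain `--safe`/`--strict-vars`/`--redact`, and a new `migrate` command stamps `spec_version` into older prompt files. A minimal sketch of exercising the new flags through typer's in-process test runner; the policy path is hypothetical and the prompt file and `--vars` payload are taken from the README examples below:

```python
# Sketch only: drives the new 0.3.0 flags via typer's test runner instead of a shell.
from typer.testing import CliRunner

from instructvault.cli import app

runner = CliRunner()

# Validate prompts and additionally run a custom policy module (path is hypothetical).
result = runner.invoke(app, ["validate", "prompts", "--policy", "policies/policy_example.py"])
print(result.exit_code, result.output)

# Render with secret scanning enabled; --redact masks matches instead of failing.
result = runner.invoke(
    app,
    [
        "render", "prompts/support_reply.prompt.yml",
        "--vars", '{"ticket_text": "My app crashed.", "customer_name": "Sam"}',
        "--safe", "--redact",
    ],
)
print(result.output)

# Report prompts missing spec_version (exit code 1), or stamp them with --apply.
result = runner.invoke(app, ["migrate", "prompts", "--apply"])
print(result.exit_code)
```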
instructvault/eval.py CHANGED
@@ -1,8 +1,11 @@
  from __future__ import annotations
  from dataclasses import dataclass
  from typing import List, Optional, Tuple
+ import json
+ import re
  from .spec import AssertSpec, DatasetRow, PromptSpec
  from .render import check_required_vars, render_joined_text
+ from .policy import run_render_policy

  @dataclass(frozen=True)
  class TestResult:
@@ -19,15 +22,34 @@ def _match_assert(assert_spec: AssertSpec, text: str) -> bool:
          ok = ok and all(s.lower() in t for s in assert_spec.contains_all)
      if assert_spec.not_contains:
          ok = ok and all(s.lower() not in t for s in assert_spec.not_contains)
+     if assert_spec.matches:
+         ok = ok and all(re.search(p, text) for p in assert_spec.matches)
+     if assert_spec.not_matches:
+         ok = ok and all(not re.search(p, text) for p in assert_spec.not_matches)
+     if assert_spec.json_schema:
+         try:
+             obj = json.loads(text)
+         except Exception:
+             return False
+         try:
+             import jsonschema # type: ignore
+         except Exception as e:
+             raise ValueError("jsonschema is required for json_schema assertions") from e
+         jsonschema.validate(instance=obj, schema=assert_spec.json_schema)
      return ok

- def run_inline_tests(spec: PromptSpec) -> Tuple[bool, List[TestResult]]:
+ def run_inline_tests(spec: PromptSpec, *, safe: bool = False, strict_vars: bool = False, redact: bool = False, policy: Optional[object] = None) -> Tuple[bool, List[TestResult]]:
      results: List[TestResult] = []
      all_ok = True
      for t in spec.tests:
          try:
-             check_required_vars(spec, t.vars)
-             out = render_joined_text(spec, t.vars)
+             check_required_vars(spec, t.vars, safe=safe, strict_vars=strict_vars, redact=redact)
+             out = render_joined_text(spec, t.vars, safe=safe, strict_vars=strict_vars, redact=redact)
+             errors = run_render_policy(policy, out, {"prompt": spec.name, "test": t.name, "kind": "inline"})
+             if errors:
+                 results.append(TestResult(t.name, False, "; ".join(errors)))
+                 all_ok = False
+                 continue
              passed = _match_assert(t.assert_, out)
              results.append(TestResult(t.name, passed))
              all_ok = all_ok and passed
@@ -36,14 +58,19 @@ def run_inline_tests(spec: PromptSpec) -> Tuple[bool, List[TestResult]]:
              all_ok = False
      return all_ok, results

- def run_dataset(spec: PromptSpec, rows: List[DatasetRow]) -> Tuple[bool, List[TestResult]]:
+ def run_dataset(spec: PromptSpec, rows: List[DatasetRow], *, safe: bool = False, strict_vars: bool = False, redact: bool = False, policy: Optional[object] = None) -> Tuple[bool, List[TestResult]]:
      results: List[TestResult] = []
      all_ok = True
      for i, row in enumerate(rows, start=1):
          name = f"dataset_row_{i}"
          try:
-             check_required_vars(spec, row.vars)
-             out = render_joined_text(spec, row.vars)
+             check_required_vars(spec, row.vars, safe=safe, strict_vars=strict_vars, redact=redact)
+             out = render_joined_text(spec, row.vars, safe=safe, strict_vars=strict_vars, redact=redact)
+             errors = run_render_policy(policy, out, {"prompt": spec.name, "test": name, "kind": "dataset"})
+             if errors:
+                 results.append(TestResult(name, False, "; ".join(errors)))
+                 all_ok = False
+                 continue
              passed = _match_assert(row.assert_, out)
              results.append(TestResult(name, passed))
              all_ok = all_ok and passed
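The new `json_schema` branch in `_match_assert` parses the rendered text as JSON (non-JSON output fails the assertion) and then imports `jsonschema` lazily to do the validation. A standalone sketch of that check, with a made-up schema and rendered output:

```python
import json

import jsonschema  # declared as a dependency in 0.3.0; imported lazily inside _match_assert

# Hypothetical schema and rendered output, just to show the mechanics.
schema = {"type": "object", "required": ["reply"], "properties": {"reply": {"type": "string"}}}
rendered = '{"reply": "Thanks for reaching out, Sam!"}'

obj = json.loads(rendered)  # if this fails, the assertion fails outright
jsonschema.validate(instance=obj, schema=schema)  # raises jsonschema.ValidationError on mismatch
print("schema assertion passed")
```

A schema violation surfaces as an exception, which the surrounding try blocks in `run_inline_tests` and `run_dataset` catch and record as a failing `TestResult` rather than a crash.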
instructvault/io.py CHANGED
@@ -15,6 +15,15 @@ def load_prompt_spec(yaml_text: str, *, allow_no_tests: bool = True) -> PromptSp
      data = yaml.safe_load(yaml_text) or {}
      return PromptSpec.model_validate(data, context={"allow_no_tests": allow_no_tests})

+ def load_prompt_dict(text: str) -> Dict[str, Any]:
+     raw = text.strip()
+     if raw.startswith("{") or raw.startswith("["):
+         try:
+             return json.loads(raw) if raw else {}
+         except Exception:
+             return yaml.safe_load(text) or {}
+     return yaml.safe_load(text) or {}
+
  def load_dataset_jsonl(text: str) -> List[DatasetRow]:
      rows: List[DatasetRow] = []
      for i, line in enumerate(text.splitlines(), start=1):
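`load_prompt_dict` is deliberately forgiving: text that looks like JSON is tried as JSON first, and anything else (or a JSON parse failure) falls back to `yaml.safe_load`. A small sketch with made-up snippets:

```python
from instructvault.io import load_prompt_dict

# JSON-looking input is parsed as JSON first.
print(load_prompt_dict('{"name": "support_reply", "spec_version": "1.0"}'))

# Anything else goes through yaml.safe_load; empty input comes back as {}.
print(load_prompt_dict("name: support_reply\nspec_version: '1.0'\n"))
print(load_prompt_dict(""))
```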
instructvault/policy.py ADDED
@@ -0,0 +1,33 @@
+ from __future__ import annotations
+ import importlib.util
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ def load_policy_module(path: Optional[str]) -> Optional[object]:
+     if not path:
+         return None
+     p = Path(path)
+     spec = importlib.util.spec_from_file_location("ivault_policy", p)
+     if spec is None or spec.loader is None:
+         raise ValueError(f"Could not load policy module: {path}")
+     mod = importlib.util.module_from_spec(spec)
+     spec.loader.exec_module(mod)
+     return mod
+
+ def run_spec_policy(mod: Optional[object], spec_dict: Dict[str, Any]) -> List[str]:
+     if mod is None:
+         return []
+     fn = getattr(mod, "check_spec", None)
+     if fn is None:
+         return []
+     res = fn(spec_dict)
+     return list(res) if res else []
+
+ def run_render_policy(mod: Optional[object], text: str, context: Dict[str, Any]) -> List[str]:
+     if mod is None:
+         return []
+     fn = getattr(mod, "check_render", None)
+     if fn is None:
+         return []
+     res = fn(text, context)
+     return list(res) if res else []
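The loader above imports an arbitrary Python file under the module name `ivault_policy` and only calls two optional hooks by name: `check_spec(spec_dict)` and `check_render(text, context)`, each returning a list of error strings (an empty list or `None` means pass). A minimal policy file might look like the following sketch; the specific rules are illustrative assumptions, not the shipped `examples/policies` content:

```python
# my_policy.py -- hypothetical policy module, passed as `--policy my_policy.py`
from typing import Any, Dict, List


def check_spec(spec: Dict[str, Any]) -> List[str]:
    """Runs once per prompt file against the raw spec dict."""
    errors: List[str] = []
    if "owner" not in spec:  # assumed house rule: every prompt declares an owner
        errors.append("prompt spec is missing an 'owner' field")
    return errors


def check_render(text: str, context: Dict[str, Any]) -> List[str]:
    """Runs against each rendered test output; context carries prompt, test and kind."""
    errors: List[str] = []
    if "as an ai language model" in text.lower():  # assumed banned phrase
        errors.append(f"banned phrase in {context['prompt']}::{context['test']}")
    return errors
```

Either hook may be omitted; `run_spec_policy` and `run_render_policy` treat a missing attribute, or no policy at all, as "no findings".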
instructvault/render.py CHANGED
@@ -1,22 +1,57 @@
  from __future__ import annotations
  from typing import Any, Dict, List
+ import re
  from jinja2 import Environment, StrictUndefined
  from .spec import PromptMessage, PromptSpec

  _env = Environment(undefined=StrictUndefined, autoescape=False)

- def check_required_vars(spec: PromptSpec, vars: Dict[str, Any]) -> None:
+ _SECRET_PATTERNS = [
+     ("openai_key", re.compile(r"sk-[A-Za-z0-9]{20,}")),
+     ("aws_key", re.compile(r"AKIA[0-9A-Z]{16}")),
+     ("pypi_token", re.compile(r"pypi-[A-Za-z0-9]{20,}")),
+     ("generic_token", re.compile(r"(?:api|token|secret)[=_:\s-]{1,}[A-Za-z0-9\-]{16,}", re.IGNORECASE)),
+ ]
+
+ def _scan_for_secrets(text: str) -> List[str]:
+     hits: List[str] = []
+     for name, pat in _SECRET_PATTERNS:
+         if pat.search(text):
+             hits.append(name)
+     return hits
+
+ def check_required_vars(spec: PromptSpec, vars: Dict[str, Any], *, safe: bool = False, strict_vars: bool = False, redact: bool = False) -> None:
      missing = [k for k in spec.variables.required if k not in vars]
      if missing:
          raise ValueError(f"Missing required vars: {missing}")
+     if strict_vars:
+         allowed = set(spec.variables.required + spec.variables.optional)
+         extra = [k for k in vars.keys() if k not in allowed]
+         if extra:
+             raise ValueError(f"Unexpected vars: {extra}")
+     if safe and not redact:
+         for v in vars.values():
+             if isinstance(v, str):
+                 hits = _scan_for_secrets(v)
+                 if hits:
+                     raise ValueError(f"Potential secret detected in vars: {hits}")

- def render_messages(spec: PromptSpec, vars: Dict[str, Any]) -> List[PromptMessage]:
+ def render_messages(spec: PromptSpec, vars: Dict[str, Any], *, safe: bool = False, strict_vars: bool = False, redact: bool = False) -> List[PromptMessage]:
      rendered: List[PromptMessage] = []
      for m in spec.messages:
          tmpl = _env.from_string(m.content)
-         rendered.append(PromptMessage(role=m.role, content=tmpl.render(**vars)))
+         content = tmpl.render(**vars)
+         if safe:
+             hits = _scan_for_secrets(content)
+             if hits:
+                 if redact:
+                     for _, pat in _SECRET_PATTERNS:
+                         content = pat.sub("[REDACTED]", content)
+                 else:
+                     raise ValueError(f"Potential secret detected in rendered output: {hits}")
+         rendered.append(PromptMessage(role=m.role, content=content))
      return rendered

- def render_joined_text(spec: PromptSpec, vars: Dict[str, Any]) -> str:
-     msgs = render_messages(spec, vars)
+ def render_joined_text(spec: PromptSpec, vars: Dict[str, Any], *, safe: bool = False, strict_vars: bool = False, redact: bool = False) -> str:
+     msgs = render_messages(spec, vars, safe=safe, strict_vars=strict_vars, redact=redact)
      return "\n\n".join([f"{m.role}: {m.content}" for m in msgs])
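To make the `--safe`/`--redact` behaviour concrete, here is the same pattern-plus-substitution logic applied in isolation to one of the built-in patterns; the key value is fabricated:

```python
import re

# Mirrors the "openai_key" entry in _SECRET_PATTERNS.
openai_key = re.compile(r"sk-[A-Za-z0-9]{20,}")

text = "Authorization: Bearer sk-AAAAABBBBBCCCCCDDDDDEEEEE"  # fake key, 25 chars after "sk-"

print(bool(openai_key.search(text)))       # True -> with --safe alone this raises ValueError
print(openai_key.sub("[REDACTED]", text))  # with --safe --redact the match is masked instead
```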
instructvault/sdk.py CHANGED
@@ -27,7 +27,7 @@ class InstructVault:
          if self.store is None:
              raise ValueError("No repo_root configured")
          return load_prompt_spec(self.store.read_text(prompt_path, ref=ref), allow_no_tests=True)
-     def render(self, prompt_path: str, vars: Dict[str, Any], ref: Optional[str] = None) -> List[PromptMessage]:
+     def render(self, prompt_path: str, vars: Dict[str, Any], ref: Optional[str] = None, *, safe: bool = False, strict_vars: bool = False, redact: bool = False) -> List[PromptMessage]:
          spec = self.load_prompt(prompt_path, ref=ref)
-         check_required_vars(spec, vars)
-         return render_messages(spec, vars)
+         check_required_vars(spec, vars, safe=safe, strict_vars=strict_vars, redact=redact)
+         return render_messages(spec, vars, safe=safe, strict_vars=strict_vars, redact=redact)
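On the SDK side, `InstructVault.render` now forwards the same three keyword-only switches. A usage sketch, assuming a repo-backed vault; the `repo_root` constructor argument is inferred from the "No repo_root configured" error above, so check the SDK for the exact signature:

```python
from instructvault.sdk import InstructVault

vault = InstructVault(repo_root=".")  # assumed constructor form for a git-repo-backed vault

msgs = vault.render(
    "prompts/support_reply.prompt.yml",
    {"ticket_text": "My app crashed.", "customer_name": "Sam"},
    safe=True,         # scan vars and rendered output for secret-looking strings
    strict_vars=True,  # reject variables the spec does not declare
    redact=True,       # mask detected secrets instead of raising
)
for m in msgs:
    print(m.role, m.content)
```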
instructvault/spec.py CHANGED
@@ -25,9 +25,12 @@ class AssertSpec(BaseModel):
      contains_any: Optional[List[str]] = None
      contains_all: Optional[List[str]] = None
      not_contains: Optional[List[str]] = None
+     matches: Optional[List[str]] = None
+     not_matches: Optional[List[str]] = None
+     json_schema: Optional[Dict[str, Any]] = None
      @model_validator(mode="after")
      def _require_one(self) -> "AssertSpec":
-         if not (self.contains_any or self.contains_all or self.not_contains):
+         if not (self.contains_any or self.contains_all or self.not_contains or self.matches or self.not_matches or self.json_schema):
              raise ValueError("assert must include at least one of contains_any, contains_all, not_contains")
          return self

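With the three new fields, an assertion can be regex- or schema-based as well as substring-based, and the validator accepts any one of the six. A quick sketch using pydantic's `model_validate`; the values are illustrative:

```python
from instructvault.spec import AssertSpec

a = AssertSpec.model_validate({
    "matches": [r"ticket\s+#\d+"],             # regexes run against the raw rendered text
    "not_matches": [r"(?i)internal use only"],
    "json_schema": {"type": "object", "required": ["reply"]},
})
print(a.matches, a.json_schema)

# An empty assert still fails validation, as before.
try:
    AssertSpec.model_validate({})
except Exception as e:
    print("rejected:", e)
```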
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: instructvault
- Version: 0.2.8
+ Version: 0.3.0
  Summary: Git-first prompt registry + CI evals + lightweight runtime SDK (ivault).
  Project-URL: Homepage, https://github.com/05satyam/instruct_vault
  Project-URL: Repository, https://github.com/05satyam/instruct_vault
@@ -9,6 +9,7 @@ License: Apache-2.0
  License-File: LICENSE
  Requires-Python: >=3.10
  Requires-Dist: jinja2>=3.1
+ Requires-Dist: jsonschema>=4.21
  Requires-Dist: pydantic>=2.7
  Requires-Dist: pyyaml>=6.0
  Requires-Dist: rich>=13.7
@@ -127,6 +128,10 @@ ivault validate prompts
  ivault render prompts/support_reply.prompt.yml --vars '{"ticket_text":"My app crashed.","customer_name":"Sam"}'
  ```

+ Safety tip: add `--safe` to scan rendered output for common secret patterns.
+ Use `--strict-vars` to forbid unknown vars and `--redact` to mask detected secrets.
+ Use `--policy /path/to/policy.py` to enforce custom compliance rules.
+
  ### 4) Add dataset‑driven eval
  `datasets/support_cases.jsonl`
  ```jsonl
@@ -142,6 +147,11 @@ Note: Prompts must include at least one inline test. Datasets are optional.
  Migration tip: if you need to render a prompt that doesn’t yet include tests, use
  `ivault render --allow-no-tests` or add a minimal test first.

+ Spec migration check:
+ ```bash
+ ivault migrate prompts
+ ```
+
  ### 5) Version prompts with tags
  ```bash
  git add prompts datasets
@@ -179,6 +189,10 @@ vault = InstructVault(bundle_path="out/ivault.bundle.json")
  - `examples/notebooks/instructvault_openai_colab.ipynb`
  [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/05satyam/instruct_vault/blob/main/examples/notebooks/instructvault_openai_colab.ipynb)

+ ## Example Policies
+ - `examples/policies/policy_example.py`
+ - `examples/policies/policy_pack.py`
+
  ## How teams use this in production
  1) Prompt changes go through PRs
  2) CI runs `validate` + `eval`
@@ -208,11 +222,13 @@ Then send `x-ivault-api-key` in requests (or keep it behind your org gateway).
  If you don’t set the env var, no auth is required.

  ## Docs
+ - `docs/spec.md`
  - `docs/vision.md`
  - `docs/governance.md`
  - `docs/ci.md`
  - `docs/playground.md`
  - `docs/cookbooks.md`
+ - `docs/audit_logging.md`
  - `docs/dropin_guide.md`
  - `docs/release_checklist.md`
  - `docs/ci_templates/gitlab-ci.yml`
instructvault-0.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,18 @@
+ instructvault/__init__.py,sha256=cg7j0qh6W84D-K0uSOLKKAP2JquW4NRXwZRDDLk5E18,59
+ instructvault/bundle.py,sha256=6bfHNxJsE3zuZBLX5ZiMAhn1Dw6BnFHRa55fN6XIPRI,3008
+ instructvault/cli.py,sha256=19oxI0RZcHSk3TyvaEaqWjrbB6DTRbtsb0M6b54dmTA,8566
+ instructvault/diff.py,sha256=vz_vmKDXasNFoVKHCk2u_TsboHk1BdwvX0wCnJI1ATQ,252
+ instructvault/eval.py,sha256=BgmJG7msEdCEW_UdpUpBdUlQ6F1yL8mtCnscwwCGCvc,3548
+ instructvault/io.py,sha256=vlGaaEw5A8qnjsujxGjL9Xt2200-HEuYepm1gxQ1CKQ,1336
+ instructvault/junit.py,sha256=sIEcIiGD3Xk6uCYjnE5p_07j8dPoS_RAc2eoy3BIBeQ,1133
+ instructvault/policy.py,sha256=rQlPlVPWFudOCu6eXowFwGFZooUHplzi__cB_TrpLe4,1066
+ instructvault/render.py,sha256=P4djgHlyuUDBapUD206z218JZxvbcXIO6mrHxH4ldKg,2525
+ instructvault/scaffold.py,sha256=f5gwXE3dUPuJYTedZRqBs8w5SQEgt1dgDSuqW2dxrMg,1685
+ instructvault/sdk.py,sha256=4M3d-KqyuWLcXkaccZjhI-BsBnD4cU_GynkJPJ3ged4,1901
+ instructvault/spec.py,sha256=ybv-0rQ4Vqxrj277u7JAVttHQCGmix5zMmtZ4_SN7A8,2639
+ instructvault/store.py,sha256=NhN49w7xrkeij0lQDr-CEdANYLpNVBXumv_cKqLmiYY,1056
+ instructvault-0.3.0.dist-info/METADATA,sha256=po0yI-tsInHwHeJSokID1Vuvn9cKt67KgqbEMtsDSOg,7606
+ instructvault-0.3.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ instructvault-0.3.0.dist-info/entry_points.txt,sha256=cdcMJQwBk9c95LwfN2W6x2xO43FwPjhfV3jHE7TTuHg,49
+ instructvault-0.3.0.dist-info/licenses/LICENSE,sha256=VFbCvIsyizmkz4NrZPMdcPhyRK5uM0HhAjv3GBUbb7Y,135
+ instructvault-0.3.0.dist-info/RECORD,,
instructvault-0.2.8.dist-info/RECORD DELETED
@@ -1,17 +0,0 @@
- instructvault/__init__.py,sha256=cg7j0qh6W84D-K0uSOLKKAP2JquW4NRXwZRDDLk5E18,59
- instructvault/bundle.py,sha256=6bfHNxJsE3zuZBLX5ZiMAhn1Dw6BnFHRa55fN6XIPRI,3008
- instructvault/cli.py,sha256=v5vP-sgVpXRs-YGvxH8VWIarFqUD1IsXdB9lseaFJDA,6310
- instructvault/diff.py,sha256=vz_vmKDXasNFoVKHCk2u_TsboHk1BdwvX0wCnJI1ATQ,252
- instructvault/eval.py,sha256=-yrFHCEUrONvzfKLP8s_RktFU74Ergp9tQJvzfrMR9s,1949
- instructvault/io.py,sha256=n1yQfiy93Duz-8tJ_HpbCEq8MUn2jlLpSmUY6XBg8G4,1037
- instructvault/junit.py,sha256=sIEcIiGD3Xk6uCYjnE5p_07j8dPoS_RAc2eoy3BIBeQ,1133
- instructvault/render.py,sha256=vcVnqIXGytskZEKbUofoKgIVflQSYhsmdpEtZs1X19A,919
- instructvault/scaffold.py,sha256=f5gwXE3dUPuJYTedZRqBs8w5SQEgt1dgDSuqW2dxrMg,1685
- instructvault/sdk.py,sha256=abqFrmc9Q5LUqC_ZrwM12DlpTZZkXqRuzN0T2x9lqqY,1727
- instructvault/spec.py,sha256=ZtVXosHy0f3hRB5CP9xbVzSdW8fDnf0-AR46ehG9-MA,2450
- instructvault/store.py,sha256=NhN49w7xrkeij0lQDr-CEdANYLpNVBXumv_cKqLmiYY,1056
- instructvault-0.2.8.dist-info/METADATA,sha256=R7vOJvngSwH-SOLTdiyDAPueNgmrUNnHHtN6NEpEsWI,7143
- instructvault-0.2.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- instructvault-0.2.8.dist-info/entry_points.txt,sha256=cdcMJQwBk9c95LwfN2W6x2xO43FwPjhfV3jHE7TTuHg,49
- instructvault-0.2.8.dist-info/licenses/LICENSE,sha256=VFbCvIsyizmkz4NrZPMdcPhyRK5uM0HhAjv3GBUbb7Y,135
- instructvault-0.2.8.dist-info/RECORD,,