lackpy-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lackpy/__init__.py ADDED
@@ -0,0 +1,24 @@
+ """lackpy: Python that lacks most of Python."""
+
+ __version__ = "0.1.0"
+
+ from .service import LackpyService
+ from .lang.validator import validate, ValidationResult
+ from .lang.grader import Grade, compute_grade
+ from .lang.grammar import ALLOWED_NODES, FORBIDDEN_NODES, FORBIDDEN_NAMES, ALLOWED_BUILTINS
+ from .kit.toolbox import Toolbox, ToolSpec, ArgSpec
+ from .kit.registry import resolve_kit, ResolvedKit
+ from .run.runner import RestrictedRunner
+ from .run.base import ExecutionResult
+ from .run.trace import Trace, TraceEntry
+
+ __all__ = [
+     "LackpyService",
+     "validate", "ValidationResult",
+     "Grade", "compute_grade",
+     "ALLOWED_NODES", "FORBIDDEN_NODES", "FORBIDDEN_NAMES", "ALLOWED_BUILTINS",
+     "Toolbox", "ToolSpec", "ArgSpec",
+     "resolve_kit", "ResolvedKit",
+     "RestrictedRunner", "ExecutionResult",
+     "Trace", "TraceEntry",
+ ]
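
A quick usage sketch of the public surface exported above (the tool name "glob" is hypothetical, and whether a given program validates depends on the grammar rules in lang/grammar, which are not shown in this diff):

    from lackpy import validate

    # Check a candidate program against an allow-list of callable names.
    result = validate("files = glob('**/*.py')\nfiles", allowed_names={"glob"})
    print(result.valid, result.errors)
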
lackpy/cli.py ADDED
@@ -0,0 +1,255 @@
+ """Command-line interface for lackpy."""
+
+ from __future__ import annotations
+
+ import argparse
+ import asyncio
+ import json
+ import sys
+ from pathlib import Path
+ from typing import Any
+
+
+ def _parse_kit(kit_str: str) -> str | list[str]:
+     if kit_str.startswith("@"):
+         # "@file" form (see the delegate --kit help): read tool names, one per line.
+         return [line.strip() for line in Path(kit_str[1:]).read_text().splitlines() if line.strip()]
+     if "," in kit_str:
+         return [k.strip() for k in kit_str.split(",")]
+     return kit_str
+
+
+ def build_parser() -> argparse.ArgumentParser:
+     parser = argparse.ArgumentParser(
+         prog="lackpy",
+         description="lackpy — micro-inferencer for restricted Python programs",
+     )
+     parser.add_argument(
+         "--workspace", type=Path, default=None,
+         help="Workspace directory (default: cwd)",
+     )
+
+     subparsers = parser.add_subparsers(dest="command")
+
+     # delegate
+     delegate_p = subparsers.add_parser("delegate", help="Generate and run a program from intent")
+     delegate_p.add_argument("intent", help="Natural language intent")
+     delegate_p.add_argument("--kit", default=None, help="Kit name, comma-separated list, or @file")
+     delegate_p.add_argument("--sandbox", default=None, help="Sandbox profile")
+
+     # generate
+     generate_p = subparsers.add_parser("generate", help="Generate a program from intent without running")
+     generate_p.add_argument("intent", help="Natural language intent")
+     generate_p.add_argument("--kit", default=None, help="Kit name or comma-separated list")
+
+     # run
+     run_p = subparsers.add_parser("run", help="Run a program file")
+     run_p.add_argument("file", help="Program file to run")
+     run_p.add_argument("--kit", default=None, help="Kit name or comma-separated list")
+
+     # create
+     create_p = subparsers.add_parser("create", help="Validate and save a program as a template")
+     create_p.add_argument("file", help="Program file to save as template")
+     create_p.add_argument("--name", required=True, help="Template name")
+     create_p.add_argument("--kit", default=None, help="Kit name or comma-separated list")
+     create_p.add_argument("--pattern", default=None, help="Intent pattern regex")
+
+     # validate
+     validate_p = subparsers.add_parser("validate", help="Validate a program without running")
+     validate_p.add_argument("file", help="Program file to validate")
+     validate_p.add_argument("--kit", default=None, help="Kit name or comma-separated list")
+
+     # spec
+     subparsers.add_parser("spec", help="Print language spec")
+
+     # status
+     subparsers.add_parser("status", help="Show lackpy status and configuration")
+
+     # kit
+     kit_p = subparsers.add_parser("kit", help="Manage kits")
+     kit_sub = kit_p.add_subparsers(dest="kit_command")
+
+     kit_sub.add_parser("list", help="List available kits")
+
+     kit_info_p = kit_sub.add_parser("info", help="Show kit info")
+     kit_info_p.add_argument("name", help="Kit name or comma-separated tools")
+     kit_info_p.add_argument("--tools", nargs="+", default=None, help="Tool names")
+
+     kit_create_p = kit_sub.add_parser("create", help="Create a new kit")
+     kit_create_p.add_argument("name", help="Kit name")
+     kit_create_p.add_argument("--tools", nargs="+", required=True, help="Tool names to include")
+     kit_create_p.add_argument("--description", default=None, help="Kit description")
+
+     # toolbox
+     toolbox_p = subparsers.add_parser("toolbox", help="Manage toolbox")
+     toolbox_sub = toolbox_p.add_subparsers(dest="toolbox_command")
+
+     toolbox_sub.add_parser("list", help="List all registered tools")
+
+     toolbox_show_p = toolbox_sub.add_parser("show", help="Show tool details")
+     toolbox_show_p.add_argument("name", help="Tool name")
+
+     # template
+     template_p = subparsers.add_parser("template", help="Manage templates")
+     template_sub = template_p.add_subparsers(dest="template_command")
+
+     template_sub.add_parser("list", help="List available templates")
+
+     template_test_p = template_sub.add_parser("test", help="Test a template")
+     template_test_p.add_argument("name", help="Template name")
+
+     # init
+     init_p = subparsers.add_parser("init", help="Initialize .lackpy workspace")
+     init_p.add_argument("--ollama-model", default="qwen2.5-coder:1.5b", help="Default Ollama model")
+
+     return parser
+
+
+ def _init_config(workspace: Path, ollama_model: str) -> None:
+     config_dir = workspace / ".lackpy"
+     config_dir.mkdir(parents=True, exist_ok=True)
+     (config_dir / "templates").mkdir(exist_ok=True)
+     (config_dir / "kits").mkdir(exist_ok=True)
+     config_file = config_dir / "config.toml"
+     if config_file.exists():
+         print(f"Config already exists at {config_file}", file=sys.stderr)
+         return
+     config_file.write_text(f"""\
+ [inference]
+ order = ["templates", "rules", "ollama-local"]
+
+ [inference.providers.ollama-local]
+ plugin = "ollama"
+ host = "http://localhost:11434"
+ model = "{ollama_model}"
+
+ [kit]
+ default = "debug"
+
+ [sandbox]
+ enabled = false
+ timeout_seconds = 120
+ memory_mb = 512
+ """)
+     print(f"Initialized lackpy workspace at {config_dir}")
+
+
+ def main(argv: list[str] | None = None) -> int:
+     parser = build_parser()
+     args = parser.parse_args(argv)
+
+     if args.command is None:
+         parser.print_help()
+         return 0
+
+     workspace = args.workspace or Path.cwd()
+
+     if args.command == "init":
+         _init_config(workspace, args.ollama_model)
+         return 0
+
+     if args.command == "spec":
+         from .lang.spec import get_spec
+         print(json.dumps(get_spec(), indent=2))
+         return 0
+
+     from .service import LackpyService
+     svc = LackpyService(workspace=workspace)
+
+     if args.command == "status":
+         config = svc._config
+         info = {
+             "workspace": str(workspace),
+             "config_dir": str(config.config_dir),
+             "inference_order": config.inference_order,
+             "kit_default": config.kit_default,
+             "sandbox_enabled": config.sandbox_enabled,
+             "tools": len(svc.toolbox.tools),
+         }
+         print(json.dumps(info, indent=2))
+         return 0
+
+     if args.command == "toolbox":
+         if args.toolbox_command == "list":
+             tools = svc.toolbox_list()
+             print(json.dumps(tools, indent=2))
+         elif args.toolbox_command == "show":
+             tools = svc.toolbox_list()
+             match = [t for t in tools if t["name"] == args.name]
+             if not match:
+                 print(f"Tool '{args.name}' not found", file=sys.stderr)
+                 return 1
+             print(json.dumps(match[0], indent=2))
+         else:
+             print("Usage: lackpy toolbox {list|show}", file=sys.stderr)
+             return 1
+         return 0
+
+     if args.command == "kit":
+         if args.kit_command == "list":
+             kits = svc.kit_list()
+             print(json.dumps(kits, indent=2))
+         elif args.kit_command == "info":
+             kit = _parse_kit(args.name) if args.tools is None else args.tools
+             info = svc.kit_info(kit)
+             print(json.dumps(info, indent=2))
+         elif args.kit_command == "create":
+             result = svc.kit_create(args.name, args.tools, args.description)
+             print(json.dumps(result, indent=2))
+         else:
+             print("Usage: lackpy kit {list|info|create}", file=sys.stderr)
+             return 1
+         return 0
+
+     if args.command == "template":
+         if args.template_command == "list":
+             templates_dir = svc._config.config_dir / "templates"
+             if not templates_dir.exists():
+                 print("[]")
+             else:
+                 tmpls = [{"name": p.stem, "path": str(p)} for p in sorted(templates_dir.glob("*.tmpl"))]
+                 print(json.dumps(tmpls, indent=2))
+         elif args.template_command == "test":
+             print(f"Testing template '{args.name}' not yet implemented", file=sys.stderr)
+             return 1
+         else:
+             print("Usage: lackpy template {list|test}", file=sys.stderr)
+             return 1
+         return 0
+
+     kit = _parse_kit(args.kit) if getattr(args, "kit", None) else None
+
+     if args.command == "validate":
+         program = Path(args.file).read_text()
+         result = svc.validate(program, kit=kit)
+         out: dict[str, Any] = {"valid": result.valid, "errors": result.errors, "calls": list(result.calls)}
+         print(json.dumps(out, indent=2))
+         return 0 if result.valid else 1
+
+     if args.command == "run":
+         program = Path(args.file).read_text()
+         result = asyncio.run(svc.run_program(program, kit=kit))
+         out = {"success": result.success, "output": result.output, "error": result.error}
+         print(json.dumps(out, indent=2))
+         return 0 if result.success else 1
+
+     if args.command == "generate":
+         result = asyncio.run(svc.generate(args.intent, kit=kit))
+         print(result.program)
+         return 0
+
+     if args.command == "create":
+         program = Path(args.file).read_text()
+         result = asyncio.run(svc.create(program, kit=kit, name=args.name, pattern=args.pattern))
+         print(json.dumps(result, indent=2))
+         return 0 if result.get("success") else 1
+
+     if args.command == "delegate":
+         sandbox = getattr(args, "sandbox", None)
+         result = asyncio.run(svc.delegate(args.intent, kit=kit, sandbox=sandbox))
+         print(json.dumps(result, indent=2))
+         return 0 if result["success"] else 1
+
+     print(f"Unknown command: {args.command}", file=sys.stderr)
+     return 1
+
+
+ if __name__ == "__main__":
+     sys.exit(main())
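
The CLI can also be driven in-process through main(), which takes an argv list and returns an exit code; a minimal sketch (the program file name is hypothetical):

    from lackpy.cli import main

    # Same as `lackpy spec` from a shell: prints the language spec as JSON.
    code = main(["spec"])

    # Validate a saved program against the default kit; returns 1 on failure.
    code = main(["validate", "my_program.py"])
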
lackpy/config.py ADDED
@@ -0,0 +1,56 @@
+ """Configuration loading from .lackpy/config.toml."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Any
+
+ try:
+     import tomllib
+ except ImportError:  # Python < 3.11
+     import tomli as tomllib  # type: ignore[no-redef]
+
+
+ @dataclass
+ class LackpyConfig:
+     inference_order: list[str] = field(default_factory=lambda: ["templates", "rules"])
+     inference_providers: dict[str, dict[str, Any]] = field(default_factory=dict)
+     kit_default: str = "debug"
+     sandbox_enabled: bool = False
+     sandbox_timeout: int = 120
+     sandbox_memory_mb: int = 512
+     tool_providers: dict[str, dict[str, Any]] = field(default_factory=dict)
+     config_dir: Path = field(default_factory=lambda: Path(".lackpy"))
+
+
+ def load_config(workspace: Path | None = None) -> LackpyConfig:
+     if workspace is None:
+         workspace = Path.cwd()
+     config_dir = workspace / ".lackpy"
+     config_file = config_dir / "config.toml"
+     if not config_file.exists():
+         return LackpyConfig(config_dir=config_dir)
+     with open(config_file, "rb") as f:
+         data = tomllib.load(f)
+     inference = data.get("inference", {})
+     kit = data.get("kit", {})
+     sandbox = data.get("sandbox", {})
+     tool_providers = data.get("tool_providers", {})
+     providers: dict[str, dict[str, Any]] = {}
+     for name, cfg in inference.get("providers", {}).items():
+         providers[name] = cfg
+     return LackpyConfig(
+         inference_order=inference.get("order", ["templates", "rules"]),
+         inference_providers=providers,
+         kit_default=kit.get("default", "debug"),
+         sandbox_enabled=sandbox.get("enabled", False),
+         sandbox_timeout=sandbox.get("timeout_seconds", 120),
+         sandbox_memory_mb=sandbox.get("memory_mb", 512),
+         tool_providers=tool_providers,
+         config_dir=config_dir,
+     )
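
Because load_config() falls back to the dataclass defaults when .lackpy/config.toml is absent, it is safe to call from any directory; a minimal loading sketch:

    from pathlib import Path
    from lackpy.config import load_config

    cfg = load_config(Path.cwd())
    print(cfg.kit_default)      # "debug" unless overridden
    print(cfg.inference_order)  # ["templates", "rules"] by default
    print(cfg.sandbox_enabled)  # False by default
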
@@ -0,0 +1 @@
+ """lackpy inference: program generation from natural language intent."""
@@ -0,0 +1,60 @@
+ """Priority-ordered inference dispatch across provider plugins."""
+
+ from __future__ import annotations
+
+ import time
+ from dataclasses import dataclass
+ from typing import Any
+
+ from ..lang.validator import validate
+ from .sanitize import sanitize_output
+
+
+ @dataclass
+ class GenerationResult:
+     program: str
+     provider_name: str
+     generation_time_ms: float
+
+
+ class InferenceDispatcher:
+     def __init__(self, providers: list[Any]) -> None:
+         self._providers = providers
+
+     async def generate(self, intent: str, namespace_desc: str, allowed_names: set[str],
+                        params_desc: str | None = None, extra_rules: list | None = None) -> GenerationResult:
+         start = time.perf_counter()
+         errors_by_provider: dict[str, list[str]] = {}
+
+         for provider in self._providers:
+             if not provider.available():
+                 continue
+
+             # One initial attempt, then one retry with the validator's errors as feedback.
+             feedback: list[str] | None = None
+             for _ in range(2):
+                 raw = await provider.generate(intent, namespace_desc, error_feedback=feedback)
+                 if raw is None:
+                     break
+                 program = sanitize_output(raw)
+                 validation = validate(program, allowed_names=allowed_names, extra_rules=extra_rules)
+                 if validation.valid:
+                     elapsed = (time.perf_counter() - start) * 1000
+                     return GenerationResult(program=program, provider_name=provider.name, generation_time_ms=elapsed)
+                 errors_by_provider[provider.name] = validation.errors
+                 feedback = validation.errors
+
+         provider_names = [p.name for p in self._providers if p.available()]
+         raise RuntimeError(
+             f"All {len(provider_names)} providers failed to produce a valid program. "
+             f"Tried: {', '.join(provider_names)}. Last errors: {errors_by_provider}"
+         )
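
A dispatch sketch with a stub provider (the dispatcher's module path is assumed here, and the example presumes that the stub's output passes validation when "glob" is in allowed_names):

    import asyncio
    from lackpy.infer.dispatch import InferenceDispatcher  # module path assumed

    class EchoProvider:
        """Hypothetical provider that always returns a fixed program."""

        @property
        def name(self) -> str:
            return "echo"

        def available(self) -> bool:
            return True

        async def generate(self, intent, namespace_desc, config=None, error_feedback=None):
            return "files = glob('**/*.py')\nfiles"

    dispatcher = InferenceDispatcher([EchoProvider()])
    result = asyncio.run(dispatcher.generate(
        "list python files", "glob(pattern: str) -> list[str]", allowed_names={"glob"},
    ))
    print(result.provider_name, round(result.generation_time_ms, 2))
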
lackpy/infer/prompt.py ADDED
@@ -0,0 +1,51 @@
+ """System prompt construction for inference providers."""
+
+ from __future__ import annotations
+
+ from ..lang.grammar import ALLOWED_BUILTINS
+
+ _TEMPLATE = """\
+ You are a Jupyter notebook cell generator. Write a single cell \
+ using ONLY the pre-loaded kernel namespace below.
+
+ Output ONLY the cell contents — no markdown, no explanation, no code fences.
+
+ Assign tool results to variables and reuse them. Never call the same function twice \
+ when you can reuse a variable.
+
+ Kernel namespace:
+ {namespace_desc}
+
+ Builtins: {builtins_list}
+ {params_section}\
+ Not available: import, def, class, while, try/except, lambda, open
+
+ The cell's last expression is displayed as output."""
+
+
+ def build_system_prompt(namespace_desc: str, params_desc: str | None = None) -> str:
+     builtins_list = ", ".join(sorted(ALLOWED_BUILTINS))
+     params_section = ""
+     if params_desc:
+         params_section = (
+             "\nPre-set variables (already defined, use directly):\n"
+             f"{params_desc}\n\n"
+         )
+     return _TEMPLATE.format(
+         namespace_desc=namespace_desc,
+         builtins_list=builtins_list,
+         params_section=params_section,
+     )
+
+
+ def format_params_description(params: dict) -> str:
+     lines = []
+     for name, value in params.items():
+         if isinstance(value, dict) and "value" in value:
+             ptype = value.get("type", type(value["value"]).__name__)
+             desc = value.get("description", "")
+             lines.append(f" {name}: {ptype}" + (f" — {desc}" if desc else ""))
+         else:
+             ptype = type(value).__name__
+             lines.append(f" {name}: {ptype}")
+     return "\n".join(lines)
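
For illustration, building a prompt with one pre-set parameter (the namespace description is a made-up example):

    from lackpy.infer.prompt import build_system_prompt, format_params_description

    params = {"root": {"value": "src/", "type": "str", "description": "directory to search"}}
    prompt = build_system_prompt(
        namespace_desc="glob(pattern: str) -> list[str]",
        params_desc=format_params_description(params),
    )
    # The prompt lists the kernel namespace, the allowed builtins, and a
    # "Pre-set variables" section containing "root: str — directory to search".
    print(prompt)
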
@@ -0,0 +1 @@
+ """Inference provider plugins."""
@@ -0,0 +1,46 @@
+ """Tier 3: Anthropic API fallback inference."""
+
+ from __future__ import annotations
+
+ from ..prompt import build_system_prompt
+
+
+ class AnthropicProvider:
+     def __init__(self, model: str = "claude-haiku-4-5-20251001", max_tokens: int = 1024) -> None:
+         self._model = model
+         self._max_tokens = max_tokens
+
+     @property
+     def name(self) -> str:
+         return "anthropic"
+
+     def available(self) -> bool:
+         try:
+             import anthropic  # noqa: F401
+             return True
+         except ImportError:
+             return False
+
+     async def _create_message(self, system: str, messages: list[dict]) -> object:
+         import anthropic
+         client = anthropic.AsyncAnthropic()
+         return await client.messages.create(
+             model=self._model, max_tokens=self._max_tokens,
+             system=system, messages=messages,
+         )
+
+     async def generate(self, intent: str, namespace_desc: str,
+                        config: dict | None = None, error_feedback: list[str] | None = None) -> str | None:
+         if not self.available():
+             return None
+         system = build_system_prompt(namespace_desc)
+         user_msg = intent
+         if error_feedback:
+             user_msg += "\n\nPrevious attempt had these errors, please fix:\n" + "\n".join(f"- {e}" for e in error_feedback)
+         messages = [{"role": "user", "content": user_msg}]
+         try:
+             response = await self._create_message(system, messages)
+             content = response.content[0].text
+             return content.strip() if content else None
+         except Exception:
+             return None
@@ -0,0 +1,15 @@
+ """Base protocol for inference providers."""
+
+ from __future__ import annotations
+
+ from typing import Protocol
+
+
+ class InferenceProvider(Protocol):
+     @property
+     def name(self) -> str: ...
+
+     def available(self) -> bool: ...
+
+     async def generate(
+         self, intent: str, namespace_desc: str,
+         config: dict | None = None, error_feedback: list[str] | None = None,
+     ) -> str | None: ...
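
Because InferenceProvider is a typing.Protocol, any class with matching members conforms structurally, with no inheritance needed; a small sketch (module path assumed):

    from lackpy.infer.providers.base import InferenceProvider  # module path assumed

    def describe(provider: InferenceProvider) -> str:
        # Works for RulesProvider, OllamaProvider, AnthropicProvider, or any stub.
        return f"{provider.name} (available: {provider.available()})"
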
@@ -0,0 +1,50 @@
+ """Tier 2: Ollama local model inference."""
+
+ from __future__ import annotations
+
+ from ..prompt import build_system_prompt
+
+
+ class OllamaProvider:
+     def __init__(self, host: str = "http://localhost:11434", model: str = "qwen2.5-coder:1.5b",
+                  temperature: float = 0.2, keep_alive: str = "30m") -> None:
+         self._host = host
+         self._model = model
+         self._temperature = temperature
+         self._keep_alive = keep_alive
+
+     @property
+     def name(self) -> str:
+         return "ollama"
+
+     def available(self) -> bool:
+         try:
+             import ollama  # noqa: F401
+             return True
+         except ImportError:
+             return False
+
+     async def _chat(self, messages: list[dict], **kwargs) -> dict:
+         import ollama
+         client = ollama.AsyncClient(host=self._host)
+         return await client.chat(
+             model=self._model, messages=messages,
+             options={"temperature": self._temperature},
+             keep_alive=self._keep_alive, **kwargs,
+         )
+
+     async def generate(self, intent: str, namespace_desc: str,
+                        config: dict | None = None, error_feedback: list[str] | None = None) -> str | None:
+         if not self.available():
+             return None
+         system = build_system_prompt(namespace_desc)
+         user_msg = intent
+         if error_feedback:
+             user_msg += "\n\nPrevious attempt had these errors, please fix:\n" + "\n".join(f"- {e}" for e in error_feedback)
+         messages = [{"role": "system", "content": system}, {"role": "user", "content": user_msg}]
+         try:
+             response = await self._chat(messages)
+             content = response["message"]["content"]
+             return content.strip() if content else None
+         except Exception:
+             return None
@@ -0,0 +1,46 @@
+ """Tier 1: Rule-based inference — deterministic keyword-to-program mapping."""
+
+ from __future__ import annotations
+
+ import re
+
+
+ class RulesProvider:
+     @property
+     def name(self) -> str:
+         return "rules"
+
+     def available(self) -> bool:
+         return True
+
+     async def generate(self, intent: str, namespace_desc: str,
+                        config: dict | None = None, error_feedback: list[str] | None = None) -> str | None:
+         # Match case-insensitively against the original text so captured
+         # paths and identifiers keep their casing.
+         original = intent.strip()
+
+         m = re.match(r"read (?:the )?file (.+)", original, re.IGNORECASE)
+         if m and "read(" in namespace_desc:
+             path = m.group(1).strip().strip("'\"")
+             return f"content = read('{path}')\ncontent"
+
+         m = re.match(r"find (?:the )?definitions? (?:of |for )?(.+)", original, re.IGNORECASE)
+         if m and "find_definitions(" in namespace_desc:
+             name = m.group(1).strip().strip("'\"")
+             return f"results = find_definitions('{name}')\nresults"
+
+         m = re.match(r"find (?:all )?(?:callers?|usages?|references?) (?:of |for )?(.+)", original, re.IGNORECASE)
+         if m and "find_callers(" in namespace_desc:
+             name = m.group(1).strip().strip("'\"")
+             return f"results = find_callers('{name}')\nresults"
+
+         m = re.match(r"(?:find|list) all (\w+) files", original, re.IGNORECASE)
+         if m and "glob(" in namespace_desc:
+             ext = m.group(1).strip().lower()
+             return f"files = glob('**/*.{ext}')\nfiles"
+
+         m = re.match(r"glob (.+)", original, re.IGNORECASE)
+         if m and "glob(" in namespace_desc:
+             pattern = m.group(1).strip().strip("'\"")
+             return f"files = glob('{pattern}')\nfiles"
+
+         return None
+ return None