veryfront 0.1.71 → 0.1.72

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/esm/cli/commands/files/command-help.d.ts +3 -0
  2. package/esm/cli/commands/files/command-help.d.ts.map +1 -0
  3. package/esm/cli/commands/files/command-help.js +38 -0
  4. package/esm/cli/commands/files/command.d.ts +105 -0
  5. package/esm/cli/commands/files/command.d.ts.map +1 -0
  6. package/esm/cli/commands/files/command.js +250 -0
  7. package/esm/cli/commands/files/handler.d.ts +3 -0
  8. package/esm/cli/commands/files/handler.d.ts.map +1 -0
  9. package/esm/cli/commands/files/handler.js +4 -0
  10. package/esm/cli/commands/files/index.d.ts +4 -0
  11. package/esm/cli/commands/files/index.d.ts.map +1 -0
  12. package/esm/cli/commands/files/index.js +2 -0
  13. package/esm/cli/commands/knowledge/command-help.d.ts +3 -0
  14. package/esm/cli/commands/knowledge/command-help.d.ts.map +1 -0
  15. package/esm/cli/commands/knowledge/command-help.js +38 -0
  16. package/esm/cli/commands/knowledge/command.d.ts +122 -0
  17. package/esm/cli/commands/knowledge/command.d.ts.map +1 -0
  18. package/esm/cli/commands/knowledge/command.js +382 -0
  19. package/esm/cli/commands/knowledge/handler.d.ts +3 -0
  20. package/esm/cli/commands/knowledge/handler.d.ts.map +1 -0
  21. package/esm/cli/commands/knowledge/handler.js +4 -0
  22. package/esm/cli/commands/knowledge/index.d.ts +3 -0
  23. package/esm/cli/commands/knowledge/index.d.ts.map +1 -0
  24. package/esm/cli/commands/knowledge/index.js +2 -0
  25. package/esm/cli/commands/knowledge/parser-source.d.ts +2 -0
  26. package/esm/cli/commands/knowledge/parser-source.d.ts.map +1 -0
  27. package/esm/cli/commands/knowledge/parser-source.js +415 -0
  28. package/esm/cli/commands/uploads/command-help.d.ts +3 -0
  29. package/esm/cli/commands/uploads/command-help.d.ts.map +1 -0
  30. package/esm/cli/commands/uploads/command-help.js +43 -0
  31. package/esm/cli/commands/uploads/command.d.ts +140 -0
  32. package/esm/cli/commands/uploads/command.d.ts.map +1 -0
  33. package/esm/cli/commands/uploads/command.js +323 -0
  34. package/esm/cli/commands/uploads/handler.d.ts +3 -0
  35. package/esm/cli/commands/uploads/handler.d.ts.map +1 -0
  36. package/esm/cli/commands/uploads/handler.js +4 -0
  37. package/esm/cli/commands/uploads/index.d.ts +4 -0
  38. package/esm/cli/commands/uploads/index.d.ts.map +1 -0
  39. package/esm/cli/commands/uploads/index.js +2 -0
  40. package/esm/cli/help/command-definitions.d.ts.map +1 -1
  41. package/esm/cli/help/command-definitions.js +6 -0
  42. package/esm/cli/router.d.ts.map +1 -1
  43. package/esm/cli/router.js +6 -0
  44. package/esm/deno.js +1 -1
  45. package/esm/src/errors/error-registry.d.ts +2 -0
  46. package/esm/src/errors/error-registry.d.ts.map +1 -1
  47. package/esm/src/errors/error-registry.js +8 -0
  48. package/esm/src/errors/index.d.ts +1 -1
  49. package/esm/src/errors/index.d.ts.map +1 -1
  50. package/esm/src/errors/index.js +1 -1
  51. package/esm/src/platform/compat/media-types.d.ts +5 -0
  52. package/esm/src/platform/compat/media-types.d.ts.map +1 -0
  53. package/esm/src/platform/compat/media-types.js +19 -0
  54. package/esm/src/platform/index.d.ts +1 -0
  55. package/esm/src/platform/index.d.ts.map +1 -1
  56. package/esm/src/platform/index.js +2 -0
  57. package/esm/src/server/bootstrap.js +5 -1
  58. package/package.json +2 -1
  59. package/src/cli/commands/files/command-help.ts +40 -0
  60. package/src/cli/commands/files/command.ts +328 -0
  61. package/src/cli/commands/files/handler.ts +6 -0
  62. package/src/cli/commands/files/index.ts +19 -0
  63. package/src/cli/commands/knowledge/command-help.ts +40 -0
  64. package/src/cli/commands/knowledge/command.ts +513 -0
  65. package/src/cli/commands/knowledge/handler.ts +6 -0
  66. package/src/cli/commands/knowledge/index.ts +2 -0
  67. package/src/cli/commands/knowledge/parser-source.ts +415 -0
  68. package/src/cli/commands/uploads/command-help.ts +45 -0
  69. package/src/cli/commands/uploads/command.ts +465 -0
  70. package/src/cli/commands/uploads/handler.ts +6 -0
  71. package/src/cli/commands/uploads/index.ts +23 -0
  72. package/src/cli/help/command-definitions.ts +6 -0
  73. package/src/cli/router.ts +6 -0
  74. package/src/deno.js +1 -1
  75. package/src/src/errors/error-registry.ts +9 -0
  76. package/src/src/errors/index.ts +1 -0
  77. package/src/src/platform/compat/media-types.ts +23 -0
  78. package/src/src/platform/index.ts +3 -0
  79. package/src/src/server/bootstrap.ts +6 -1
@@ -0,0 +1,415 @@
1
+ export const knowledgeIngestPythonSource = String.raw `#!/usr/bin/env python3
2
+ import argparse
3
+ import csv
4
+ import json
5
+ import re
6
+ from datetime import date
7
+ from pathlib import Path
8
+ from typing import Any, Optional
9
+
10
+
11
def yaml_quote(value: Any) -> str:
    """Render *value* as a JSON-escaped double-quoted string ("" for None).

    JSON string escaping is a safe subset of YAML double-quoted scalars,
    so the result can be embedded directly in frontmatter.
    """
    text = "" if value is None else str(value)
    return json.dumps(text, ensure_ascii=False)
13
+
14
+
15
# Markdown triple-backtick fence built via chr(96): a literal backtick here
# would terminate the surrounding JS String.raw template this script is
# embedded in (see the `knowledgeIngestPythonSource` export).
CODE_FENCE = chr(96) * 3
16
+
17
+
18
def slugify(value: str) -> str:
    """Lower-case *value*, collapse non-alphanumeric runs into hyphens.

    Falls back to "document" when nothing slug-worthy remains.
    """
    collapsed = re.sub(r"[^a-z0-9]+", "-", value.lower())
    return collapsed.strip("-") or "document"
21
+
22
+
23
def titleize_filename(path: Path) -> str:
    """Derive a human-readable Title Case heading from a file name's stem."""
    words = path.stem.translate(str.maketrans("_-", "  ")).strip()
    # An all-separator stem titleizes to "", so fall back to the raw name.
    return words.title() or path.name
26
+
27
+
28
def clean_text(value: str) -> str:
    """Normalize newlines to \\n, cap blank runs at one empty line, trim ends."""
    unified = value.replace("\r\n", "\n").replace("\r", "\n")
    return re.sub(r"\n{3,}", "\n\n", unified).strip()
32
+
33
+
34
def table_to_markdown(rows: list[list[Any]]) -> str:
    """Render *rows* as a GitHub-style markdown table.

    The first row becomes the header. Cells are flattened to single-line
    text with pipes escaped; short rows are right-padded so every row has
    the same column count. Returns "" for empty input or all-empty rows.
    """
    if not rows:
        return ""

    normalized: list[list[str]] = []
    max_cols = 0
    for row in rows:
        # Bug fix: `str(cell or "")` erased falsy values such as 0 and
        # False from the table, so only None is treated as an empty cell.
        normalized_row = [
            ("" if cell is None else str(cell)).replace("|", "\\|").replace("\n", " ").strip()
            for cell in row
        ]
        max_cols = max(max_cols, len(normalized_row))
        normalized.append(normalized_row)

    if max_cols == 0:
        return ""

    for row in normalized:
        row.extend([""] * (max_cols - len(row)))

    header, body = normalized[0], normalized[1:]
    lines = [
        "| " + " | ".join(header) + " |",
        "| " + " | ".join(["---"] * max_cols) + " |",
    ]
    lines.extend("| " + " | ".join(row) + " |" for row in body)
    return "\n".join(lines)
61
+
62
+
63
def build_frontmatter(source: str, source_type: str, description: str) -> str:
    """Compose the YAML frontmatter block for a generated knowledge file."""
    fields = [
        ("source", source),
        ("source_type", source_type),
        ("added", date.today().isoformat()),
        ("description", description),
    ]
    lines = ["---"]
    lines.extend(f"{key}: {yaml_quote(value)}" for key, value in fields)
    lines.append("---")
    return "\n".join(lines)
72
+
73
+
74
def parse_csv_like(path: str, delimiter: str = ",", max_rows: int = 200):
    """Parse a CSV/TSV file into markdown.

    Returns (markdown, stats, warnings). At most *max_rows* data rows are
    rendered (previously a hard-coded 200); the remainder is summarized in
    a truncation note and recorded as a warning.
    """
    warnings: list[str] = []
    # utf-8-sig transparently drops the BOM that spreadsheet exports prepend.
    with open(path, newline="", encoding="utf-8-sig") as file:
        rows = list(csv.reader(file, delimiter=delimiter))

    if not rows:
        return "_Empty file._", {"rows": 0, "columns": 0}, warnings

    header, data = rows[0], rows[1:]
    parts = [
        f"**Rows:** {len(data)} | **Columns:** {len(header)}",
        "",
        table_to_markdown([header] + data[:max_rows]),
    ]
    hidden = len(data) - max_rows
    if hidden > 0:
        warnings.append(f"Truncated {hidden} rows from markdown output")
        parts.append(f"\n_...and {hidden} more rows (truncated)._")
    stats = {"rows": len(data), "columns": len(header)}
    return "\n".join(parts).strip(), stats, warnings
96
+
97
+
98
def parse_pdf(path: str):
    """Extract text and tables from a PDF, one "## Page N" section per page.

    Returns (markdown, stats, warnings); pages are separated by "---" rules.
    """
    import pdfplumber

    warnings: list[str] = []
    sections: list[str] = []
    page_count = 0
    table_count = 0

    with pdfplumber.open(path) as pdf:
        for page_number, page in enumerate(pdf.pages, 1):
            page_count += 1
            page_text = (page.extract_text() or "").strip()
            page_tables = page.extract_tables() or []
            table_count += len(page_tables)

            chunk = [f"## Page {page_number}"]
            if page_text:
                chunk.append(page_text)
            for table_number, table in enumerate(page_tables, 1):
                if table:
                    chunk.append(f"\n### Table {table_number}")
                    chunk.append(table_to_markdown(table))
            sections.append("\n\n".join(chunk).strip())

    content = "\n\n---\n\n".join(section for section in sections if section)
    stats = {"pages": page_count, "tables": table_count}
    return content or "_No extractable text found in PDF._", stats, warnings
127
+
128
+
129
def iter_docx_elements(document):
    """Yield paragraphs and tables from a python-docx Document in body order.

    python-docx exposes paragraphs and tables as separate collections, so
    document order is recovered by walking the raw XML body and matching
    each node back to its high-level wrapper by object identity.

    Yields dicts: {"type": "paragraph", "text": ..., "style": ...} or
    {"type": "table", "rows": [[cell_text, ...], ...]}.
    """
    from docx.oxml.ns import qn

    body = document.element.body
    for child in body:
        if child.tag == qn("w:p"):
            # Identity match (`is`) against the private _element attribute;
            # O(n) per node, acceptable for typical document sizes.
            paragraph = next((p for p in document.paragraphs if p._element is child), None)
            if paragraph:
                yield {
                    "type": "paragraph",
                    "text": paragraph.text,
                    "style": paragraph.style.name if paragraph.style else None,
                }
        elif child.tag == qn("w:tbl"):
            table = next((t for t in document.tables if t._element is child), None)
            if table:
                rows = []
                for row in table.rows:
                    rows.append([cell.text.strip() for cell in row.cells])
                yield {"type": "table", "rows": rows}
149
+
150
+
151
def parse_docx(path: str):
    """Convert a .docx document to markdown, mapping Word styles to
    headings/list items and tables to markdown tables.

    Returns (markdown, stats, warnings).
    """
    from docx import Document

    warnings: list[str] = []
    parts: list[str] = []
    paragraph_count = 0
    table_count = 0

    # First matching style substring wins; plain paragraphs get no prefix.
    style_prefixes = (
        ("Heading 1", "# "),
        ("Heading 2", "## "),
        ("Heading 3", "### "),
        ("List", "- "),
    )

    for element in iter_docx_elements(Document(path)):
        if element["type"] == "table":
            table_count += 1
            parts.append(table_to_markdown(element["rows"]))
            continue
        text = element["text"].strip()
        if not text:
            continue
        paragraph_count += 1
        style = element["style"] or ""
        prefix = next((mark for name, mark in style_prefixes if name in style), "")
        parts.append(prefix + text)

    stats = {"paragraphs": paragraph_count, "tables": table_count}
    return "\n\n".join(part for part in parts if part).strip(), stats, warnings
183
+
184
+
185
def parse_excel(path: str):
    """Convert every sheet of an Excel workbook into a markdown section.

    Each sheet renders at most 200 data rows; overflow is noted and added
    to warnings. Returns (markdown, stats, warnings).
    """
    import pandas as pd

    warnings: list[str] = []
    parts: list[str] = []
    sheet_names: list[str] = []
    total_rows = 0

    # sheet_name=None loads all sheets; dtype=str keeps cell text verbatim.
    for sheet_name, frame in pd.read_excel(path, sheet_name=None, dtype=str).items():
        sheet_names.append(sheet_name)
        cleaned = frame.fillna("")
        data_rows = cleaned.values.tolist()
        header = [str(column) for column in cleaned.columns.tolist()]
        total_rows += len(data_rows)
        parts.append(f"## Sheet: {sheet_name}")
        parts.append(f"**Rows:** {len(data_rows)} | **Columns:** {len(header)}")
        parts.append(table_to_markdown([header] + data_rows[:200]))
        overflow = len(data_rows) - 200
        if overflow > 0:
            warnings.append(f"Truncated {overflow} rows from sheet {sheet_name}")
            parts.append(f"_...and {overflow} more rows (truncated)._")

    stats = {"sheets": len(sheet_names), "rows": total_rows, "sheet_names": sheet_names}
    return "\n\n".join(part for part in parts if part).strip(), stats, warnings
210
+
211
+
212
def parse_pptx(path: str):
    """Convert a PowerPoint deck to markdown, one "## Slide N" section per
    non-empty slide; shape text and tables are collected in shape order.

    Returns (markdown, stats, warnings).
    """
    from pptx import Presentation

    warnings: list[str] = []
    parts: list[str] = []
    slide_count = 0
    table_count = 0

    for slide_number, slide in enumerate(Presentation(path).slides, 1):
        slide_count += 1
        collected: list[str] = []
        for shape in slide.shapes:
            # getattr guards: not every shape type exposes these flags.
            if getattr(shape, "has_text_frame", False):
                collected.extend(
                    paragraph.text.strip()
                    for paragraph in shape.text_frame.paragraphs
                    if paragraph.text.strip()
                )
            if getattr(shape, "has_table", False):
                table_count += 1
                grid = [[cell.text.strip() for cell in row.cells] for row in shape.table.rows]
                collected.append(table_to_markdown(grid))
        if collected:
            parts.append(f"## Slide {slide_number}")
            parts.append("\n\n".join(collected))

    stats = {"slides": slide_count, "tables": table_count}
    return "\n\n".join(parts).strip(), stats, warnings
242
+
243
+
244
def parse_html(path: str):
    """Convert an HTML document to markdown: body text plus extracted tables.

    Returns (markdown, stats, warnings).
    """
    from bs4 import BeautifulSoup

    warnings: list[str] = []
    with open(path, encoding="utf-8") as file:
        soup = BeautifulSoup(file.read(), "lxml")

    # Drop page chrome that carries no document content.
    for tag in soup(["script", "style", "nav", "footer", "header"]):
        tag.decompose()

    table_parts: list[str] = []
    table_count = 0
    # Table numbering follows document position, so empty tables may leave
    # gaps in the "### Table N" numbering (matches original behavior).
    for table_number, table in enumerate(soup.find_all("table"), 1):
        rows = [
            [cell.get_text(" ", strip=True) for cell in row.find_all(["th", "td"])]
            for row in table.find_all("tr")
        ]
        if rows:
            table_count += 1
            table_parts.append(f"### Table {table_number}")
            table_parts.append(table_to_markdown(rows))
        # Remove the table so it is not duplicated in the plain-text pass.
        table.decompose()

    text = clean_text(soup.get_text("\n"))
    parts = ([text] if text else []) + table_parts

    stats = {"tables": table_count, "characters": len(text)}
    return "\n\n".join(part for part in parts if part).strip(), stats, warnings
271
+
272
+
273
def parse_text(path: str):
    """Read a plain-text/markdown file and normalize its whitespace.

    Returns (text, stats, warnings).
    """
    warnings: list[str] = []
    with open(path, encoding="utf-8") as file:
        text = clean_text(file.read())
    line_count = len(text.splitlines()) if text else 0
    return text, {"characters": len(text), "lines": line_count}, warnings
279
+
280
+
281
def parse_json(path: str):
    """Convert a JSON file to markdown.

    A list of homogeneous objects is rendered as a table (first 200 records);
    anything else is emitted verbatim inside a fenced json code block.
    Returns (markdown, stats, warnings).
    """
    warnings: list[str] = []
    with open(path, encoding="utf-8") as file:
        data = json.load(file)

    # Tabular rendering requires every record to be an object: the original
    # checked only data[0], so a mixed list crashed on row.get() below.
    if isinstance(data, list) and data and all(isinstance(row, dict) for row in data):
        headers = list(data[0].keys())
        rows = [headers] + [[row.get(header, "") for header in headers] for row in data[:200]]
        content_parts = [
            f"**Records:** {len(data)} | **Fields:** {len(headers)}",
            "",
            table_to_markdown(rows),
        ]
        if len(data) > 200:
            warnings.append(f"Truncated {len(data) - 200} records from markdown output")
            content_parts.append(f"\n_...and {len(data) - 200} more records (truncated)._")
        stats = {"records": len(data), "fields": len(headers)}
        return "\n".join(content_parts).strip(), stats, warnings

    rendered = json.dumps(data, indent=2, ensure_ascii=False)
    stats = {"top_level_type": type(data).__name__}
    return f"{CODE_FENCE}json\n{rendered}\n{CODE_FENCE}", stats, warnings
303
+
304
+
305
def select_parser(path: Path):
    """Map a file extension to a (source_type, parser callable) pair.

    Raises ValueError for unsupported extensions.
    """
    ext = path.suffix.lower()

    # CSV/TSV share one parser, distinguished only by delimiter.
    if ext == ".csv":
        return "csv", lambda file_path: parse_csv_like(file_path, ",")
    if ext == ".tsv":
        return "tsv", lambda file_path: parse_csv_like(file_path, "\t")

    dispatch = {
        ".pdf": ("pdf", parse_pdf),
        ".xlsx": ("xlsx", parse_excel),
        ".xls": ("xls", parse_excel),
        ".docx": ("docx", parse_docx),
        ".pptx": ("pptx", parse_pptx),
        ".html": ("html", parse_html),
        ".htm": ("html", parse_html),
        ".txt": ("txt", parse_text),
        ".md": ("md", parse_text),
        ".mdx": ("mdx", parse_text),
        ".json": ("json", parse_json),
    }
    if ext in dispatch:
        return dispatch[ext]
    raise ValueError(f"Unsupported file type: {ext}")
325
+
326
+
327
def build_summary(source_type: str, stats: dict[str, Any]) -> str:
    """Produce a one-line human-readable summary of parser statistics."""
    rows = stats.get("rows", 0)
    tables = stats.get("tables", 0)

    if source_type in ("csv", "tsv"):
        return f"Parsed {rows} rows across {stats.get('columns', 0)} columns."
    if source_type in ("xlsx", "xls"):
        return f"Parsed {stats.get('sheets', 0)} sheet(s) with {rows} total rows."
    if source_type == "pdf":
        return f"Extracted {stats.get('pages', 0)} page(s) and {tables} table(s)."
    if source_type == "docx":
        return f"Extracted {stats.get('paragraphs', 0)} paragraphs and {tables} tables."
    if source_type == "pptx":
        return f"Extracted {stats.get('slides', 0)} slide(s)."
    if source_type == "json":
        # JSON stats differ by shape: record lists carry "records"/"fields".
        if "records" in stats:
            return f"Parsed {stats.get('records', 0)} record(s) across {stats.get('fields', 0)} fields."
        return f"Converted JSON ({stats.get('top_level_type', 'object')}) to markdown."
    if source_type == "html":
        return f"Converted HTML with {tables} table(s) to markdown."
    # Fallback for plain-text types (txt/md/mdx).
    return f"Converted document to markdown ({stats.get('characters', 0)} chars)."
345
+
346
+
347
def ingest_document_to_knowledge(file_path: str, output_dir: Optional[str] = None, description: Optional[str] = None, slug: Optional[str] = None, source_reference: Optional[str] = None):
    """Parse *file_path* into a markdown knowledge document and write it.

    The parser is chosen by extension; output lands in *output_dir*
    (default /workspace/knowledge) as <slug>.md with YAML frontmatter.
    Returns a result dict with paths, parser stats, and warnings.
    Raises FileNotFoundError when the input does not exist.
    """
    source = Path(file_path)
    if not source.exists():
        raise FileNotFoundError(f"File not found: {file_path}")

    destination_root = Path(output_dir or "/workspace/knowledge")
    destination_root.mkdir(parents=True, exist_ok=True)

    document_slug = slug or slugify(source.stem)

    source_type, parser = select_parser(source)
    content, stats, warnings = parser(str(source))
    content = clean_text(content)

    resolved_description = description or f"Parsed from {source.name}"
    title = titleize_filename(source)
    frontmatter = build_frontmatter(source_reference or source.name, source_type, resolved_description)

    destination = destination_root / f"{document_slug}.md"
    destination.write_text(f"{frontmatter}\n\n# {title}\n\n{content}\n", encoding="utf-8")

    return {
        "success": True,
        "source_path": str(source),
        "source_filename": source.name,
        "source_type": source_type,
        "slug": document_slug,
        "sandbox_output_path": str(destination),
        "suggested_project_path": f"knowledge/{document_slug}.md",
        "description": resolved_description,
        "title": title,
        "summary": build_summary(source_type, stats),
        "stats": stats,
        "warnings": warnings,
    }
384
+
385
+
386
def main():
    """CLI entry point: read a JSON request file, run the ingest, and write
    the result dict as JSON to the requested output path."""
    parser = argparse.ArgumentParser(description="Convert a local document into knowledge-base markdown")
    parser.add_argument("--input-json", required=True)
    parser.add_argument("--output-json", required=True)
    args = parser.parse_args()

    try:
        payload = json.loads(Path(args.input_json).read_text(encoding="utf-8"))
        result = ingest_document_to_knowledge(
            file_path=payload["file_path"],
            output_dir=payload.get("output_dir"),
            description=payload.get("description"),
            slug=payload.get("slug"),
            source_reference=payload.get("source_reference"),
        )
    except ModuleNotFoundError as error:
        # Parser dependencies are imported lazily inside each parser, so a
        # missing one surfaces here; exit with a pip hint, not a traceback.
        missing_package = error.name or "required package"
        raise SystemExit(
            "Missing Python package '"
            + missing_package
            + "'. Install knowledge parser dependencies with: "
            + "pip install pandas openpyxl xlrd pdfplumber python-docx python-pptx beautifulsoup4 lxml"
        )

    Path(args.output_json).write_text(json.dumps(result, ensure_ascii=False, indent=2), encoding="utf-8")
411
+
412
+
413
# Script entry point: the embedded source is written to disk and executed
# directly, e.g. `python ... --input-json req.json --output-json out.json`.
if __name__ == "__main__":
    main()
415
+ `;
@@ -0,0 +1,3 @@
1
// Generated declaration (tsc output) for uploads/command-help.js.
import type { CommandHelp } from "../../help/types.js";
export declare const uploadsHelp: CommandHelp;
//# sourceMappingURL=command-help.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"command-help.d.ts","sourceRoot":"","sources":["../../../../src/cli/commands/uploads/command-help.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,qBAAqB,CAAC;AAEvD,eAAO,MAAM,WAAW,EAAE,WA0CzB,CAAC"}
@@ -0,0 +1,43 @@
1
/**
 * Help metadata for the `veryfront uploads` command group (shape matches
 * the CommandHelp type declared in command-help.d.ts). Pure data: the CLI
 * help renderer consumes the name/usage/options/examples/notes fields.
 */
export const uploadsHelp = {
    name: "uploads",
    description: "List, pull, upload, and delete project uploads",
    usage: "veryfront uploads <command> [options]",
    options: [
        {
            flag: "--project, -p <slug>",
            description: "Project slug override (otherwise inferred from env/config)",
        },
        {
            flag: "--path <prefix>",
            description: "Folder prefix to scope list/pull operations",
        },
        {
            flag: "--all",
            description: "Pull all file uploads under the given --path prefix",
        },
        {
            flag: "--from <path>",
            description: "Local file used by uploads put",
        },
        {
            flag: "--output-dir <dir>",
            description: "Directory to write downloaded uploads into",
            default: "./uploads",
        },
        {
            flag: "--json, -j",
            description: "Output machine-readable JSON",
        },
    ],
    examples: [
        "veryfront uploads list --json",
        "veryfront uploads pull contracts/q1.pdf --output-dir /workspace/uploads",
        "veryfront uploads put contracts/q1.pdf --from /workspace/uploads/q1.pdf",
        "veryfront uploads delete contracts/q1.pdf",
    ],
    notes: [
        "Subcommands: list, pull, put, delete",
        "Uses the project uploads store, not project source files",
        "`put` reuses the same upload path for create/update semantics",
    ],
};
@@ -0,0 +1,140 @@
1
+ import { z } from "zod";
2
+ import { type ApiClient } from "../../shared/config.js";
3
+ import type { ParsedArgs } from "../../shared/types.js";
4
/**
 * One entry in a project uploads listing; `type` distinguishes files from
 * folder prefixes. NOTE(review): which optional metadata fields are present
 * for folders vs files is inferred from the API payload — confirm upstream.
 */
export interface UploadItem {
    type: "file" | "folder";
    path: string;
    file_name?: string;
    size?: number;
    content_type?: string | null;
    status?: string;
    visibility?: string;
    created_at?: string;
    updated_at?: string;
}
15
/**
 * Response from the upload-create endpoint. NOTE(review): field semantics
 * inferred from names — presumably `file_upload_url` is a pre-signed URL
 * and `required_headers` must accompany the subsequent upload request;
 * verify against the server API.
 */
interface CreateUploadResponse {
    file_upload_url: string;
    file_path: string;
    upload_id: string;
    required_headers: Record<string, string>;
}
21
+ declare const UploadListArgsSchema: z.ZodObject<{
22
+ projectSlug: z.ZodOptional<z.ZodString>;
23
+ projectDir: z.ZodOptional<z.ZodString>;
24
+ path: z.ZodOptional<z.ZodString>;
25
+ limit: z.ZodOptional<z.ZodNumber>;
26
+ recursive: z.ZodDefault<z.ZodBoolean>;
27
+ json: z.ZodDefault<z.ZodBoolean>;
28
+ quiet: z.ZodDefault<z.ZodBoolean>;
29
+ }, "strip", z.ZodTypeAny, {
30
+ json: boolean;
31
+ recursive: boolean;
32
+ quiet: boolean;
33
+ path?: string | undefined;
34
+ projectSlug?: string | undefined;
35
+ projectDir?: string | undefined;
36
+ limit?: number | undefined;
37
+ }, {
38
+ path?: string | undefined;
39
+ projectSlug?: string | undefined;
40
+ json?: boolean | undefined;
41
+ recursive?: boolean | undefined;
42
+ projectDir?: string | undefined;
43
+ limit?: number | undefined;
44
+ quiet?: boolean | undefined;
45
+ }>;
46
+ export type UploadListOptions = z.infer<typeof UploadListArgsSchema>;
47
+ declare const UploadPullArgsSchema: z.ZodObject<{
48
+ projectSlug: z.ZodOptional<z.ZodString>;
49
+ projectDir: z.ZodOptional<z.ZodString>;
50
+ uploads: z.ZodDefault<z.ZodArray<z.ZodString, "many">>;
51
+ path: z.ZodOptional<z.ZodString>;
52
+ all: z.ZodDefault<z.ZodBoolean>;
53
+ outputDir: z.ZodDefault<z.ZodString>;
54
+ json: z.ZodDefault<z.ZodBoolean>;
55
+ quiet: z.ZodDefault<z.ZodBoolean>;
56
+ }, "strip", z.ZodTypeAny, {
57
+ outputDir: string;
58
+ json: boolean;
59
+ all: boolean;
60
+ uploads: string[];
61
+ quiet: boolean;
62
+ path?: string | undefined;
63
+ projectSlug?: string | undefined;
64
+ projectDir?: string | undefined;
65
+ }, {
66
+ path?: string | undefined;
67
+ projectSlug?: string | undefined;
68
+ outputDir?: string | undefined;
69
+ json?: boolean | undefined;
70
+ projectDir?: string | undefined;
71
+ all?: boolean | undefined;
72
+ uploads?: string[] | undefined;
73
+ quiet?: boolean | undefined;
74
+ }>;
75
+ export type UploadPullOptions = z.infer<typeof UploadPullArgsSchema>;
76
+ declare const UploadPutArgsSchema: z.ZodObject<{
77
+ projectSlug: z.ZodOptional<z.ZodString>;
78
+ projectDir: z.ZodOptional<z.ZodString>;
79
+ uploadPath: z.ZodString;
80
+ from: z.ZodString;
81
+ contentType: z.ZodOptional<z.ZodString>;
82
+ json: z.ZodDefault<z.ZodBoolean>;
83
+ quiet: z.ZodDefault<z.ZodBoolean>;
84
+ }, "strip", z.ZodTypeAny, {
85
+ json: boolean;
86
+ from: string;
87
+ quiet: boolean;
88
+ uploadPath: string;
89
+ projectSlug?: string | undefined;
90
+ projectDir?: string | undefined;
91
+ contentType?: string | undefined;
92
+ }, {
93
+ from: string;
94
+ uploadPath: string;
95
+ projectSlug?: string | undefined;
96
+ json?: boolean | undefined;
97
+ projectDir?: string | undefined;
98
+ contentType?: string | undefined;
99
+ quiet?: boolean | undefined;
100
+ }>;
101
+ export type UploadPutOptions = z.infer<typeof UploadPutArgsSchema>;
102
+ declare const UploadDeleteArgsSchema: z.ZodObject<{
103
+ projectSlug: z.ZodOptional<z.ZodString>;
104
+ projectDir: z.ZodOptional<z.ZodString>;
105
+ uploadPath: z.ZodString;
106
+ json: z.ZodDefault<z.ZodBoolean>;
107
+ quiet: z.ZodDefault<z.ZodBoolean>;
108
+ }, "strip", z.ZodTypeAny, {
109
+ json: boolean;
110
+ quiet: boolean;
111
+ uploadPath: string;
112
+ projectSlug?: string | undefined;
113
+ projectDir?: string | undefined;
114
+ }, {
115
+ uploadPath: string;
116
+ projectSlug?: string | undefined;
117
+ json?: boolean | undefined;
118
+ projectDir?: string | undefined;
119
+ quiet?: boolean | undefined;
120
+ }>;
121
+ export type UploadDeleteOptions = z.infer<typeof UploadDeleteArgsSchema>;
122
+ export declare function parseUploadsListArgs(args: ParsedArgs): z.SafeParseReturnType<unknown, UploadListOptions>;
123
+ export declare function parseUploadsPullArgs(args: ParsedArgs): z.SafeParseReturnType<unknown, UploadPullOptions>;
124
+ export declare function parseUploadsPutArgs(args: ParsedArgs): z.SafeParseReturnType<unknown, UploadPutOptions>;
125
+ export declare function parseUploadsDeleteArgs(args: ParsedArgs): z.SafeParseReturnType<unknown, UploadDeleteOptions>;
126
+ export declare function buildUploadsListUrl(projectSlug: string): string;
127
+ export declare function buildUploadCreateUrl(projectSlug: string): string;
128
+ export declare function buildUploadSignedUrlPath(projectSlug: string, uploadPath: string): string;
129
+ export declare function listAllUploads(client: ApiClient, projectSlug: string, options?: Partial<Pick<UploadListOptions, "path" | "recursive" | "limit">>): Promise<UploadItem[]>;
130
+ export declare function resolveUploadOutputPath(uploadPath: string, outputDir: string): string;
131
+ export declare function downloadUploadToFile(client: ApiClient, projectSlug: string, uploadPath: string, outputDir: string): Promise<{
132
+ uploadPath: string;
133
+ localPath: string;
134
+ bytes: number;
135
+ }>;
136
+ export declare function uploadLocalFileToUploads(client: ApiClient, projectSlug: string, uploadPath: string, localPath: string, contentType?: string): Promise<CreateUploadResponse>;
137
+ export declare function deleteUpload(client: ApiClient, projectSlug: string, uploadPath: string): Promise<void>;
138
+ export declare function uploadsCommand(args: ParsedArgs): Promise<void>;
139
+ export {};
140
+ //# sourceMappingURL=command.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"command.d.ts","sourceRoot":"","sources":["../../../../src/cli/commands/uploads/command.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAKxB,OAAO,EAAE,KAAK,SAAS,EAA0C,MAAM,wBAAwB,CAAC;AAChG,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,uBAAuB,CAAC;AAExD,MAAM,WAAW,UAAU;IACzB,IAAI,EAAE,MAAM,GAAG,QAAQ,CAAC;IACxB,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,YAAY,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;IAC7B,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,UAAU,CAAC,EAAE,MAAM,CAAC;CACrB;AAeD,UAAU,oBAAoB;IAC5B,eAAe,EAAE,MAAM,CAAC;IACxB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,gBAAgB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAC1C;AAED,QAAA,MAAM,oBAAoB;;;;;;;;;;;;;;;;;;;;;;;;EAQxB,CAAC;AAEH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAErE,QAAA,MAAM,oBAAoB;;;;;;;;;;;;;;;;;;;;;;;;;;;EASxB,CAAC;AAEH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAErE,QAAA,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;EAQvB,CAAC;AAEH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEnE,QAAA,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;EAM1B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAmDzE,wBAAgB,oBAAoB,CAClC,IAAI,EAAE,UAAU,GACf,CAAC,CAAC,mBAAmB,CAAC,OAAO,EAAE,iBAAiB,CAAC,CAUnD;AAED,wBAAgB,oBAAoB,CAClC,IAAI,EAAE,UAAU,GACf,CAAC,CAAC,mBAAmB,CAAC,OAAO,EAAE,iBAAiB,CAAC,CAWnD;AAED,wBAAgB,mBAAmB,CACjC,IAAI,EAAE,UAAU,GACf,CAAC,CAAC,mBAAmB,CAAC,OAAO,EAAE,gBAAgB,CAAC,CAUlD;AAED,wBAAgB,sBAAsB,CACpC,IAAI,EAAE,UAAU,GACf,CAAC,CAAC,mBAAmB,CAAC,OAAO,EAAE,mBAAmB,CAAC,CAQrD;AAED,wBAAgB,mBAAmB,CAAC,WAAW,EAAE,MAAM,GAAG,MAAM,CAE/D;AAED,wBAAgB,oBAAoB,CAAC,WAAW,EAAE,MAAM,GAAG,MAAM,CAEhE;AAED,wBAAgB,wBAAwB,CAAC,WAAW,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,GAAG,MAAM,CAIxF;AAMD,wBAAsB,cAAc,CAClC,MAAM,EAAE,SAAS,EACjB,WAAW,EAAE,MAAM,EACnB,OAAO,GAAE,OAAO,CAAC,IAAI,CAAC,iBAAiB,EAAE,MAAM,GAAG,WAAW,GAAG,OAAO,CAAC,CAAM,GAC7E,OAAO,CAAC,UAAU,EAAE,CAAC,CAqBvB;AAED,wBAAg
B,uBAAuB,CAAC,UAAU,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,MAAM,CAUrF;AAED,wBAAsB,oBAAoB,CACxC,MAAM,EAAE,SAAS,EACjB,WAAW,EAAE,MAAM,EACnB,UAAU,EAAE,MAAM,EAClB,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC;IAAE,UAAU,EAAE,MAAM,CAAC;IAAC,SAAS,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,CAAC,CAiBnE;AAED,wBAAsB,wBAAwB,CAC5C,MAAM,EAAE,SAAS,EACjB,WAAW,EAAE,MAAM,EACnB,UAAU,EAAE,MAAM,EAClB,SAAS,EAAE,MAAM,EACjB,WAAW,CAAC,EAAE,MAAM,GACnB,OAAO,CAAC,oBAAoB,CAAC,CA0B/B;AAED,wBAAsB,YAAY,CAChC,MAAM,EAAE,SAAS,EACjB,WAAW,EAAE,MAAM,EACnB,UAAU,EAAE,MAAM,GACjB,OAAO,CAAC,IAAI,CAAC,CAEf;AAED,wBAAsB,cAAc,CAAC,IAAI,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAqJpE"}