@cloudflare/sandbox 0.8.11 → 0.8.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bridge/index.js +7 -7
- package/dist/bridge/index.js.map +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/openai/index.d.ts +1 -1
- package/dist/opencode/index.d.ts +1 -1
- package/dist/opencode/index.d.ts.map +1 -1
- package/dist/{file-stream-Bn2PceyF.js → sandbox-CUVJMlma.js} +1125 -904
- package/dist/sandbox-CUVJMlma.js.map +1 -0
- package/dist/{sandbox-C0Tjs0dj.d.ts → sandbox-Chr1Ebo-.d.ts} +105 -22
- package/dist/sandbox-Chr1Ebo-.d.ts.map +1 -0
- package/package.json +1 -1
- package/dist/file-stream-Bn2PceyF.js.map +0 -1
- package/dist/sandbox-C0Tjs0dj.d.ts.map +0 -1
@@ -1645,18 +1645,14 @@ var BackupClient = class extends BaseHttpClient {
 	* @param sessionId - Session context
 	*/
 	async createArchive(dir, archivePath, sessionId, gitignore = false, excludes = []) {
-		try {
-			const data = {
-				dir,
-				archivePath,
-				gitignore,
-				excludes,
-				sessionId
-			};
-			return await this.post("/api/backup/create", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			dir,
+			archivePath,
+			gitignore,
+			excludes,
+			sessionId
+		};
+		return await this.post("/api/backup/create", data);
 	}
 	/**
 	* Tell the container to restore a squashfs archive into a directory.
@@ -1665,16 +1661,12 @@ var BackupClient = class extends BaseHttpClient {
 	* @param sessionId - Session context
 	*/
 	async restoreArchive(dir, archivePath, sessionId) {
-		try {
-			const data = {
-				dir,
-				archivePath,
-				sessionId
-			};
-			return await this.post("/api/backup/restore", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			dir,
+			archivePath,
+			sessionId
+		};
+		return await this.post("/api/backup/restore", data);
 	}
 };
 
@@ -1770,280 +1762,200 @@ var DesktopClient = class extends BaseHttpClient {
 	* Get desktop lifecycle and process health status.
 	*/
 	async status() {
-		try {
-			return await this.get("/api/desktop/status");
-		} catch (error) {
-			throw error;
-		}
+		return await this.get("/api/desktop/status");
 	}
 	async screenshot(options) {
-		try {
-			const wantsBytes = options?.format === "bytes";
-			const data = {
-				format: "base64",
-				...options?.imageFormat !== void 0 && { imageFormat: options.imageFormat },
-				...options?.quality !== void 0 && { quality: options.quality },
-				...options?.showCursor !== void 0 && { showCursor: options.showCursor }
+		const wantsBytes = options?.format === "bytes";
+		const data = {
+			format: "base64",
+			...options?.imageFormat !== void 0 && { imageFormat: options.imageFormat },
+			...options?.quality !== void 0 && { quality: options.quality },
+			...options?.showCursor !== void 0 && { showCursor: options.showCursor }
+		};
+		const response = await this.post("/api/desktop/screenshot", data);
+		if (wantsBytes) {
+			const binaryString = atob(response.data);
+			const bytes = new Uint8Array(binaryString.length);
+			for (let i = 0; i < binaryString.length; i++) bytes[i] = binaryString.charCodeAt(i);
+			return {
+				...response,
+				data: bytes
 			};
-			const response = await this.post("/api/desktop/screenshot", data);
-			if (wantsBytes) {
-				const binaryString = atob(response.data);
-				const bytes = new Uint8Array(binaryString.length);
-				for (let i = 0; i < binaryString.length; i++) bytes[i] = binaryString.charCodeAt(i);
-				return {
-					...response,
-					data: bytes
-				};
-			}
-			return response;
-		} catch (error) {
-			throw error;
 		}
+		return response;
 	}
 	async screenshotRegion(region, options) {
-		try {
-			const wantsBytes = options?.format === "bytes";
-			const data = {
-				region,
-				format: "base64",
-				...options?.imageFormat !== void 0 && { imageFormat: options.imageFormat },
-				...options?.quality !== void 0 && { quality: options.quality },
-				...options?.showCursor !== void 0 && { showCursor: options.showCursor }
+		const wantsBytes = options?.format === "bytes";
+		const data = {
+			region,
+			format: "base64",
+			...options?.imageFormat !== void 0 && { imageFormat: options.imageFormat },
+			...options?.quality !== void 0 && { quality: options.quality },
+			...options?.showCursor !== void 0 && { showCursor: options.showCursor }
+		};
+		const response = await this.post("/api/desktop/screenshot/region", data);
+		if (wantsBytes) {
+			const binaryString = atob(response.data);
+			const bytes = new Uint8Array(binaryString.length);
+			for (let i = 0; i < binaryString.length; i++) bytes[i] = binaryString.charCodeAt(i);
+			return {
+				...response,
+				data: bytes
 			};
-			const response = await this.post("/api/desktop/screenshot/region", data);
-			if (wantsBytes) {
-				const binaryString = atob(response.data);
-				const bytes = new Uint8Array(binaryString.length);
-				for (let i = 0; i < binaryString.length; i++) bytes[i] = binaryString.charCodeAt(i);
-				return {
-					...response,
-					data: bytes
-				};
-			}
-			return response;
-		} catch (error) {
-			throw error;
 		}
+		return response;
 	}
 	/**
 	* Single-click at the given coordinates.
 	*/
 	async click(x, y, options) {
-		try {
-			await this.post("/api/desktop/mouse/click", {
-				x,
-				y,
-				button: options?.button ?? "left",
-				clickCount: 1
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/click", {
+			x,
+			y,
+			button: options?.button ?? "left",
+			clickCount: 1
+		});
 	}
 	/**
 	* Double-click at the given coordinates.
 	*/
 	async doubleClick(x, y, options) {
-		try {
-			await this.post("/api/desktop/mouse/click", {
-				x,
-				y,
-				button: options?.button ?? "left",
-				clickCount: 2
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/click", {
+			x,
+			y,
+			button: options?.button ?? "left",
+			clickCount: 2
+		});
 	}
 	/**
 	* Triple-click at the given coordinates.
 	*/
 	async tripleClick(x, y, options) {
-		try {
-			await this.post("/api/desktop/mouse/click", {
-				x,
-				y,
-				button: options?.button ?? "left",
-				clickCount: 3
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/click", {
+			x,
+			y,
+			button: options?.button ?? "left",
+			clickCount: 3
+		});
 	}
 	/**
 	* Right-click at the given coordinates.
 	*/
 	async rightClick(x, y) {
-		try {
-			await this.post("/api/desktop/mouse/click", {
-				x,
-				y,
-				button: "right",
-				clickCount: 1
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/click", {
+			x,
+			y,
+			button: "right",
+			clickCount: 1
+		});
 	}
 	/**
 	* Middle-click at the given coordinates.
 	*/
 	async middleClick(x, y) {
-		try {
-			await this.post("/api/desktop/mouse/click", {
-				x,
-				y,
-				button: "middle",
-				clickCount: 1
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/click", {
+			x,
+			y,
+			button: "middle",
+			clickCount: 1
+		});
 	}
 	/**
 	* Press and hold a mouse button.
 	*/
 	async mouseDown(x, y, options) {
-		try {
-			await this.post("/api/desktop/mouse/down", {
-				...x !== void 0 && { x },
-				...y !== void 0 && { y },
-				button: options?.button ?? "left"
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/down", {
+			...x !== void 0 && { x },
+			...y !== void 0 && { y },
+			button: options?.button ?? "left"
+		});
 	}
 	/**
 	* Release a held mouse button.
 	*/
 	async mouseUp(x, y, options) {
-		try {
-			await this.post("/api/desktop/mouse/up", {
-				...x !== void 0 && { x },
-				...y !== void 0 && { y },
-				button: options?.button ?? "left"
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/up", {
+			...x !== void 0 && { x },
+			...y !== void 0 && { y },
+			button: options?.button ?? "left"
+		});
 	}
 	/**
 	* Move the mouse cursor to coordinates.
 	*/
 	async moveMouse(x, y) {
-		try {
-			await this.post("/api/desktop/mouse/move", {
-				x,
-				y
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/move", {
+			x,
+			y
+		});
 	}
 	/**
 	* Drag from start coordinates to end coordinates.
 	*/
 	async drag(startX, startY, endX, endY, options) {
-		try {
-			await this.post("/api/desktop/mouse/drag", {
-				startX,
-				startY,
-				endX,
-				endY,
-				button: options?.button ?? "left"
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/drag", {
+			startX,
+			startY,
+			endX,
+			endY,
+			button: options?.button ?? "left"
+		});
 	}
 	/**
 	* Scroll at coordinates in the specified direction.
 	*/
 	async scroll(x, y, direction, amount = 3) {
-		try {
-			await this.post("/api/desktop/mouse/scroll", {
-				x,
-				y,
-				direction,
-				amount
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/mouse/scroll", {
+			x,
+			y,
+			direction,
+			amount
+		});
 	}
 	/**
 	* Get the current cursor coordinates.
 	*/
 	async getCursorPosition() {
-		try {
-			return await this.get("/api/desktop/mouse/position");
-		} catch (error) {
-			throw error;
-		}
+		return await this.get("/api/desktop/mouse/position");
 	}
 	/**
 	* Type text into the focused element.
 	*/
 	async type(text, options) {
-		try {
-			await this.post("/api/desktop/keyboard/type", {
-				text,
-				...options?.delayMs !== void 0 && { delayMs: options.delayMs }
-			});
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/keyboard/type", {
+			text,
+			...options?.delayMs !== void 0 && { delayMs: options.delayMs }
+		});
 	}
 	/**
 	* Press and release a key or key combination.
 	*/
 	async press(key) {
-		try {
-			await this.post("/api/desktop/keyboard/press", { key });
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/keyboard/press", { key });
 	}
 	/**
 	* Press and hold a key.
 	*/
 	async keyDown(key) {
-		try {
-			await this.post("/api/desktop/keyboard/down", { key });
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/keyboard/down", { key });
 	}
 	/**
 	* Release a held key.
 	*/
 	async keyUp(key) {
-		try {
-			await this.post("/api/desktop/keyboard/up", { key });
-		} catch (error) {
-			throw error;
-		}
+		await this.post("/api/desktop/keyboard/up", { key });
 	}
 	/**
 	* Get the active desktop screen size.
 	*/
 	async getScreenSize() {
-		try {
-			return await this.get("/api/desktop/screen/size");
-		} catch (error) {
-			throw error;
-		}
+		return await this.get("/api/desktop/screen/size");
 	}
 	/**
 	* Get health status for a specific desktop process.
 	*/
 	async getProcessStatus(name) {
-		try {
-			return await this.get(`/api/desktop/process/${encodeURIComponent(name)}/status`);
-		} catch (error) {
-			throw error;
-		}
+		return await this.get(`/api/desktop/process/${encodeURIComponent(name)}/status`);
 	}
 };
 
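The rewritten `screenshot()` and `screenshotRegion()` above decode the container's base64 payload into a `Uint8Array` on the client whenever the caller asks for `format: "bytes"`. A minimal sketch of that decoding step, assuming only the `atob` global available in Workers and browsers; the helper name `base64ToBytes` is illustrative, not an SDK export:

```ts
// Illustrative sketch of the atob-based decoding inlined above.
function base64ToBytes(data: string): Uint8Array {
	// atob yields a "binary string" where each char code is one byte
	const binaryString = atob(data);
	const bytes = new Uint8Array(binaryString.length);
	for (let i = 0; i < binaryString.length; i++) {
		bytes[i] = binaryString.charCodeAt(i);
	}
	return bytes;
}
```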
@@ -2060,16 +1972,12 @@ var FileClient = class extends BaseHttpClient {
 	* @param options - Optional settings (recursive)
 	*/
 	async mkdir(path$1, sessionId, options) {
-		try {
-			const data = {
-				path: path$1,
-				sessionId,
-				recursive: options?.recursive ?? false
-			};
-			return await this.post("/api/mkdir", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			path: path$1,
+			sessionId,
+			recursive: options?.recursive ?? false
+		};
+		return await this.post("/api/mkdir", data);
 	}
 	/**
 	* Write content to a file
@@ -2079,17 +1987,13 @@ var FileClient = class extends BaseHttpClient {
 	* @param options - Optional settings (encoding)
 	*/
 	async writeFile(path$1, content, sessionId, options) {
-		try {
-			const data = {
-				path: path$1,
-				content,
-				sessionId,
-				encoding: options?.encoding
-			};
-			return await this.post("/api/write", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			path: path$1,
+			content,
+			sessionId,
+			encoding: options?.encoding
+		};
+		return await this.post("/api/write", data);
 	}
 	/**
 	* Read content from a file
@@ -2098,16 +2002,12 @@ var FileClient = class extends BaseHttpClient {
 	* @param options - Optional settings (encoding)
 	*/
 	async readFile(path$1, sessionId, options) {
-		try {
-			const data = {
-				path: path$1,
-				sessionId,
-				encoding: options?.encoding
-			};
-			return await this.post("/api/read", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			path: path$1,
+			sessionId,
+			encoding: options?.encoding
+		};
+		return await this.post("/api/read", data);
 	}
 	/**
 	* Stream a file using Server-Sent Events
@@ -2116,15 +2016,11 @@ var FileClient = class extends BaseHttpClient {
 	* @param sessionId - The session ID for this operation
 	*/
 	async readFileStream(path$1, sessionId) {
-		try {
-			const data = {
-				path: path$1,
-				sessionId
-			};
-			return await this.doStreamFetch("/api/read/stream", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			path: path$1,
+			sessionId
+		};
+		return await this.doStreamFetch("/api/read/stream", data);
 	}
 	/**
 	* Delete a file
@@ -2132,15 +2028,11 @@ var FileClient = class extends BaseHttpClient {
 	* @param sessionId - The session ID for this operation
 	*/
 	async deleteFile(path$1, sessionId) {
-		try {
-			const data = {
-				path: path$1,
-				sessionId
-			};
-			return await this.post("/api/delete", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			path: path$1,
+			sessionId
+		};
+		return await this.post("/api/delete", data);
 	}
 	/**
 	* Rename a file
@@ -2149,16 +2041,12 @@ var FileClient = class extends BaseHttpClient {
 	* @param sessionId - The session ID for this operation
 	*/
 	async renameFile(path$1, newPath, sessionId) {
-		try {
-			const data = {
-				oldPath: path$1,
-				newPath,
-				sessionId
-			};
-			return await this.post("/api/rename", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			oldPath: path$1,
+			newPath,
+			sessionId
+		};
+		return await this.post("/api/rename", data);
 	}
 	/**
 	* Move a file
@@ -2167,16 +2055,12 @@ var FileClient = class extends BaseHttpClient {
 	* @param sessionId - The session ID for this operation
 	*/
 	async moveFile(path$1, newPath, sessionId) {
-		try {
-			const data = {
-				sourcePath: path$1,
-				destinationPath: newPath,
-				sessionId
-			};
-			return await this.post("/api/move", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			sourcePath: path$1,
+			destinationPath: newPath,
+			sessionId
+		};
+		return await this.post("/api/move", data);
 	}
 	/**
 	* List files in a directory
@@ -2185,16 +2069,12 @@ var FileClient = class extends BaseHttpClient {
 	* @param options - Optional settings (recursive, includeHidden)
 	*/
 	async listFiles(path$1, sessionId, options) {
-		try {
-			const data = {
-				path: path$1,
-				sessionId,
-				options: options || {}
-			};
-			return await this.post("/api/list-files", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			path: path$1,
+			sessionId,
+			options: options || {}
+		};
+		return await this.post("/api/list-files", data);
 	}
 	/**
 	* Check if a file or directory exists
@@ -2202,15 +2082,11 @@ var FileClient = class extends BaseHttpClient {
 	* @param sessionId - The session ID for this operation
 	*/
 	async exists(path$1, sessionId) {
-		try {
-			const data = {
-				path: path$1,
-				sessionId
-			};
-			return await this.post("/api/exists", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			path: path$1,
+			sessionId
+		};
+		return await this.post("/api/exists", data);
 	}
 };
 
@@ -2232,26 +2108,22 @@ var GitClient = class GitClient extends BaseHttpClient {
 	* @param options - Optional settings (branch, targetDir, depth, timeoutMs)
 	*/
 	async checkout(repoUrl, sessionId, options) {
-		try {
-			const timeoutMs = options?.timeoutMs ?? DEFAULT_GIT_CLONE_TIMEOUT_MS;
-			let targetDir = options?.targetDir;
-			if (!targetDir) targetDir = `/workspace/${extractRepoName(repoUrl)}`;
-			const data = {
-				repoUrl,
-				sessionId,
-				targetDir
-			};
-			if (options?.branch) data.branch = options.branch;
-			if (options?.depth !== void 0) {
-				if (!Number.isInteger(options.depth) || options.depth <= 0) throw new Error(`Invalid depth value: ${options.depth}. Must be a positive integer (e.g., 1, 5, 10).`);
-				data.depth = options.depth;
-			}
-			if (!Number.isInteger(timeoutMs) || timeoutMs <= 0) throw new Error(`Invalid timeout value: ${timeoutMs}. Must be a positive integer number of milliseconds.`);
-			data.timeoutMs = timeoutMs;
-			return await this.post("/api/git/checkout", data, void 0, { requestTimeoutMs: timeoutMs + GitClient.REQUEST_TIMEOUT_BUFFER_MS });
-		} catch (error) {
-			throw error;
+		const timeoutMs = options?.timeoutMs ?? DEFAULT_GIT_CLONE_TIMEOUT_MS;
+		let targetDir = options?.targetDir;
+		if (!targetDir) targetDir = `/workspace/${extractRepoName(repoUrl)}`;
+		const data = {
+			repoUrl,
+			sessionId,
+			targetDir
+		};
+		if (options?.branch) data.branch = options.branch;
+		if (options?.depth !== void 0) {
+			if (!Number.isInteger(options.depth) || options.depth <= 0) throw new Error(`Invalid depth value: ${options.depth}. Must be a positive integer (e.g., 1, 5, 10).`);
+			data.depth = options.depth;
 		}
+		if (!Number.isInteger(timeoutMs) || timeoutMs <= 0) throw new Error(`Invalid timeout value: ${timeoutMs}. Must be a positive integer number of milliseconds.`);
+		data.timeoutMs = timeoutMs;
+		return await this.post("/api/git/checkout", data, void 0, { requestTimeoutMs: timeoutMs + GitClient.REQUEST_TIMEOUT_BUFFER_MS });
 	}
 };
 
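`checkout()` validates `depth` and `timeoutMs` up front and pads the HTTP request timeout with `GitClient.REQUEST_TIMEOUT_BUFFER_MS` so the server-side clone timeout can fire first. A hedged sketch of a call exercising those options; the client instance and argument values are placeholders, not taken from this diff:

```ts
// Illustrative call; `git` stands in for a GitClient instance and the
// repoUrl/sessionId values are placeholders.
const result = await git.checkout("https://github.com/cloudflare/workers-sdk", "session-1", {
	branch: "main",
	depth: 1,          // must be a positive integer, else checkout() throws
	timeoutMs: 60000   // must be a positive integer number of milliseconds
});
```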
@@ -2444,16 +2316,12 @@ var PortClient = class extends BaseHttpClient {
 	* @param name - Optional name for the port
 	*/
 	async exposePort(port, sessionId, name) {
-		try {
-			const data = {
-				port,
-				sessionId,
-				name
-			};
-			return await this.post("/api/expose-port", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			port,
+			sessionId,
+			name
+		};
+		return await this.post("/api/expose-port", data);
 	}
 	/**
 	* Unexpose a port and remove its preview URL
@@ -2461,24 +2329,16 @@ var PortClient = class extends BaseHttpClient {
 	* @param sessionId - The session ID for this operation
 	*/
 	async unexposePort(port, sessionId) {
-		try {
-			const url = `/api/exposed-ports/${port}?session=${encodeURIComponent(sessionId)}`;
-			return await this.delete(url);
-		} catch (error) {
-			throw error;
-		}
+		const url = `/api/exposed-ports/${port}?session=${encodeURIComponent(sessionId)}`;
+		return await this.delete(url);
 	}
 	/**
 	* Get all currently exposed ports
 	* @param sessionId - The session ID for this operation
 	*/
 	async getExposedPorts(sessionId) {
-		try {
-			const url = `/api/exposed-ports?session=${encodeURIComponent(sessionId)}`;
-			return await this.get(url);
-		} catch (error) {
-			throw error;
-		}
+		const url = `/api/exposed-ports?session=${encodeURIComponent(sessionId)}`;
+		return await this.get(url);
 	}
 	/**
 	* Watch a port for readiness via SSE stream
@@ -2486,11 +2346,7 @@ var PortClient = class extends BaseHttpClient {
 	* @returns SSE stream that emits PortWatchEvent objects
 	*/
 	async watchPort(request) {
-		try {
-			return await this.doStreamFetch("/api/port-watch", request);
-		} catch (error) {
-			throw error;
-		}
+		return await this.doStreamFetch("/api/port-watch", request);
 	}
 };
 
@@ -2507,90 +2363,62 @@ var ProcessClient = class extends BaseHttpClient {
 	* @param options - Optional settings (processId)
 	*/
 	async startProcess(command, sessionId, options) {
-		try {
-			const data = {
-				command,
-				sessionId,
-				...options?.origin !== void 0 && { origin: options.origin },
-				...options?.processId !== void 0 && { processId: options.processId },
-				...options?.timeoutMs !== void 0 && { timeoutMs: options.timeoutMs },
-				...options?.env !== void 0 && { env: options.env },
-				...options?.cwd !== void 0 && { cwd: options.cwd },
-				...options?.encoding !== void 0 && { encoding: options.encoding },
-				...options?.autoCleanup !== void 0 && { autoCleanup: options.autoCleanup }
-			};
-			return await this.post("/api/process/start", data);
-		} catch (error) {
-			throw error;
-		}
+		const data = {
+			command,
+			sessionId,
+			...options?.origin !== void 0 && { origin: options.origin },
+			...options?.processId !== void 0 && { processId: options.processId },
+			...options?.timeoutMs !== void 0 && { timeoutMs: options.timeoutMs },
+			...options?.env !== void 0 && { env: options.env },
+			...options?.cwd !== void 0 && { cwd: options.cwd },
+			...options?.encoding !== void 0 && { encoding: options.encoding },
+			...options?.autoCleanup !== void 0 && { autoCleanup: options.autoCleanup }
+		};
+		return await this.post("/api/process/start", data);
 	}
 	/**
 	* List all processes (sandbox-scoped, not session-scoped)
 	*/
 	async listProcesses() {
-		try {
-			return await this.get(`/api/process/list`);
-		} catch (error) {
-			throw error;
-		}
+		return await this.get(`/api/process/list`);
 	}
 	/**
 	* Get information about a specific process (sandbox-scoped, not session-scoped)
 	* @param processId - ID of the process to retrieve
 	*/
 	async getProcess(processId) {
-		try {
-			const url = `/api/process/${processId}`;
-			return await this.get(url);
-		} catch (error) {
-			throw error;
-		}
+		const url = `/api/process/${processId}`;
+		return await this.get(url);
 	}
 	/**
 	* Kill a specific process (sandbox-scoped, not session-scoped)
 	* @param processId - ID of the process to kill
 	*/
 	async killProcess(processId) {
-		try {
-			const url = `/api/process/${processId}`;
-			return await this.delete(url);
-		} catch (error) {
-			throw error;
-		}
+		const url = `/api/process/${processId}`;
+		return await this.delete(url);
 	}
 	/**
 	* Kill all running processes (sandbox-scoped, not session-scoped)
 	*/
 	async killAllProcesses() {
-		try {
-			return await this.delete(`/api/process/kill-all`);
-		} catch (error) {
-			throw error;
-		}
+		return await this.delete(`/api/process/kill-all`);
 	}
 	/**
 	* Get logs from a specific process (sandbox-scoped, not session-scoped)
 	* @param processId - ID of the process to get logs from
 	*/
 	async getProcessLogs(processId) {
-		try {
-			const url = `/api/process/${processId}/logs`;
-			return await this.get(url);
-		} catch (error) {
-			throw error;
-		}
+		const url = `/api/process/${processId}/logs`;
+		return await this.get(url);
 	}
 	/**
 	* Stream logs from a specific process (sandbox-scoped, not session-scoped)
 	* @param processId - ID of the process to stream logs from
 	*/
 	async streamProcessLogs(processId) {
-		try {
-			const url = `/api/process/${processId}/stream`;
-			return await this.doStreamFetch(url, void 0, "GET");
-		} catch (error) {
-			throw error;
-		}
+		const url = `/api/process/${processId}/stream`;
+		return await this.doStreamFetch(url, void 0, "GET");
 	}
 };
 
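`startProcess()` assembles its payload with the `...cond !== void 0 && { key }` idiom used throughout these rewritten clients: spreading `false` into an object literal is a no-op, so undefined options are omitted from the request body entirely rather than serialized as explicit keys. A small self-contained illustration (not SDK code):

```ts
// Spreading `false` adds nothing, so optional fields vanish cleanly.
const cwd: string | undefined = "/workspace";
const env: Record<string, string> | undefined = undefined;
const data = {
	command: "npm test",
	...(cwd !== undefined && { cwd }),
	...(env !== undefined && { env })
};
// data is { command: "npm test", cwd: "/workspace" } with no `env` key at all
```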
@@ -2604,43 +2432,27 @@ var UtilityClient = class extends BaseHttpClient {
 	* Ping the sandbox to check if it's responsive
 	*/
 	async ping() {
-		try {
-			return (await this.get("/api/ping")).message;
-		} catch (error) {
-			throw error;
-		}
+		return (await this.get("/api/ping")).message;
 	}
 	/**
 	* Get list of available commands in the sandbox environment
 	*/
 	async getCommands() {
-		try {
-			return (await this.get("/api/commands")).availableCommands;
-		} catch (error) {
-			throw error;
-		}
+		return (await this.get("/api/commands")).availableCommands;
 	}
 	/**
 	* Create a new execution session
 	* @param options - Session configuration (id, env, cwd)
 	*/
 	async createSession(options) {
-		try {
-			return await this.post("/api/session/create", options);
-		} catch (error) {
-			throw error;
-		}
+		return await this.post("/api/session/create", options);
 	}
 	/**
 	* Delete an execution session
 	* @param sessionId - Session ID to delete
 	*/
 	async deleteSession(sessionId) {
-		try {
-			return await this.post("/api/session/delete", { sessionId });
-		} catch (error) {
-			throw error;
-		}
+		return await this.post("/api/session/delete", { sessionId });
 	}
 	/**
 	* Get the container version
@@ -2682,12 +2494,8 @@ var WatchClient = class extends BaseHttpClient {
 	* @param request - Watch request with path and options
 	*/
 	async watch(request) {
-		try {
-			const stream = await this.doStreamFetch("/api/watch", request);
-			return await this.waitForReadiness(stream);
-		} catch (error) {
-			throw error;
-		}
+		const stream = await this.doStreamFetch("/api/watch", request);
+		return await this.waitForReadiness(stream);
 	}
 	/**
 	* Read SSE chunks until the `watching` event appears, then return a
@@ -2873,6 +2681,128 @@ const BACKUP_ALLOWED_PREFIXES = [
 	"/app"
 ];
 
+//#endregion
+//#region src/file-stream.ts
+/**
+* Parse SSE (Server-Sent Events) lines from a stream
+*/
+async function* parseSSE(stream) {
+	const reader = stream.getReader();
+	const decoder = new TextDecoder();
+	let buffer = "";
+	let currentEvent = { data: [] };
+	try {
+		while (true) {
+			const { done, value } = await reader.read();
+			if (done) break;
+			buffer += decoder.decode(value, { stream: true });
+			const parsed = parseSSEFrames(buffer, currentEvent);
+			buffer = parsed.remaining;
+			currentEvent = parsed.currentEvent;
+			for (const frame of parsed.events) try {
+				yield JSON.parse(frame.data);
+			} catch {}
+		}
+		const finalParsed = parseSSEFrames(`${buffer}\n\n`, currentEvent);
+		for (const frame of finalParsed.events) try {
+			yield JSON.parse(frame.data);
+		} catch {}
+	} finally {
+		try {
+			await reader.cancel();
+		} catch {}
+		reader.releaseLock();
+	}
+}
+/**
+* Stream a file from the sandbox with automatic base64 decoding for binary files
+*
+* @param stream - The ReadableStream from readFileStream()
+* @returns AsyncGenerator that yields FileChunk (string for text, Uint8Array for binary)
+*
+* @example
+* ```ts
+* const stream = await sandbox.readFileStream('/path/to/file.png');
+* for await (const chunk of streamFile(stream)) {
+*   if (chunk instanceof Uint8Array) {
+*     // Binary chunk
+*     console.log('Binary chunk:', chunk.length, 'bytes');
+*   } else {
+*     // Text chunk
+*     console.log('Text chunk:', chunk);
+*   }
+* }
+* ```
+*/
+async function* streamFile(stream) {
+	let metadata = null;
+	for await (const event of parseSSE(stream)) switch (event.type) {
+		case "metadata":
+			metadata = {
+				mimeType: event.mimeType,
+				size: event.size,
+				isBinary: event.isBinary,
+				encoding: event.encoding
+			};
+			break;
+		case "chunk":
+			if (!metadata) throw new Error("Received chunk before metadata");
+			if (metadata.isBinary && metadata.encoding === "base64") {
+				const binaryString = atob(event.data);
+				const bytes = new Uint8Array(binaryString.length);
+				for (let i = 0; i < binaryString.length; i++) bytes[i] = binaryString.charCodeAt(i);
+				yield bytes;
+			} else yield event.data;
+			break;
+		case "complete":
+			if (!metadata) throw new Error("Stream completed without metadata");
+			return metadata;
+		case "error": throw new Error(`File streaming error: ${event.error}`);
+	}
+	throw new Error("Stream ended unexpectedly");
+}
+/**
+* Collect an entire file into memory from a stream
+*
+* @param stream - The ReadableStream from readFileStream()
+* @returns Object containing the file content and metadata
+*
+* @example
+* ```ts
+* const stream = await sandbox.readFileStream('/path/to/file.txt');
+* const { content, metadata } = await collectFile(stream);
+* console.log('Content:', content);
+* console.log('MIME type:', metadata.mimeType);
+* ```
+*/
+async function collectFile(stream) {
+	const chunks = [];
+	const generator = streamFile(stream);
+	let result = await generator.next();
+	while (!result.done) {
+		chunks.push(result.value);
+		result = await generator.next();
+	}
+	const metadata = result.value;
+	if (!metadata) throw new Error("Failed to get file metadata");
+	if (metadata.isBinary) {
+		const totalLength = chunks.reduce((sum, chunk) => sum + (chunk instanceof Uint8Array ? chunk.length : 0), 0);
+		const combined = new Uint8Array(totalLength);
+		let offset = 0;
+		for (const chunk of chunks) if (chunk instanceof Uint8Array) {
+			combined.set(chunk, offset);
+			offset += chunk.length;
+		}
+		return {
+			content: combined,
+			metadata
+		};
+	} else return {
+		content: chunks.filter((c) => typeof c === "string").join(""),
+		metadata
+	};
+}
+
 //#endregion
 //#region src/security.ts
 /**
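Note that `streamFile()` above delivers its `FileMetadata` through the generator's final `return` value, which a plain `for await` loop discards; `collectFile()` captures it by calling `next()` manually, and the same pattern works in caller code. A hedged sketch, assuming `stream` comes from `readFileStream()`:

```ts
// Sketch: consume chunks AND keep the metadata returned on completion.
async function consumeWithMetadata(stream: ReadableStream<Uint8Array>) {
	const generator = streamFile(stream);
	let result = await generator.next();
	while (!result.done) {
		const chunk = result.value; // string (text) or Uint8Array (binary)
		console.log("chunk of", typeof chunk === "string" ? chunk.length : chunk.byteLength);
		result = await generator.next();
	}
	return result.value; // the metadata: mimeType, size, isBinary, encoding
}
```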
@@ -3124,69 +3054,234 @@ function asyncIterableToSSEStream(events, options) {
 }
 
 //#endregion
-//#region src/local-mount-sync.ts
-const DEFAULT_POLL_INTERVAL_MS = 1e3;
-const DEFAULT_ECHO_SUPPRESS_TTL_MS = 2e3;
-const MAX_BACKOFF_MS = 3e4;
-const SYNC_CONCURRENCY = 5;
+//#region src/storage-mount/errors.ts
 /**
-* Manages bidirectional sync between an R2 binding and a container directory.
+* Bucket mount and unmount error classes
 *
-* R2 -> Container: polls bucket.list() to detect changes, then transfers diffs.
-* Container -> R2: uses inotifywait via the watch API to detect file changes.
+* Validation errors (InvalidMountConfigError, MissingCredentialsError) are thrown
+* before any container interaction. BucketUnmountError is thrown after a failed
+* fusermount call inside the container.
 */
-var LocalMountSyncManager = class {
-	bucket;
-	mountPath;
-	prefix;
-	readOnly;
-	client;
-	sessionId;
-	logger;
-	pollIntervalMs;
-	echoSuppressTtlMs;
-	snapshot = /* @__PURE__ */ new Map();
-	echoSuppressSet = /* @__PURE__ */ new Set();
-	pollTimer = null;
-	watchReconnectTimer = null;
-	watchAbortController = null;
-	running = false;
-	consecutivePollFailures = 0;
-	consecutiveWatchFailures = 0;
-	constructor(options) {
-		this.bucket = options.bucket;
-		this.mountPath = options.mountPath;
-		this.prefix = options.prefix;
-		this.readOnly = options.readOnly;
-		this.client = options.client;
-		this.sessionId = options.sessionId;
-		this.logger = options.logger.child({ operation: "local-mount-sync" });
-		this.pollIntervalMs = options.pollIntervalMs ?? DEFAULT_POLL_INTERVAL_MS;
-		this.echoSuppressTtlMs = options.echoSuppressTtlMs ?? DEFAULT_ECHO_SUPPRESS_TTL_MS;
-	}
-	/**
-	* Start bidirectional sync. Performs initial full sync, then starts
-	* the R2 poll loop and (if not readOnly) the container watch loop.
-	*/
-	async start() {
-		this.running = true;
-		await this.client.files.mkdir(this.mountPath, this.sessionId, { recursive: true });
-		await this.fullSyncR2ToContainer();
-		this.schedulePoll();
-		if (!this.readOnly) this.startContainerWatch();
-		this.logger.info("Local mount sync started", {
-			mountPath: this.mountPath,
-			prefix: this.prefix,
-			readOnly: this.readOnly,
-			pollIntervalMs: this.pollIntervalMs
-		});
+/**
+* Base error for bucket mounting operations
+*/
+var BucketMountError = class extends Error {
+	code;
+	constructor(message, code = ErrorCode.BUCKET_MOUNT_ERROR) {
+		super(message);
+		this.name = "BucketMountError";
+		this.code = code;
 	}
-	/**
-	* Stop all sync activity and clean up resources.
-	*/
-	async stop() {
-		this.running = false;
-		if (this.pollTimer) {
+};
+/**
+* Thrown when S3FS mount command fails
+*/
+var S3FSMountError = class extends BucketMountError {
+	constructor(message) {
+		super(message, ErrorCode.S3FS_MOUNT_ERROR);
+		this.name = "S3FSMountError";
+	}
+};
+/**
+* Thrown when fusermount -u fails to unmount a FUSE filesystem
+*/
+var BucketUnmountError = class extends BucketMountError {
+	constructor(message) {
+		super(message, ErrorCode.BUCKET_UNMOUNT_ERROR);
+		this.name = "BucketUnmountError";
+	}
+};
+/**
+* Thrown when no credentials found in environment
+*/
+var MissingCredentialsError = class extends BucketMountError {
+	constructor(message) {
+		super(message, ErrorCode.MISSING_CREDENTIALS);
+		this.name = "MissingCredentialsError";
+	}
+};
+/**
+* Thrown when bucket name, mount path, or options are invalid
+*/
+var InvalidMountConfigError = class extends BucketMountError {
+	constructor(message) {
+		super(message, ErrorCode.INVALID_MOUNT_CONFIG);
+		this.name = "InvalidMountConfigError";
+	}
+};
+
+//#endregion
+//#region src/storage-mount/credential-detection.ts
+/**
+* Detect credentials for bucket mounting from environment variables
+* Priority order:
+* 1. Explicit options.credentials
+* 2. Standard AWS env vars: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
+* 3. Standard R2 env vars: R2_ACCESS_KEY_ID, R2_SECRET_ACCESS_KEY
+* 4. Error: no credentials found
+*
+* @param options - Mount options
+* @param envVars - Environment variables
+* @returns Detected credentials
+* @throws MissingCredentialsError if no credentials found
+*/
+function detectCredentials(options, envVars) {
+	if (options.credentials) return options.credentials;
+	const awsAccessKeyId = envVars.AWS_ACCESS_KEY_ID;
+	const awsSecretAccessKey = envVars.AWS_SECRET_ACCESS_KEY;
+	if (awsAccessKeyId && awsSecretAccessKey) return {
+		accessKeyId: awsAccessKeyId,
+		secretAccessKey: awsSecretAccessKey
+	};
+	/**
+	* Priority 3: Standard R2 env vars
+	*
+	* AWS vars still take precedence over R2 vars in case both are set
+	*/
+	const r2AccessKeyId = envVars.R2_ACCESS_KEY_ID;
+	const r2SecretAccessKey = envVars.R2_SECRET_ACCESS_KEY;
+	if (r2AccessKeyId && r2SecretAccessKey) return {
+		accessKeyId: r2AccessKeyId,
+		secretAccessKey: r2SecretAccessKey
+	};
+	throw new MissingCredentialsError("No credentials found. Set R2_ACCESS_KEY_ID and R2_SECRET_ACCESS_KEY or AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables, or pass explicit credentials in options.");
+}
+
+//#endregion
+//#region src/storage-mount/provider-detection.ts
+/**
+* Detect provider from endpoint URL using pattern matching
+*/
+function detectProviderFromUrl(endpoint) {
+	try {
+		const hostname = new URL(endpoint).hostname.toLowerCase();
+		if (hostname.endsWith(".r2.cloudflarestorage.com")) return "r2";
+		if (hostname.endsWith(".amazonaws.com") || hostname === "s3.amazonaws.com") return "s3";
+		if (hostname === "storage.googleapis.com") return "gcs";
+		return null;
+	} catch {
+		return null;
+	}
+}
+/**
+* Get s3fs flags for a given provider
+*
+* Based on s3fs-fuse wiki recommendations:
+* https://github.com/s3fs-fuse/s3fs-fuse/wiki/Non-Amazon-S3
+*/
+function getProviderFlags(provider) {
+	if (!provider) return ["use_path_request_style"];
+	switch (provider) {
+		case "r2": return ["nomixupload"];
+		case "s3": return [];
+		case "gcs": return [];
+		default: return ["use_path_request_style"];
+	}
+}
+/**
+* Resolve s3fs options by combining provider defaults with user overrides
+*/
+function resolveS3fsOptions(provider, userOptions) {
+	const providerFlags = getProviderFlags(provider);
+	if (!userOptions || userOptions.length === 0) return providerFlags;
+	const allFlags = [...providerFlags, ...userOptions];
+	const flagMap = /* @__PURE__ */ new Map();
+	for (const flag of allFlags) {
+		const [flagName] = flag.split("=");
+		flagMap.set(flagName, flag);
+	}
+	return Array.from(flagMap.values());
+}
+
+//#endregion
+//#region src/storage-mount/validation.ts
+function validatePrefix(prefix) {
+	if (!prefix.startsWith("/")) throw new InvalidMountConfigError(`Prefix must start with '/': "${prefix}"`);
+}
+function validateBucketName(bucket, mountPath) {
+	if (bucket.includes(":")) {
+		const [bucketName, prefixPart] = bucket.split(":");
+		throw new InvalidMountConfigError(`Bucket name cannot contain ':'. To mount a prefix, use the 'prefix' option:\n mountBucket('${bucketName}', '${mountPath}', { ...options, prefix: '${prefixPart}' })`);
+	}
+	if (!/^[a-z0-9]([a-z0-9.-]{0,61}[a-z0-9])?$/.test(bucket)) throw new InvalidMountConfigError(`Invalid bucket name: "${bucket}". Bucket names must be 3-63 characters, lowercase alphanumeric, dots, or hyphens, and cannot start/end with dots or hyphens.`);
+}
+/**
+* Builds the s3fs source string from bucket name and optional prefix.
+* Format: "bucket" or "bucket:/prefix/" for subdirectory mounts.
+*
+* @param bucket - The bucket name
+* @param prefix - Optional prefix/subdirectory path
+* @returns The s3fs source string
+*/
+function buildS3fsSource(bucket, prefix) {
+	return prefix ? `${bucket}:${prefix}` : bucket;
+}
+
+//#endregion
+//#region src/local-mount-sync.ts
+const DEFAULT_POLL_INTERVAL_MS = 1e3;
+const DEFAULT_ECHO_SUPPRESS_TTL_MS = 2e3;
+const MAX_BACKOFF_MS = 3e4;
+const SYNC_CONCURRENCY = 5;
+/**
+* Manages bidirectional sync between an R2 binding and a container directory.
+*
+* R2 -> Container: polls bucket.list() to detect changes, then transfers diffs.
+* Container -> R2: uses inotifywait via the watch API to detect file changes.
+*/
+var LocalMountSyncManager = class {
+	bucket;
+	mountPath;
+	prefix;
+	readOnly;
+	client;
+	sessionId;
+	logger;
+	pollIntervalMs;
+	echoSuppressTtlMs;
+	snapshot = /* @__PURE__ */ new Map();
+	echoSuppressSet = /* @__PURE__ */ new Set();
+	pollTimer = null;
+	watchReconnectTimer = null;
+	watchAbortController = null;
+	running = false;
+	consecutivePollFailures = 0;
+	consecutiveWatchFailures = 0;
+	constructor(options) {
+		this.bucket = options.bucket;
+		this.mountPath = options.mountPath;
+		if (options.prefix !== void 0) validatePrefix(options.prefix);
+		this.prefix = options.prefix?.replace(/^\//, "") || void 0;
+		this.readOnly = options.readOnly;
+		this.client = options.client;
+		this.sessionId = options.sessionId;
+		this.logger = options.logger.child({ operation: "local-mount-sync" });
+		this.pollIntervalMs = options.pollIntervalMs ?? DEFAULT_POLL_INTERVAL_MS;
+		this.echoSuppressTtlMs = options.echoSuppressTtlMs ?? DEFAULT_ECHO_SUPPRESS_TTL_MS;
+	}
+	/**
+	* Start bidirectional sync. Performs initial full sync, then starts
+	* the R2 poll loop and (if not readOnly) the container watch loop.
+	*/
+	async start() {
+		this.running = true;
+		await this.client.files.mkdir(this.mountPath, this.sessionId, { recursive: true });
+		await this.fullSyncR2ToContainer();
+		this.schedulePoll();
+		if (!this.readOnly) this.startContainerWatch();
+		this.logger.info("Local mount sync started", {
+			mountPath: this.mountPath,
+			prefix: this.prefix,
+			readOnly: this.readOnly,
+			pollIntervalMs: this.pollIntervalMs
+		});
+	}
+	/**
+	* Stop all sync activity and clean up resources.
+	*/
+	async stop() {
+		this.running = false;
+		if (this.pollTimer) {
 			clearTimeout(this.pollTimer);
 			this.pollTimer = null;
 		}
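`resolveS3fsOptions()` above de-duplicates flags by the name before `=`, and later entries win, so user-supplied options override provider defaults. Two illustrative calls with results worked out from that logic (not output captured from the SDK):

```ts
// Provider default kept, user flag appended:
resolveS3fsOptions("r2", ["umask=0022"]);
// -> ["nomixupload", "umask=0022"]

// User flag replaces the default for an unknown provider, keyed by name:
resolveS3fsOptions(null, ["use_path_request_style=false"]);
// -> ["use_path_request_style=false"]
```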
@@ -3532,181 +3627,23 @@ function isLocalhostPattern(hostname) {
|
|
|
3532
3627
|
}
|
|
3533
3628
|
|
|
3534
3629
|
//#endregion
|
|
3535
|
-
//#region src/
|
|
3536
|
-
/**
|
|
3537
|
-
* Bucket mount and unmount error classes
|
|
3538
|
-
*
|
|
3539
|
-
* Validation errors (InvalidMountConfigError, MissingCredentialsError) are thrown
|
|
3540
|
-
* before any container interaction. BucketUnmountError is thrown after a failed
|
|
3541
|
-
* fusermount call inside the container.
|
|
3542
|
-
*/
|
|
3543
|
-
/**
|
|
3544
|
-
* Base error for bucket mounting operations
|
|
3545
|
-
*/
|
|
3546
|
-
var BucketMountError = class extends Error {
|
|
3547
|
-
code;
|
|
3548
|
-
constructor(message, code = ErrorCode.BUCKET_MOUNT_ERROR) {
|
|
3549
|
-
super(message);
|
|
3550
|
-
this.name = "BucketMountError";
|
|
3551
|
-
this.code = code;
|
|
3552
|
-
}
|
|
3553
|
-
};
|
|
3554
|
-
/**
|
|
3555
|
-
* Thrown when S3FS mount command fails
|
|
3556
|
-
*/
|
|
3557
|
-
var S3FSMountError = class extends BucketMountError {
|
|
3558
|
-
constructor(message) {
|
|
3559
|
-
super(message, ErrorCode.S3FS_MOUNT_ERROR);
|
|
3560
|
-
this.name = "S3FSMountError";
|
|
3561
|
-
}
|
|
3562
|
-
};
|
|
3563
|
-
/**
|
|
3564
|
-
* Thrown when fusermount -u fails to unmount a FUSE filesystem
|
|
3565
|
-
*/
|
|
3566
|
-
var BucketUnmountError = class extends BucketMountError {
|
|
3567
|
-
constructor(message) {
|
|
3568
|
-
super(message, ErrorCode.BUCKET_UNMOUNT_ERROR);
|
|
3569
|
-
this.name = "BucketUnmountError";
|
|
3570
|
-
}
|
|
3571
|
-
};
|
|
3630
|
+
//#region src/version.ts
|
|
3572
3631
|
/**
|
|
3573
|
-
*
|
|
3632
|
+
* SDK version - automatically synchronized with package.json by Changesets
|
|
3633
|
+
* This file is auto-updated by .github/changeset-version.ts during releases
|
|
3634
|
+
* DO NOT EDIT MANUALLY - Changes will be overwritten on the next version bump
|
|
3574
3635
|
*/
|
|
3575
|
-
|
|
3576
|
-
constructor(message) {
|
|
3577
|
-
super(message, ErrorCode.MISSING_CREDENTIALS);
|
|
3578
|
-
this.name = "MissingCredentialsError";
|
|
3579
|
-
}
|
|
3580
|
-
};
|
|
3581
|
-
/**
|
|
3582
|
-
* Thrown when bucket name, mount path, or options are invalid
|
|
3583
|
-
*/
|
|
3584
|
-
var InvalidMountConfigError = class extends BucketMountError {
|
|
3585
|
-
constructor(message) {
|
|
3586
|
-
super(message, ErrorCode.INVALID_MOUNT_CONFIG);
|
|
3587
|
-
this.name = "InvalidMountConfigError";
|
|
3588
|
-
}
|
|
3589
|
-
};
|
|
3590
|
-
|
|
3591
|
-
//#endregion
|
|
3592
|
-
//#region src/storage-mount/credential-detection.ts
|
|
3593
|
-
/**
|
|
3594
|
-
* Detect credentials for bucket mounting from environment variables
|
|
3595
|
-
* Priority order:
|
|
3596
|
-
* 1. Explicit options.credentials
|
|
3597
|
-
* 2. Standard AWS env vars: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
|
|
3598
|
-
* 3. Standard R2 env vars: R2_ACCESS_KEY_ID, R2_SECRET_ACCESS_KEY
|
|
3599
|
-
* 4. Error: no credentials found
|
|
3600
|
-
*
|
|
3601
|
-
* @param options - Mount options
|
|
3602
|
-
* @param envVars - Environment variables
|
|
3603
|
-
* @returns Detected credentials
|
|
3604
|
-
* @throws MissingCredentialsError if no credentials found
|
|
3605
|
-
*/
|
|
3606
|
-
function detectCredentials(options, envVars) {
|
|
3607
|
-
if (options.credentials) return options.credentials;
|
|
3608
|
-
const awsAccessKeyId = envVars.AWS_ACCESS_KEY_ID;
|
|
3609
|
-
const awsSecretAccessKey = envVars.AWS_SECRET_ACCESS_KEY;
|
|
3610
|
-
if (awsAccessKeyId && awsSecretAccessKey) return {
|
|
3611
|
-
accessKeyId: awsAccessKeyId,
|
|
3612
|
-
secretAccessKey: awsSecretAccessKey
|
|
3613
|
-
};
|
|
3614
|
-
/**
|
|
3615
|
-
* Priority 3: Standard R2 env vars
|
|
3616
|
-
*
|
|
3617
|
-
* AWS vars still take precedence over R2 vars in case both are set
|
|
3618
|
-
*/
|
|
3619
|
-
const r2AccessKeyId = envVars.R2_ACCESS_KEY_ID;
|
|
3620
|
-
const r2SecretAccessKey = envVars.R2_SECRET_ACCESS_KEY;
|
|
3621
|
-
if (r2AccessKeyId && r2SecretAccessKey) return {
|
|
3622
|
-
accessKeyId: r2AccessKeyId,
|
|
3623
|
-
secretAccessKey: r2SecretAccessKey
|
|
3624
|
-
};
|
|
3625
|
-
throw new MissingCredentialsError("No credentials found. Set R2_ACCESS_KEY_ID and R2_SECRET_ACCESS_KEY or AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables, or pass explicit credentials in options.");
|
|
3626
|
-
}
|
|
3627
|
-
|
|
3628
|
-
//#endregion
|
|
3629
|
-
//#region src/storage-mount/provider-detection.ts
|
|
3630
|
-
/**
|
|
3631
|
-
* Detect provider from endpoint URL using pattern matching
|
|
3632
|
-
*/
|
|
3633
|
-
function detectProviderFromUrl(endpoint) {
|
|
3634
|
-
-	try {
-		const hostname = new URL(endpoint).hostname.toLowerCase();
-		if (hostname.endsWith(".r2.cloudflarestorage.com")) return "r2";
-		if (hostname.endsWith(".amazonaws.com") || hostname === "s3.amazonaws.com") return "s3";
-		if (hostname === "storage.googleapis.com") return "gcs";
-		return null;
-	} catch {
-		return null;
-	}
-}
-/**
- * Get s3fs flags for a given provider
- *
- * Based on s3fs-fuse wiki recommendations:
- * https://github.com/s3fs-fuse/s3fs-fuse/wiki/Non-Amazon-S3
- */
-function getProviderFlags(provider) {
-	if (!provider) return ["use_path_request_style"];
-	switch (provider) {
-		case "r2": return ["nomixupload"];
-		case "s3": return [];
-		case "gcs": return [];
-		default: return ["use_path_request_style"];
-	}
-}
-/**
- * Resolve s3fs options by combining provider defaults with user overrides
- */
-function resolveS3fsOptions(provider, userOptions) {
-	const providerFlags = getProviderFlags(provider);
-	if (!userOptions || userOptions.length === 0) return providerFlags;
-	const allFlags = [...providerFlags, ...userOptions];
-	const flagMap = /* @__PURE__ */ new Map();
-	for (const flag of allFlags) {
-		const [flagName] = flag.split("=");
-		flagMap.set(flagName, flag);
-	}
-	return Array.from(flagMap.values());
-}
-
-//#endregion
-//#region src/storage-mount/validation.ts
-function validatePrefix(prefix) {
-	if (!prefix.startsWith("/")) throw new InvalidMountConfigError(`Prefix must start with '/': "${prefix}"`);
-}
-function validateBucketName(bucket, mountPath) {
-	if (bucket.includes(":")) {
-		const [bucketName, prefixPart] = bucket.split(":");
-		throw new InvalidMountConfigError(`Bucket name cannot contain ':'. To mount a prefix, use the 'prefix' option:\n mountBucket('${bucketName}', '${mountPath}', { ...options, prefix: '${prefixPart}' })`);
-	}
-	if (!/^[a-z0-9]([a-z0-9.-]{0,61}[a-z0-9])?$/.test(bucket)) throw new InvalidMountConfigError(`Invalid bucket name: "${bucket}". Bucket names must be 3-63 characters, lowercase alphanumeric, dots, or hyphens, and cannot start/end with dots or hyphens.`);
-}
-/**
- * Builds the s3fs source string from bucket name and optional prefix.
- * Format: "bucket" or "bucket:/prefix/" for subdirectory mounts.
- *
- * @param bucket - The bucket name
- * @param prefix - Optional prefix/subdirectory path
- * @returns The s3fs source string
- */
-function buildS3fsSource(bucket, prefix) {
-	return prefix ? `${bucket}:${prefix}` : bucket;
-}
-
-//#endregion
-//#region src/version.ts
-/**
- * SDK version - automatically synchronized with package.json by Changesets
- * This file is auto-updated by .github/changeset-version.ts during releases
- * DO NOT EDIT MANUALLY - Changes will be overwritten on the next version bump
- */
-const SDK_VERSION = "0.8.11";
+const SDK_VERSION = "0.8.14";
 
 //#endregion
 //#region src/sandbox.ts
 const sandboxConfigurationCache = /* @__PURE__ */ new WeakMap();
+const BACKUP_DEFAULT_TTL_SECONDS = 259200;
+const BACKUP_MAX_NAME_LENGTH = 256;
+const BACKUP_CONTAINER_DIR = "/var/backups";
+const BACKUP_STORAGE_PREFIX = "backups";
+const BACKUP_ARCHIVE_OBJECT_NAME = "data.sqsh";
+const BACKUP_METADATA_OBJECT_NAME = "meta.json";
 function getNamespaceConfigurationCache(namespace) {
 	const existing = sandboxConfigurationCache.get(namespace);
 	if (existing) return existing;
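This hunk removes the s3fs provider helpers and mount validators from this chunk, bumps SDK_VERSION to 0.8.14, and replaces the previously inlined backup literals with named constants. The removed `resolveS3fsOptions` merged provider defaults with user overrides last-wins, keyed on the flag name before `=`. A minimal standalone sketch of that merge semantics; the function and argument names here are illustrative, not the package's API:

```ts
// Last-wins merge of s3fs-style flags, keyed on the part before "=".
// Provider defaults go in first and user overrides second, so an
// override like "url=https://b" replaces a default "url=https://a",
// while unrelated flags pass through untouched.
function mergeS3fsFlags(defaults: string[], overrides: string[]): string[] {
  const byName = new Map<string, string>();
  for (const flag of [...defaults, ...overrides]) {
    const [name] = flag.split("=");
    byName.set(name, flag);
  }
  return [...byName.values()];
}

// mergeS3fsFlags(["nomixupload"], ["url=https://example.com"])
// → ["nomixupload", "url=https://example.com"]
```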
@@ -3723,14 +3660,14 @@ function buildSandboxConfiguration(effectiveId, options, cached)
 		name: effectiveId,
 		normalizeId: options?.normalizeId
 	};
-	if (options?.baseUrl !== void 0 && cached?.baseUrl !== options.baseUrl) configuration.baseUrl = options.baseUrl;
 	if (options?.sleepAfter !== void 0 && cached?.sleepAfter !== options.sleepAfter) configuration.sleepAfter = options.sleepAfter;
 	if (options?.keepAlive !== void 0 && cached?.keepAlive !== options.keepAlive) configuration.keepAlive = options.keepAlive;
 	if (options?.containerTimeouts && !sameContainerTimeouts(cached?.containerTimeouts, options.containerTimeouts)) configuration.containerTimeouts = options.containerTimeouts;
+	if (options?.transport !== void 0 && cached?.transport !== options.transport) configuration.transport = options.transport;
 	return configuration;
 }
 function hasSandboxConfiguration(configuration) {
-	return configuration.sandboxName !== void 0 || configuration.
+	return configuration.sandboxName !== void 0 || configuration.sleepAfter !== void 0 || configuration.keepAlive !== void 0 || configuration.containerTimeouts !== void 0 || configuration.transport !== void 0;
 }
 function mergeSandboxConfiguration(cached, configuration) {
 	return {
@@ -3739,20 +3676,20 @@ function mergeSandboxConfiguration(cached, configuration) {
 			sandboxName: configuration.sandboxName.name,
 			normalizeId: configuration.sandboxName.normalizeId
 		},
-		...configuration.baseUrl !== void 0 && { baseUrl: configuration.baseUrl },
 		...configuration.sleepAfter !== void 0 && { sleepAfter: configuration.sleepAfter },
 		...configuration.keepAlive !== void 0 && { keepAlive: configuration.keepAlive },
-		...configuration.containerTimeouts !== void 0 && { containerTimeouts: configuration.containerTimeouts }
+		...configuration.containerTimeouts !== void 0 && { containerTimeouts: configuration.containerTimeouts },
+		...configuration.transport !== void 0 && { transport: configuration.transport }
 	};
 }
 function applySandboxConfiguration(stub, configuration) {
 	if (stub.configure) return stub.configure(configuration);
 	const operations = [];
 	if (configuration.sandboxName) operations.push(stub.setSandboxName?.(configuration.sandboxName.name, configuration.sandboxName.normalizeId) ?? Promise.resolve());
-	if (configuration.baseUrl !== void 0) operations.push(stub.setBaseUrl?.(configuration.baseUrl) ?? Promise.resolve());
 	if (configuration.sleepAfter !== void 0) operations.push(stub.setSleepAfter?.(configuration.sleepAfter) ?? Promise.resolve());
 	if (configuration.keepAlive !== void 0) operations.push(stub.setKeepAlive?.(configuration.keepAlive) ?? Promise.resolve());
 	if (configuration.containerTimeouts !== void 0) operations.push(stub.setContainerTimeouts?.(configuration.containerTimeouts) ?? Promise.resolve());
+	if (configuration.transport !== void 0) operations.push(stub.setTransport?.(configuration.transport) ?? Promise.resolve());
 	return Promise.all(operations).then(() => void 0);
 }
 function getSandbox(ns, id, options) {
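The configuration pipeline drops `baseUrl` and gains `transport` at every stage (build, merge, apply), so the option bag accepted by `getSandbox` changes accordingly. A hedged usage sketch; the binding name `Sandbox` and the sandbox id are placeholders, and `exec` returning a `stdout` field is assumed from the surrounding API rather than confirmed by this diff:

```ts
import { getSandbox } from "@cloudflare/sandbox";

export default {
  async fetch(_request: Request, env: { Sandbox: DurableObjectNamespace }) {
    // transport flows buildSandboxConfiguration → configure() → setTransport()
    const sandbox = getSandbox(env.Sandbox, "user-42", { transport: "websocket" });
    const result = await sandbox.exec("echo hello");
    return new Response(result.stdout);
  },
};
```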
@@ -3823,13 +3760,21 @@ var Sandbox = class Sandbox extends Container {
 	codeInterpreter;
 	sandboxName = null;
 	normalizeId = false;
-	baseUrl = null;
 	defaultSession = null;
+	containerGeneration = 0;
+	defaultSessionInit = null;
 	envVars = {};
 	logger;
 	keepAliveEnabled = false;
 	activeMounts = /* @__PURE__ */ new Map();
 	transport = "http";
+	/**
+	* True once transport has been written to storage at least once (either
+	* via setTransport or restored on cold start). Gates the idempotency
+	* check so a first explicit call persists even when the requested value
+	* already equals the env-derived in-memory default.
+	*/
+	hasStoredTransport = false;
 	backupBucket = null;
 	/**
 	* Serializes backup operations to prevent concurrent create/restore on the same sandbox.
@@ -3863,6 +3808,15 @@ var Sandbox = class Sandbox extends Container {
 	*/
 	containerTimeouts = { ...this.DEFAULT_CONTAINER_TIMEOUTS };
 	/**
+	* True once containerTimeouts has been written to storage at least once
+	* (either via setContainerTimeouts or restored on cold start). Gates the
+	* idempotency check in setContainerTimeouts so a first explicit call
+	* persists even when the requested values already equal the in-memory
+	* defaults, distinguishing "user intent recorded" from "running on
+	* env/SDK defaults".
+	*/
+	hasStoredContainerTimeouts = false;
+	/**
 	* Desktop environment operations.
 	* Within the DO, this getter provides direct access to DesktopClient.
 	* Over RPC, the getSandbox() proxy intercepts this property and routes
@@ -3967,16 +3921,17 @@ var Sandbox = class Sandbox extends Container {
 		this.client = this.createSandboxClient();
 		this.codeInterpreter = new CodeInterpreter(this);
 		this.ctx.blockConcurrencyWhile(async () => {
-			this.sandboxName = await this.ctx.storage.get("sandboxName")
-			this.normalizeId = await this.ctx.storage.get("normalizeId")
-			this.defaultSession = await this.ctx.storage.get("defaultSession")
-			this.keepAliveEnabled = await this.ctx.storage.get("keepAliveEnabled")
+			this.sandboxName = await this.ctx.storage.get("sandboxName") ?? null;
+			this.normalizeId = await this.ctx.storage.get("normalizeId") ?? false;
+			this.defaultSession = await this.ctx.storage.get("defaultSession") ?? null;
+			this.keepAliveEnabled = await this.ctx.storage.get("keepAliveEnabled") ?? false;
 			const storedTimeouts = await this.ctx.storage.get("containerTimeouts");
 			if (storedTimeouts) {
 				this.containerTimeouts = {
 					...this.containerTimeouts,
 					...storedTimeouts
 				};
+				this.hasStoredContainerTimeouts = true;
 				this.client.setRetryTimeoutMs(this.computeRetryTimeoutMs());
 			}
 			const storedSleepAfter = await this.ctx.storage.get("sleepAfter");
@@ -3984,6 +3939,15 @@ var Sandbox = class Sandbox extends Container {
 				this.sleepAfter = storedSleepAfter;
 				this.renewActivityTimeout();
 			}
+			const storedTransport = await this.ctx.storage.get("transport");
+			if (storedTransport && storedTransport !== this.transport) {
+				this.transport = storedTransport;
+				const previousClient = this.client;
+				this.client = this.createSandboxClient();
+				this.codeInterpreter = new CodeInterpreter(this);
+				previousClient.disconnect();
+			}
+			if (storedTransport) this.hasStoredTransport = true;
 			if (this.interceptHttps) this.envVars = {
 				...this.envVars,
 				SANDBOX_INTERCEPT_HTTPS: "1"
@@ -3991,34 +3955,29 @@ var Sandbox = class Sandbox extends Container {
 		});
 	}
 	async setSandboxName(name, normalizeId) {
-		if (
-
-
-
-		}
+		if (this.sandboxName !== null) return;
+		const effectiveNormalizeId = normalizeId ?? false;
+		await Promise.all([this.ctx.storage.put("sandboxName", name), this.ctx.storage.put("normalizeId", effectiveNormalizeId)]);
+		this.sandboxName = name;
+		this.normalizeId = effectiveNormalizeId;
 	}
 	async configure(configuration) {
 		if (configuration.sandboxName) await this.setSandboxName(configuration.sandboxName.name, configuration.sandboxName.normalizeId);
-		if (configuration.baseUrl !== void 0) await this.setBaseUrl(configuration.baseUrl);
 		if (configuration.sleepAfter !== void 0) await this.setSleepAfter(configuration.sleepAfter);
 		if (configuration.keepAlive !== void 0) await this.setKeepAlive(configuration.keepAlive);
 		if (configuration.containerTimeouts !== void 0) await this.setContainerTimeouts(configuration.containerTimeouts);
-	}
-	async setBaseUrl(baseUrl) {
-		if (!this.baseUrl) {
-			this.baseUrl = baseUrl;
-			await this.ctx.storage.put("baseUrl", baseUrl);
-		} else if (this.baseUrl !== baseUrl) throw new Error("Base URL already set and different from one previously provided");
+		if (configuration.transport !== void 0) await this.setTransport(configuration.transport);
 	}
 	async setSleepAfter(sleepAfter) {
-		this.sleepAfter
+		if (this.sleepAfter === sleepAfter) return;
 		await this.ctx.storage.put("sleepAfter", sleepAfter);
+		this.sleepAfter = sleepAfter;
 		this.renewActivityTimeout();
 	}
 	async setKeepAlive(keepAlive) {
-		this.keepAliveEnabled
+		if (this.keepAliveEnabled === keepAlive) return;
 		await this.ctx.storage.put("keepAliveEnabled", keepAlive);
+		this.keepAliveEnabled = keepAlive;
 		if (!keepAlive) this.renewActivityTimeout();
 	}
 	async setEnvVars(envVars) {
@@ -4042,19 +4001,45 @@ var Sandbox = class Sandbox extends Container {
 		}
 	}
 	/**
-	* RPC method to configure container startup timeouts
+	* RPC method to configure container startup timeouts. Idempotent once
+	* the values have been persisted: re-applying the same timeout set is a
+	* no-op. The transport retry budget is recomputed only when at least
+	* one timeout actually changes. Storage is written before the in-memory
+	* mirror and derived state are updated.
 	*/
 	async setContainerTimeouts(timeouts) {
 		const validated = { ...this.containerTimeouts };
 		if (timeouts.instanceGetTimeoutMS !== void 0) validated.instanceGetTimeoutMS = this.validateTimeout(timeouts.instanceGetTimeoutMS, "instanceGetTimeoutMS", 5e3, 3e5);
 		if (timeouts.portReadyTimeoutMS !== void 0) validated.portReadyTimeoutMS = this.validateTimeout(timeouts.portReadyTimeoutMS, "portReadyTimeoutMS", 1e4, 6e5);
 		if (timeouts.waitIntervalMS !== void 0) validated.waitIntervalMS = this.validateTimeout(timeouts.waitIntervalMS, "waitIntervalMS", 100, 5e3);
+		if (this.hasStoredContainerTimeouts && validated.instanceGetTimeoutMS === this.containerTimeouts.instanceGetTimeoutMS && validated.portReadyTimeoutMS === this.containerTimeouts.portReadyTimeoutMS && validated.waitIntervalMS === this.containerTimeouts.waitIntervalMS) return;
+		await this.ctx.storage.put("containerTimeouts", validated);
 		this.containerTimeouts = validated;
-
+		this.hasStoredContainerTimeouts = true;
 		this.client.setRetryTimeoutMs(this.computeRetryTimeoutMs());
 		this.logger.debug("Container timeouts updated", this.containerTimeouts);
 	}
 	/**
+	* RPC method to set the transport protocol. Idempotent once the value
+	* has been persisted: re-applying the same transport is a no-op.
+	* Storage is written before the in-memory state and client are updated.
+	*/
+	async setTransport(transport) {
+		if (transport !== "http" && transport !== "websocket") {
+			this.logger.warn(`Invalid transport value: "${transport}". Must be "http" or "websocket". Ignoring.`);
+			return;
+		}
+		if (this.hasStoredTransport && this.transport === transport) return;
+		await this.ctx.storage.put("transport", transport);
+		const previousClient = this.client;
+		this.transport = transport;
+		this.hasStoredTransport = true;
+		this.client = this.createSandboxClient();
+		this.codeInterpreter = new CodeInterpreter(this);
+		previousClient.disconnect();
+		this.logger.debug("Transport updated", { transport });
+	}
+	/**
 	* Validate a timeout value is within acceptable range
 	* Throws error if invalid - used for user-provided values
 	*/
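Note the ordering inside `setTransport`: the value is persisted first, then the replacement client is created and published before the old one is disconnected, so `this.client` never points at a torn-down transport. A generic sketch of that swap-then-disconnect pattern; the `Client` interface and class here are stand-ins, not package types:

```ts
interface Client {
  disconnect(): void;
}

class ClientHolder {
  constructor(
    private current: Client,
    private makeClient: () => Client,
  ) {}

  // Build the replacement, publish it, then tear down the old one: a
  // reader of `client` observes either the old live client or the new
  // live client, never a disconnected one.
  swap(): void {
    const previous = this.current;
    this.current = this.makeClient();
    previous.disconnect();
  }

  get client(): Client {
    return this.current;
  }
}
```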
@@ -4102,6 +4087,7 @@ var Sandbox = class Sandbox extends Container {
 	* @throws InvalidMountConfigError if bucket name, mount path, or endpoint is invalid
 	*/
 	async mountBucket(bucket, mountPath, options) {
+		if (options.prefix !== void 0) validatePrefix(options.prefix);
 		if ("localBucket" in options && options.localBucket) {
 			await this.mountBucketLocal(bucket, mountPath, options);
 			return;
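Hoisting the `prefix` check to the top of `mountBucket` (it is deleted from the remote-only validation path in the next hunk) means a malformed prefix now fails fast on the `localBucket` path as well. A hedged usage sketch; the bucket, mount path, and sandbox id are placeholders, and `validatePrefix` requires a leading `/`:

```ts
const sandbox = getSandbox(env.Sandbox, "user-42");

// Mount only the /datasets/ prefix of the bucket at /mnt/data.
// A prefix without a leading "/" is now rejected before any mount
// work begins, on both the remote and localBucket code paths.
await sandbox.mountBucket("my-bucket", "/mnt/data", {
  prefix: "/datasets/",
  // ...credential options elided; see the package docs for the full shape
});
```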
@@ -4294,7 +4280,6 @@ var Sandbox = class Sandbox extends Container {
 		validateBucketName(bucket, mountPath);
 		if (!mountPath.startsWith("/")) throw new InvalidMountConfigError(`Mount path must be absolute (start with /): "${mountPath}"`);
 		if (this.activeMounts.has(mountPath)) throw new InvalidMountConfigError(`Mount path "${mountPath}" is already in use by bucket "${this.activeMounts.get(mountPath)?.bucket}". Unmount the existing bucket first or use a different mount path.`);
-		if (options.prefix !== void 0) validatePrefix(options.prefix);
 	}
 	/**
 	* Generate unique password file path for s3fs credentials
@@ -4376,6 +4361,7 @@ var Sandbox = class Sandbox extends Container {
 				await this.deletePasswordFile(mountInfo.passwordFilePath);
 			}
 		}
+		await this.ctx.storage.delete("portTokens");
 		outcome = "success";
 		await super.destroy();
 	} catch (error) {
@@ -4392,11 +4378,84 @@ var Sandbox = class Sandbox extends Container {
 			});
 		}
 	}
-	onStart() {
+	async onStart() {
 		this.logger.debug("Sandbox started");
 		this.checkVersionCompatibility().catch((error) => {
 			this.logger.error("Version compatibility check failed", error instanceof Error ? error : new Error(String(error)));
 		});
+		try {
+			await this.restoreExposedPorts();
+		} catch (error) {
+			this.logger.error("Failed to restore exposed ports after container start", error instanceof Error ? error : new Error(String(error)));
+		}
+	}
+	/**
+	* Re-expose ports on the container runtime using tokens persisted in DO
+	* storage. Called from onStart() after a container (re)start.
+	*
+	* The DO storage holds the source of truth for which ports should be
+	* exposed, which tokens authorize them, and the friendly name (if any)
+	* that the caller set when first exposing the port. If a port is already
+	* exposed on the container this is a no-op for that port. Individual port
+	* failures are logged but do not abort the overall restore — a transient
+	* failure for one port must not prevent the others from being restored.
+	*/
+	async restoreExposedPorts() {
+		const savedTokens = await this.readPortTokens();
+		const portEntries = Object.entries(savedTokens);
+		if (portEntries.length === 0) return;
+		const startTime = Date.now();
+		let restored = 0;
+		let skipped = 0;
+		let failed = 0;
+		const sessionId = await this.ensureDefaultSession();
+		const exposedSet = await this.client.ports.getExposedPorts(sessionId).then((response) => new Set(response.ports.map((p) => p.port))).catch((error) => {
+			this.logger.warn("Failed to fetch exposed ports for restore; assuming none exposed", { error: error instanceof Error ? error.message : String(error) });
+			return /* @__PURE__ */ new Set();
+		});
+		for (const [portStr, entry] of portEntries) {
+			const port = Number.parseInt(portStr, 10);
+			if (!Number.isFinite(port) || !validatePort(port)) {
+				this.logger.warn("Skipping restore of invalid port in storage", { port: portStr });
+				failed++;
+				continue;
+			}
+			if (exposedSet.has(port)) {
+				skipped++;
+				continue;
+			}
+			try {
+				await this.client.ports.exposePort(port, sessionId, entry.name);
+				restored++;
+			} catch (error) {
+				failed++;
+				this.logger.warn("Failed to re-expose port on container restart", {
+					port,
+					error: error instanceof Error ? error.message : String(error)
+				});
+			}
+		}
+		logCanonicalEvent(this.logger, {
+			event: "port.restore",
+			outcome: failed === 0 ? "success" : "error",
+			durationMs: Date.now() - startTime,
+			restored,
+			skipped,
+			failed,
+			total: portEntries.length
+		});
+	}
+	/**
+	* Read the `portTokens` map from DO storage, normalizing the legacy
+	* string-valued format (just a token) to the current object format
+	* ({ token, name? }). The legacy format predates port-name persistence and
+	* can appear on any DO whose storage was written before that change.
+	*/
+	async readPortTokens() {
+		const raw = await this.ctx.storage.get("portTokens") ?? {};
+		const normalized = {};
+		for (const [port, value] of Object.entries(raw)) normalized[port] = typeof value === "string" ? { token: value } : value;
+		return normalized;
+	}
 	/**
 	* Check if the container version matches the SDK version
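`readPortTokens` migrates the legacy storage shape lazily on every read instead of rewriting storage. A standalone sketch of that normalization; the type names are illustrative, since the bundle itself is untyped JavaScript:

```ts
type PortTokenEntry = { token: string; name?: string };

// Legacy entries were bare token strings; current entries are objects
// carrying the token plus an optional friendly name. Normalizing on
// read gives every caller one shape without a storage migration step.
function normalizePortTokens(
  raw: Record<string, string | PortTokenEntry>,
): Record<string, PortTokenEntry> {
  const normalized: Record<string, PortTokenEntry> = {};
  for (const [port, value] of Object.entries(raw)) {
    normalized[port] = typeof value === "string" ? { token: value } : value;
  }
  return normalized;
}

// normalizePortTokens({ "8080": "abc123" })
// → { "8080": { token: "abc123" } }
```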
@@ -4427,10 +4486,12 @@ var Sandbox = class Sandbox extends Container {
 	}
 	async onStop() {
 		this.logger.debug("Sandbox stopped");
-
+		this.containerGeneration++;
 		this.defaultSession = null;
+		this.defaultSessionInit = null;
+		for (const [, m] of this.activeMounts) if (m.mountType === "local-sync") await m.syncManager.stop().catch(() => {});
 		this.activeMounts.clear();
-		await
+		await this.ctx.storage.delete("defaultSession");
 	}
 	onError(error) {
 		this.logger.error("Sandbox error", error instanceof Error ? error : new Error(String(error)));
@@ -4681,33 +4742,46 @@ var Sandbox = class Sandbox extends Container {
 		return 3e3;
 	}
 	/**
-	*
-	*
-	*
-	*
-	* container already has this session (from a previous instance), we sync
-	* our state rather than failing on duplicate creation.
+	* Return the default session id, lazily creating the container session
+	* on first use. Called by every public method that needs a session.
+	* Concurrent callers that target the same sessionId share one
+	* in-flight initialization promise.
 	*/
 	async ensureDefaultSession() {
 		const sessionId = `sandbox-${this.sandboxName || "default"}`;
 		if (this.defaultSession === sessionId) return this.defaultSession;
+		const generation = this.containerGeneration;
+		const pending = this.defaultSessionInit;
+		if (pending?.sessionId === sessionId && pending.generation === generation) return pending.promise;
+		const promise = this.initializeDefaultSession(sessionId, generation);
+		const init = {
+			sessionId,
+			generation,
+			promise
+		};
+		this.defaultSessionInit = init;
+		try {
+			return await promise;
+		} finally {
+			if (this.defaultSessionInit === init) this.defaultSessionInit = null;
+		}
+	}
+	async initializeDefaultSession(sessionId, generation) {
 		try {
 			await this.client.utils.createSession({
 				id: sessionId,
 				env: this.envVars || {},
 				cwd: "/workspace"
 			});
-			this.defaultSession = sessionId;
-			await this.ctx.storage.put("defaultSession", sessionId);
-			this.logger.debug("Default session initialized", { sessionId });
 		} catch (error) {
-			if (error instanceof SessionAlreadyExistsError)
-
-			this.defaultSession = sessionId;
-			await this.ctx.storage.put("defaultSession", sessionId);
-			} else throw error;
+			if (!(error instanceof SessionAlreadyExistsError)) throw error;
+			this.logger.debug("Session exists in container but not in DO state, syncing", { sessionId });
 		}
-
+		if (generation !== this.containerGeneration) throw new Error("Default session initialization was invalidated by a container stop");
+		await this.ctx.storage.put("defaultSession", sessionId);
+		this.defaultSession = sessionId;
+		this.logger.debug("Default session initialized", { sessionId });
+		return sessionId;
 	}
 	async exec(command, options) {
 		const session = await this.ensureDefaultSession();
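`ensureDefaultSession` is now a single-flight wrapper around `initializeDefaultSession`, fenced by `containerGeneration` (bumped in `onStop`, earlier in this diff) so an initialization that straddles a container stop fails instead of publishing a session id for a dead container. A generic sketch of the pattern; all names are illustrative, and the real method additionally keys the pending entry on the sessionId:

```ts
// Single-flight initialization fenced by a generation counter.
// Concurrent callers share one pending promise; a generation bump
// (e.g. the underlying resource restarted) makes a stale in-flight
// init throw rather than publish state for a resource that is gone.
class SingleFlight<T> {
  private generation = 0;
  private pending: { generation: number; promise: Promise<T> } | null = null;

  constructor(private init: () => Promise<T>) {}

  invalidate(): void {
    this.generation++;
    this.pending = null;
  }

  async run(): Promise<T> {
    const generation = this.generation;
    if (this.pending?.generation === generation) return this.pending.promise;
    const promise = (async () => {
      const value = await this.init();
      if (generation !== this.generation) throw new Error("invalidated by restart");
      return value;
    })();
    const entry = { generation, promise };
    this.pending = entry;
    try {
      return await promise;
    } finally {
      if (this.pending === entry) this.pending = null;
    }
  }
}
```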
@@ -5270,8 +5344,8 @@ var Sandbox = class Sandbox extends Container {
 				token: options?.token
 			})).url;
 		} catch {
-			const
-			if (
+			const existingEntry = (await this.readPortTokens())["6080"];
+			if (existingEntry && this.sandboxName) url = this.constructPreviewUrl(6080, this.sandboxName, hostname, existingEntry.token);
 			else throw new Error("Failed to get desktop stream URL: port 6080 could not be exposed and no existing token found.");
 		}
 		try {
@@ -5330,6 +5404,12 @@ var Sandbox = class Sandbox extends Container {
 	/**
 	* Expose a port and get a preview URL for accessing services running in the sandbox
 	*
+	* Preview URLs survive transient container restarts: the token and any
+	* friendly name are persisted in Durable Object storage, and the port is
+	* automatically re-exposed on the container when it comes back up. Tokens
+	* are cleared only on explicit `unexposePort()` or full sandbox
+	* `destroy()`.
+	*
 	* @param port - Port number to expose (1024-65535)
 	* @param options - Configuration options
 	* @param options.hostname - Your Worker's domain name (required for preview URL construction)
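A hedged usage sketch of the behavior documented above; the port, hostname, and sandbox id are placeholders, and the returned object carrying a preview `url` is inferred from this method's docs rather than spelled out in the hunk:

```ts
const sandbox = getSandbox(env.Sandbox, "user-42");

// The token and friendly name land in Durable Object storage, so this
// URL keeps resolving across transient container restarts until an
// explicit unexposePort(8080) or destroy().
const preview = await sandbox.exposePort(8080, {
  hostname: "sandbox.example.com",
  name: "web",
});
console.log(preview.url);
```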
@@ -5370,12 +5450,15 @@ var Sandbox = class Sandbox extends Container {
 			this.validateCustomToken(options.token);
 			token = options.token;
 		} else token = this.generatePortToken();
-		const tokens = await this.
-		const existingPort = Object.entries(tokens).find(([p,
+		const tokens = await this.readPortTokens();
+		const existingPort = Object.entries(tokens).find(([p, entry]) => entry.token === token && p !== port.toString());
 		if (existingPort) throw new SecurityError(`Token '${token}' is already in use by port ${existingPort[0]}. Please use a different token.`);
 		const sessionId = await this.ensureDefaultSession();
 		await this.client.ports.exposePort(port, sessionId, options?.name);
-		tokens[port.toString()] =
+		tokens[port.toString()] = {
+			token,
+			name: options?.name
+		};
 		await this.ctx.storage.put("portTokens", tokens);
 		const url = this.constructPreviewUrl(port, this.sandboxName, options.hostname, token);
 		outcome = "success";
@@ -5405,13 +5488,17 @@ var Sandbox = class Sandbox extends Container {
 		let caughtError;
 		try {
 			if (!validatePort(port)) throw new SecurityError(`Invalid port number: ${port}. Must be 1024-65535, excluding 3000 (sandbox control plane).`);
-			const
-			await this.client.ports.unexposePort(port, sessionId);
-			const tokens = await this.ctx.storage.get("portTokens") || {};
+			const tokens = await this.readPortTokens();
 			if (tokens[port.toString()]) {
 				delete tokens[port.toString()];
 				await this.ctx.storage.put("portTokens", tokens);
 			}
+			const sessionId = await this.ensureDefaultSession();
+			try {
+				await this.client.ports.unexposePort(port, sessionId);
+			} catch (error) {
+				if (!(error instanceof PortNotExposedError)) throw error;
+			}
 			outcome = "success";
 		} catch (error) {
 			caughtError = error instanceof Error ? error : new Error(String(error));
@@ -5430,15 +5517,18 @@ var Sandbox = class Sandbox extends Container {
 		const sessionId = await this.ensureDefaultSession();
 		const response = await this.client.ports.getExposedPorts(sessionId);
 		if (!this.sandboxName) throw new Error("Sandbox name not available. Ensure sandbox is accessed through getSandbox()");
-		const tokens = await this.
-		return response.ports.
-			const
-			if (!
-
-
+		const tokens = await this.readPortTokens();
+		return response.ports.flatMap((port) => {
+			const entry = tokens[port.port.toString()];
+			if (!entry) {
+				this.logger.warn("Port exposed on container but no token in storage; omitting from preview URL list", { port: port.port });
+				return [];
+			}
+			return [{
+				url: this.constructPreviewUrl(port.port, this.sandboxName, hostname, entry.token),
 				port: port.port,
 				status: port.status
-			};
+			}];
 		});
 	}
 	async isPortExposed(port) {
@@ -5451,14 +5541,10 @@ var Sandbox = class Sandbox extends Container {
 		}
 	}
 	async validatePortToken(port, token) {
-
-
-		if (!storedToken) {
-			this.logger.error("Port is exposed but has no token - bug detected", void 0, { port });
-			return false;
-		}
+		const entry = (await this.readPortTokens())[port.toString()];
+		if (!entry) return false;
 		const encoder = new TextEncoder();
-		const a = encoder.encode(
+		const a = encoder.encode(entry.token);
 		const b = encoder.encode(token);
 		try {
 			return crypto.subtle.timingSafeEqual(a, b);
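`validatePortToken` now treats a missing entry as a plain `false` instead of logging a bug, and still compares in constant time. A standalone sketch of the comparison; `timingSafeEqual` is a Cloudflare Workers extension to SubtleCrypto (typed by @cloudflare/workers-types) that throws on length mismatch, which the catch converts into a non-match:

```ts
// Constant-time comparison of a stored token against a presented one.
// timingSafeEqual refuses buffers of different lengths by throwing, so
// unequal-length inputs fall through the catch as "no match".
function tokensMatch(expected: string, provided: string): boolean {
  const encoder = new TextEncoder();
  const a = encoder.encode(expected);
  const b = encoder.encode(provided);
  try {
    return crypto.subtle.timingSafeEqual(a, b);
  } catch {
    return false;
  }
}
```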
@@ -5732,18 +5818,6 @@ var Sandbox = class Sandbox extends Container {
 		};
 	}
 	/**
-	* Generate a presigned GET URL for downloading an object from R2.
-	* The container can curl this URL directly without credentials.
-	*/
-	async generatePresignedGetUrl(r2Key) {
-		const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
-		const encodedBucket = encodeURIComponent(bucketName);
-		const encodedKey = r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/");
-		const url = new URL(`https://${accountId}.r2.cloudflarestorage.com/${encodedBucket}/${encodedKey}`);
-		url.searchParams.set("X-Amz-Expires", String(Sandbox.PRESIGNED_URL_EXPIRY_SECONDS));
-		return (await client.sign(new Request(url), { aws: { signQuery: true } })).url;
-	}
-	/**
 	* Generate a presigned PUT URL for uploading an object to R2.
 	* The container can curl PUT to this URL directly without credentials.
 	*/
@@ -5880,15 +5954,14 @@ var Sandbox = class Sandbox extends Container {
 	* under the `backups/` prefix after the desired retention period.
 	*/
 	async createBackup(options) {
+		if (options.localBucket) return this.enqueueBackupOp(() => this.doCreateBackupLocal(options));
 		this.requireBackupBucket();
 		return this.enqueueBackupOp(() => this.doCreateBackup(options));
 	}
 	async doCreateBackup(options) {
 		const bucket = this.requireBackupBucket();
 		this.requirePresignedUrlSupport();
-		const
-		const MAX_NAME_LENGTH = 256;
-		const { dir, name, ttl = DEFAULT_TTL_SECONDS, gitignore = false, excludes = [] } = options;
+		const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [] } = options;
 		const backupStartTime = Date.now();
 		let backupId;
 		let sizeBytes;
@@ -5898,11 +5971,11 @@ var Sandbox = class Sandbox extends Container {
 		try {
 			Sandbox.validateBackupDir(dir, "BackupOptions.dir");
 			if (name !== void 0) {
-				if (typeof name !== "string" || name.length >
-					message: `BackupOptions.name must be a string of at most ${
+				if (typeof name !== "string" || name.length > BACKUP_MAX_NAME_LENGTH) throw new InvalidBackupConfigError({
+					message: `BackupOptions.name must be a string of at most ${BACKUP_MAX_NAME_LENGTH} characters`,
 					code: ErrorCode.INVALID_BACKUP_CONFIG,
 					httpStatus: 400,
-					context: { reason: `name must be a string of at most ${
+					context: { reason: `name must be a string of at most ${BACKUP_MAX_NAME_LENGTH} characters` },
 					timestamp: (/* @__PURE__ */ new Date()).toISOString()
 				});
 				if (/[\u0000-\u001f\u007f]/.test(name)) throw new InvalidBackupConfigError({
@@ -5936,7 +6009,7 @@ var Sandbox = class Sandbox extends Container {
 			});
 			backupSession = await this.ensureBackupSession();
 			backupId = crypto.randomUUID();
-			const archivePath =
+			const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
 			const createResult = await this.client.backup.createArchive(dir, archivePath, backupSession, gitignore, excludes);
 			if (!createResult.success) throw new BackupCreateError({
 				message: "Container failed to create backup archive",
@@ -5949,8 +6022,8 @@ var Sandbox = class Sandbox extends Container {
 				timestamp: (/* @__PURE__ */ new Date()).toISOString()
 			});
 			sizeBytes = createResult.sizeBytes;
-			const r2Key =
-			const metaKey =
+			const r2Key = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
+			const metaKey = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_METADATA_OBJECT_NAME}`;
 			await this.uploadBackupPresigned(archivePath, r2Key, createResult.sizeBytes, backupId, dir, backupSession);
 			const metadata = {
 				id: backupId,
@@ -5970,9 +6043,9 @@ var Sandbox = class Sandbox extends Container {
 		} catch (error) {
 			caughtError = error instanceof Error ? error : new Error(String(error));
 			if (backupId && backupSession) {
-				const archivePath =
-				const r2Key =
-				const metaKey =
+				const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
+				const r2Key = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
+				const metaKey = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_METADATA_OBJECT_NAME}`;
 				await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession, { origin: "internal" }).catch(() => {});
 				await bucket.delete(r2Key).catch(() => {});
 				await bucket.delete(metaKey).catch(() => {});
@@ -5993,61 +6066,200 @@ var Sandbox = class Sandbox extends Container {
 		}
 	}
 	/**
-	*
-	*
-	*
-	* 1. DO reads metadata from R2 and checks TTL
-	* 2. Container mounts the backup archive from R2 via s3fs
-	* 3. Container mounts the squashfs archive with FUSE overlayfs
-	*
-	* The target directory becomes an overlay mount with the backup as a
-	* read-only lower layer and a writable upper layer for copy-on-write.
-	* Any processes writing to the directory should be stopped first.
-	*
-	* **Mount Lifecycle**: The FUSE overlay mount persists only while the
-	* container is running. When the sandbox sleeps or the container restarts,
-	* the mount is lost and the directory becomes empty. Re-restore from the
-	* backup handle to recover. This is an ephemeral restore, not a persistent
-	* extraction.
-	*
-	* The backup is restored into `backup.dir`. This may differ from the
-	* directory that was originally backed up, allowing cross-directory restore.
-	*
-	* Overlapping backups are independent: restoring a parent directory
-	* overwrites everything inside it, including subdirectories that were
-	* backed up separately. When restoring both, restore the parent first.
-	*
-	* Concurrent backup/restore calls on the same sandbox are serialized.
+	* Local-dev implementation of createBackup.
+	* Uses the R2 binding directly instead of presigned URLs.
+	* Archive format is identical to production (squashfs + meta.json).
 	*/
-	async
-
-
-
-
-		const restoreStartTime = Date.now();
-		const bucket = this.requireBackupBucket();
-		this.requirePresignedUrlSupport();
-		const { id, dir } = backup;
+	async doCreateBackupLocal(options) {
+		const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [] } = options;
+		const backupStartTime = Date.now();
+		let backupId;
+		let sizeBytes;
 		let outcome = "error";
 		let caughtError;
 		let backupSession;
+		const bucket = this.env.BACKUP_BUCKET;
+		if (!bucket || !isR2Bucket(bucket)) throw new InvalidBackupConfigError({
+			message: "BACKUP_BUCKET R2 binding not found in env. Add a BACKUP_BUCKET R2 binding to your wrangler.jsonc for local backup support.",
+			code: ErrorCode.INVALID_BACKUP_CONFIG,
+			httpStatus: 400,
+			context: { reason: "Missing BACKUP_BUCKET R2 binding" },
+			timestamp: (/* @__PURE__ */ new Date()).toISOString()
+		});
 		try {
-
-
-
-
-
-
-
-
-
-
-
-
+			Sandbox.validateBackupDir(dir, "BackupOptions.dir");
+			if (name !== void 0) {
+				if (typeof name !== "string" || name.length > BACKUP_MAX_NAME_LENGTH) throw new InvalidBackupConfigError({
+					message: `BackupOptions.name must be a string of at most ${BACKUP_MAX_NAME_LENGTH} characters`,
+					code: ErrorCode.INVALID_BACKUP_CONFIG,
+					httpStatus: 400,
+					context: { reason: `name must be a string of at most ${BACKUP_MAX_NAME_LENGTH} characters` },
+					timestamp: (/* @__PURE__ */ new Date()).toISOString()
+				});
+				if (/[\u0000-\u001f\u007f]/.test(name)) throw new InvalidBackupConfigError({
+					message: "BackupOptions.name must not contain control characters",
+					code: ErrorCode.INVALID_BACKUP_CONFIG,
+					httpStatus: 400,
+					context: { reason: "name must not contain control characters" },
+					timestamp: (/* @__PURE__ */ new Date()).toISOString()
+				});
+			}
+			if (ttl <= 0) throw new InvalidBackupConfigError({
+				message: "BackupOptions.ttl must be a positive number of seconds",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "ttl must be a positive number of seconds" },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			if (typeof gitignore !== "boolean") throw new InvalidBackupConfigError({
+				message: "BackupOptions.gitignore must be a boolean",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "gitignore must be a boolean" },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			if (!Array.isArray(excludes) || !excludes.every((e) => typeof e === "string")) throw new InvalidBackupConfigError({
+				message: "BackupOptions.excludes must be an array of strings",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "excludes must be an array of strings" },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			backupSession = await this.ensureBackupSession();
+			backupId = crypto.randomUUID();
+			const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
+			const createResult = await this.client.backup.createArchive(dir, archivePath, backupSession, gitignore, excludes);
+			if (!createResult.success) throw new BackupCreateError({
+				message: "Container failed to create backup archive",
+				code: ErrorCode.BACKUP_CREATE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			sizeBytes = createResult.sizeBytes;
+			const r2Key = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
+			const metaKey = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_METADATA_OBJECT_NAME}`;
+			const { content } = await collectFile(await this.client.files.readFileStream(archivePath, backupSession));
+			const archiveData = content instanceof Uint8Array ? content : new TextEncoder().encode(content);
+			await bucket.put(r2Key, archiveData);
+			const head = await bucket.head(r2Key);
+			if (!head || head.size !== createResult.sizeBytes) throw new BackupCreateError({
+				message: `Upload verification failed: expected ${createResult.sizeBytes} bytes, got ${head?.size ?? 0}`,
+				code: ErrorCode.BACKUP_CREATE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			const metadata = {
+				id: backupId,
+				dir,
+				name: name || null,
+				sizeBytes: createResult.sizeBytes,
+				ttl,
+				createdAt: (/* @__PURE__ */ new Date()).toISOString()
+			};
+			await bucket.put(metaKey, JSON.stringify(metadata));
+			outcome = "success";
+			await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession, { origin: "internal" }).catch(() => {});
+			return {
+				id: backupId,
+				dir,
+				localBucket: true
+			};
+		} catch (error) {
+			caughtError = error instanceof Error ? error : new Error(String(error));
+			if (backupId && backupSession) {
+				const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
+				const r2Key = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
+				const metaKey = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_METADATA_OBJECT_NAME}`;
+				await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession, { origin: "internal" }).catch(() => {});
+				await bucket.delete(r2Key).catch(() => {});
+				await bucket.delete(metaKey).catch(() => {});
+			}
+			throw error;
+		} finally {
+			if (backupSession) await this.client.utils.deleteSession(backupSession).catch(() => {});
+			logCanonicalEvent(this.logger, {
+				event: "backup.create",
+				outcome,
+				durationMs: Date.now() - backupStartTime,
+				backupId,
+				dir,
+				name,
+				sizeBytes,
+				provider: "local-binding",
+				error: caughtError
+			});
+		}
+	}
+	/**
+	* Restore a backup from R2 into a directory.
+	*
+	* **Production flow** (`localBucket` not set):
+	* 1. DO reads metadata from R2 and checks TTL
+	* 2. Container mounts the backup archive from R2 via s3fs
+	* 3. Container mounts the squashfs archive with FUSE overlayfs
+	*
+	* The target directory becomes an overlay mount with the backup as a
+	* read-only lower layer and a writable upper layer for copy-on-write.
+	* Any processes writing to the directory should be stopped first.
+	*
+	* **Mount Lifecycle**: The FUSE overlay mount persists only while the
+	* container is running. When the sandbox sleeps or the container restarts,
+	* the mount is lost and the directory becomes empty. Re-restore from the
+	* backup handle to recover. This is an ephemeral restore, not a persistent
+	* extraction.
+	*
+	* **Local-dev flow** (`localBucket: true` on the originating `createBackup` call):
+	* 1. DO reads metadata and checks TTL via R2 binding
+	* 2. DO downloads the archive from R2 and writes it to the container
+	* 3. Container extracts the archive with `unsquashfs` (no FUSE needed)
+	*
+	* The backup is restored into `backup.dir`. This may differ from the
+	* directory that was originally backed up, allowing cross-directory restore.
+	*
+	* Overlapping backups are independent: restoring a parent directory
+	* overwrites everything inside it, including subdirectories that were
+	* backed up separately. When restoring both, restore the parent first.
+	*
+	* Concurrent backup/restore calls on the same sandbox are serialized.
+	*/
+	async restoreBackup(backup) {
+		if (backup.localBucket) return this.enqueueBackupOp(() => this.doRestoreBackupLocal(backup));
+		this.requireBackupBucket();
+		return this.enqueueBackupOp(() => this.doRestoreBackup(backup));
+	}
+	async doRestoreBackup(backup) {
+		const restoreStartTime = Date.now();
+		const bucket = this.requireBackupBucket();
+		this.requirePresignedUrlSupport();
+		const { id, dir } = backup;
+		let outcome = "error";
+		let caughtError;
+		let backupSession;
+		try {
+			if (!id || typeof id !== "string") throw new InvalidBackupConfigError({
+				message: "Invalid backup: missing or invalid id",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "missing or invalid id" },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			if (!Sandbox.UUID_REGEX.test(id)) throw new InvalidBackupConfigError({
+				message: "Invalid backup: id must be a valid UUID (e.g. from createBackup)",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "id must be a valid UUID" },
 				timestamp: (/* @__PURE__ */ new Date()).toISOString()
 			});
 			Sandbox.validateBackupDir(dir, "Invalid backup: dir");
-			const metaKey =
+			const metaKey = `${BACKUP_STORAGE_PREFIX}/${id}/${BACKUP_METADATA_OBJECT_NAME}`;
 			const metaObject = await bucket.get(metaKey);
 			if (!metaObject) throw new BackupNotFoundError({
 				message: `Backup not found: ${id}. Verify the backup ID is correct and the backup has not been deleted.`,
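The local-dev create path replaces the entire presigned-URL leg with direct R2 binding calls: the archive is read out of the container with `collectFile`, `put` into the bucket, and verified with `head`. A hedged usage sketch; the bucket name is a placeholder, while the binding name `BACKUP_BUCKET` is fixed by the error message above:

```ts
// wrangler.jsonc (excerpt): doCreateBackupLocal looks up env.BACKUP_BUCKET
//   "r2_buckets": [
//     { "binding": "BACKUP_BUCKET", "bucket_name": "sandbox-backups-dev" }
//   ]
const sandbox = getSandbox(env.Sandbox, "user-42");
const backup = await sandbox.createBackup({
  dir: "/workspace",
  localBucket: true, // route through the R2 binding instead of presigned URLs
});
// backup → { id: "<uuid>", dir: "/workspace", localBucket: true }
```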
@@ -6080,7 +6292,7 @@ var Sandbox = class Sandbox extends Container {
 				},
 				timestamp: (/* @__PURE__ */ new Date()).toISOString()
 			});
-			const r2Key =
+			const r2Key = `${BACKUP_STORAGE_PREFIX}/${id}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
 			if (!await bucket.head(r2Key)) throw new BackupNotFoundError({
 				message: `Backup archive not found in R2: ${id}. The archive may have been deleted by R2 lifecycle rules.`,
 				code: ErrorCode.BACKUP_NOT_FOUND,
@@ -6089,7 +6301,7 @@ var Sandbox = class Sandbox extends Container {
 				timestamp: (/* @__PURE__ */ new Date()).toISOString()
 			});
 			backupSession = await this.ensureBackupSession();
-			const r2MountPath =
+			const r2MountPath = `${BACKUP_CONTAINER_DIR}/r2mount/${id}`;
 			const archivePath = `${r2MountPath}/data.sqsh`;
 			const mountGlob = `/var/backups/mounts/r2mount/${id}/data`;
 			await this.execWithSession(`/usr/bin/fusermount3 -uz ${shellEscape(dir)} 2>/dev/null || true`, backupSession, { origin: "internal" }).catch(() => {});
@@ -6133,130 +6345,139 @@ var Sandbox = class Sandbox extends Container {
 			});
 		}
 	}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-				currentEvent = parsed.currentEvent;
-				for (const frame of parsed.events) try {
-					yield JSON.parse(frame.data);
-				} catch {}
-			}
-			const finalParsed = parseSSEFrames(`${buffer}\n\n`, currentEvent);
-			for (const frame of finalParsed.events) try {
-				yield JSON.parse(frame.data);
-			} catch {}
-		} finally {
+	/**
+	* Local-dev implementation of restoreBackup.
+	* Uses the R2 binding directly instead of presigned URLs, and
+	* unsquashfs for extraction instead of squashfuse + fuse-overlayfs.
+	*/
+	async doRestoreBackupLocal(backup) {
+		const restoreStartTime = Date.now();
+		const { id, dir } = backup;
+		let outcome = "error";
+		let caughtError;
+		let backupSession;
+		const bucket = this.env.BACKUP_BUCKET;
+		if (!bucket || !isR2Bucket(bucket)) throw new InvalidBackupConfigError({
+			message: "BACKUP_BUCKET R2 binding not found in env. Add a BACKUP_BUCKET R2 binding to your wrangler.jsonc for local backup support.",
+			code: ErrorCode.INVALID_BACKUP_CONFIG,
+			httpStatus: 400,
+			context: { reason: "Missing BACKUP_BUCKET R2 binding" },
+			timestamp: (/* @__PURE__ */ new Date()).toISOString()
+		});
 		try {
-
-
-
-
-		}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+			if (!id || typeof id !== "string") throw new InvalidBackupConfigError({
+				message: "Invalid backup: missing or invalid id",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "missing or invalid id" },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			if (!Sandbox.UUID_REGEX.test(id)) throw new InvalidBackupConfigError({
+				message: "Invalid backup: id must be a valid UUID (e.g. from createBackup)",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "id must be a valid UUID" },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			Sandbox.validateBackupDir(dir, "Invalid backup: dir");
+			const metaKey = `${BACKUP_STORAGE_PREFIX}/${id}/${BACKUP_METADATA_OBJECT_NAME}`;
+			const metaObject = await bucket.get(metaKey);
+			if (!metaObject) throw new BackupNotFoundError({
+				message: `Backup not found: ${id}. Verify the backup ID is correct and the backup has not been deleted.`,
+				code: ErrorCode.BACKUP_NOT_FOUND,
+				httpStatus: 404,
+				context: { backupId: id },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			const metadata = await metaObject.json();
+			const TTL_BUFFER_MS = 60 * 1e3;
+			const createdAt = new Date(metadata.createdAt).getTime();
+			if (Number.isNaN(createdAt)) throw new BackupRestoreError({
+				message: `Backup metadata has invalid createdAt timestamp: ${metadata.createdAt}`,
+				code: ErrorCode.BACKUP_RESTORE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId: id
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			const expiresAt = createdAt + metadata.ttl * 1e3;
+			if (Date.now() + TTL_BUFFER_MS > expiresAt) throw new BackupExpiredError({
+				message: `Backup ${id} has expired (created: ${metadata.createdAt}, TTL: ${metadata.ttl}s). Create a new backup.`,
+				code: ErrorCode.BACKUP_EXPIRED,
+				httpStatus: 400,
+				context: {
+					backupId: id,
+					expiredAt: new Date(expiresAt).toISOString()
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			const r2Key = `${BACKUP_STORAGE_PREFIX}/${id}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
+			const archiveObject = await bucket.get(r2Key);
+			if (!archiveObject) throw new BackupNotFoundError({
+				message: `Backup archive not found in R2: ${id}. The archive may have been deleted by R2 lifecycle rules.`,
+				code: ErrorCode.BACKUP_NOT_FOUND,
+				httpStatus: 404,
+				context: { backupId: id },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			backupSession = await this.ensureBackupSession();
+			const archivePath = `${BACKUP_CONTAINER_DIR}/${id}.sqsh`;
+			const archiveBuffer = await archiveObject.arrayBuffer();
+			const base64Content = Buffer.from(archiveBuffer).toString("base64");
+			await this.execWithSession(`mkdir -p ${BACKUP_CONTAINER_DIR}`, backupSession, { origin: "internal" });
+			const writeResult = await this.client.files.writeFile(archivePath, base64Content, backupSession, { encoding: "base64" });
+			if (!writeResult.success) throw new BackupRestoreError({
+				message: `Failed to write backup archive to ${archivePath}: ${"error" in writeResult && typeof writeResult.error === "object" && writeResult.error !== null && "message" in writeResult.error && typeof writeResult.error.message === "string" ? writeResult.error.message : `File write returned success: false for '${archivePath}'`}`,
+				code: ErrorCode.BACKUP_RESTORE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId: id
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			const extractResult = await this.execWithSession(`/usr/bin/unsquashfs -f -d ${shellEscape(dir)} ${shellEscape(archivePath)}`, backupSession, { origin: "internal" });
+			if (extractResult.exitCode !== 0) throw new BackupRestoreError({
+				message: `unsquashfs extraction failed (exit code ${extractResult.exitCode}): ${extractResult.stderr}`,
+				code: ErrorCode.BACKUP_RESTORE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId: id
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession, { origin: "internal" }).catch(() => {});
+			outcome = "success";
+			return {
+				success: true,
+				dir,
+				id
 			};
-
-
-			if (
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-* Collect an entire file into memory from a stream
-*
-* @param stream - The ReadableStream from readFileStream()
-* @returns Object containing the file content and metadata
-*
-* @example
-* ```ts
-* const stream = await sandbox.readFileStream('/path/to/file.txt');
-* const { content, metadata } = await collectFile(stream);
-* console.log('Content:', content);
-* console.log('MIME type:', metadata.mimeType);
-* ```
-*/
-async function collectFile(stream) {
-	const chunks = [];
-	const generator = streamFile(stream);
-	let result = await generator.next();
-	while (!result.done) {
-		chunks.push(result.value);
-		result = await generator.next();
-	}
-	const metadata = result.value;
-	if (!metadata) throw new Error("Failed to get file metadata");
-	if (metadata.isBinary) {
-		const totalLength = chunks.reduce((sum, chunk) => sum + (chunk instanceof Uint8Array ? chunk.length : 0), 0);
-		const combined = new Uint8Array(totalLength);
-		let offset = 0;
-		for (const chunk of chunks) if (chunk instanceof Uint8Array) {
-			combined.set(chunk, offset);
-			offset += chunk.length;
+		} catch (error) {
+			caughtError = error instanceof Error ? error : new Error(String(error));
+			if (id && backupSession) {
+				const archivePath = `${BACKUP_CONTAINER_DIR}/${id}.sqsh`;
+				await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession, { origin: "internal" }).catch(() => {});
+			}
+			throw error;
+		} finally {
+			if (backupSession) await this.client.utils.deleteSession(backupSession).catch(() => {});
+			logCanonicalEvent(this.logger, {
+				event: "backup.restore",
+				outcome,
+				durationMs: Date.now() - restoreStartTime,
+				backupId: id,
+				dir,
+				provider: "local-binding",
+				error: caughtError
+			});
 		}
-
-
-			metadata
-		};
-	} else return {
-		content: chunks.filter((c) => typeof c === "string").join(""),
-		metadata
-	};
-}
+	}
+};
 
 //#endregion
-export { DesktopInvalidOptionsError as A, CommandClient as C, BackupNotFoundError as D, BackupExpiredError as E, InvalidBackupConfigError as F, ProcessExitedBeforeReadyError as I, ProcessReadyTimeoutError as L, DesktopProcessCrashedError as M, DesktopStartFailedError as N, BackupRestoreError as O, DesktopUnavailableError as P, DesktopClient as S, BackupCreateError as T, UtilityClient as _, BucketMountError as a, GitClient as b, MissingCredentialsError as c,
-//# sourceMappingURL=
+export { DesktopInvalidOptionsError as A, CommandClient as C, BackupNotFoundError as D, BackupExpiredError as E, InvalidBackupConfigError as F, ProcessExitedBeforeReadyError as I, ProcessReadyTimeoutError as L, DesktopProcessCrashedError as M, DesktopStartFailedError as N, BackupRestoreError as O, DesktopUnavailableError as P, DesktopClient as S, BackupCreateError as T, UtilityClient as _, BucketMountError as a, GitClient as b, MissingCredentialsError as c, parseSSEStream as d, responseToAsyncIterable as f, SandboxClient as g, streamFile as h, proxyTerminal as i, DesktopNotStartedError as j, DesktopInvalidCoordinatesError as k, S3FSMountError as l, collectFile as m, getSandbox as n, BucketUnmountError as o, CodeInterpreter as p, proxyToSandbox as r, InvalidMountConfigError as s, Sandbox as t, asyncIterableToSSEStream as u, ProcessClient as v, BackupClient as w, FileClient as x, PortClient as y };
+//# sourceMappingURL=sandbox-CUVJMlma.js.map