convex-ents 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/deletion.d.ts +4 -2
- package/dist/deletion.js +111 -1094
- package/dist/deletion.js.map +1 -1
- package/dist/index.js +111 -50
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -1604,11 +1604,7 @@ function getDeletionConfig(entDefinitions, table) {
 // src/deletion.ts
 var import_server3 = require("convex/server");
 var import_values2 = require("convex/values");
-var vApproach = import_values2.v.union(
-  import_values2.v.literal("schedule"),
-  import_values2.v.literal("deleteOne"),
-  import_values2.v.literal("paginate")
-);
+var vApproach = import_values2.v.union(import_values2.v.literal("cascade"), import_values2.v.literal("paginate"));
 function scheduledDeleteFactory(entDefinitions, options) {
   const selfRef = options?.scheduledDelete ?? (0, import_server3.makeFunctionReference)(
     "functions:scheduledDelete"
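
The `vApproach` validator shrinks from three strategies ("schedule", "deleteOne", "paginate") to two ("cascade", "paginate"). In source form this corresponds roughly to the following sketch of src/deletion.ts (reconstructed from the compiled output above, not the authoritative source):

  import { v, type Infer } from "convex/values";

  // The two deletion strategies that remain in 0.4.0:
  const vApproach = v.union(v.literal("cascade"), v.literal("paginate"));
  type Approach = Infer<typeof vApproach>; // "cascade" | "paginate"
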
@@ -1663,10 +1659,8 @@ function scheduledDeleteFactory(entDefinitions, options) {
       return;
     }
     await progressScheduledDeletion(
-      ctx,
-      entDefinitions,
-      selfRef,
-      origin,
+      { ctx, entDefinitions, selfRef, origin },
+      newCounter(),
       inProgress ? stack : [
         {
           id: originId,
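
The deletion walk now threads a single context object plus a running read counter through every call, instead of positional `ctx`/`entDefinitions`/`selfRef`/`origin` arguments. The shapes, inferred from the call above (a sketch; the package does not export these types):

  // Inferred from the literal { ctx, entDefinitions, selfRef, origin }:
  type Cascade = {
    ctx: unknown; // the Convex mutation context
    entDefinitions: unknown; // the ents schema definitions
    selfRef: unknown; // reference to functions:scheduledDelete, used to reschedule
    origin: unknown; // the originating scheduled-deletion request
  };
  // Inferred from newCounter() later in this file:
  type Counter = { numDocuments: number; numBytesRead: number };
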
@@ -1683,12 +1677,11 @@ function getEdgeArgs(entDefinitions, table) {
   return Object.values(edges).flatMap((edgeDefinition) => {
     if (edgeDefinition.cardinality === "single" && edgeDefinition.type === "ref" || edgeDefinition.cardinality === "multiple" && edgeDefinition.type === "field") {
       const table2 = edgeDefinition.to;
-      const targetDeletionConfig = getDeletionConfig(entDefinitions, table2);
       const targetEdges = getEdgeDefinitions(entDefinitions, table2);
       const hasCascadingEdges = Object.values(targetEdges).some(
         (edgeDefinition2) => edgeDefinition2.cardinality === "single" && edgeDefinition2.type === "ref" || edgeDefinition2.cardinality === "multiple"
       );
-      const approach =
+      const approach = hasCascadingEdges ? "cascade" : "paginate";
       const indexName = edgeDefinition.ref;
       return [{ table: table2, indexName, approach }];
     } else if (edgeDefinition.cardinality === "multiple") {
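
With `targetDeletionConfig` gone, the strategy for each edge now depends only on whether the target documents have cascading edges of their own. Restated as a standalone sketch (function name hypothetical):

  type Approach = "cascade" | "paginate";

  // Targets whose deletion can itself trigger further deletions must be
  // walked one document at a time; otherwise whole pages can be deleted in bulk.
  function chooseApproach(hasCascadingEdges: boolean): Approach {
    return hasCascadingEdges ? "cascade" : "paginate";
  }
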
@@ -1712,39 +1705,49 @@ function getEdgeArgs(entDefinitions, table) {
     }
   });
 }
-async function progressScheduledDeletion(ctx, entDefinitions, selfRef, origin, stack) {
+async function progressScheduledDeletion(cascade, counter, stack) {
+  const { ctx } = cascade;
   const last = stack[stack.length - 1];
   if ("id" in last) {
     const edgeArgs = last.edges[0];
     if (edgeArgs === void 0) {
       await ctx.db.delete(last.id);
       if (stack.length > 1) {
-        await ctx.scheduler.runAfter(0, selfRef, {
-          origin,
-          stack: stack.slice(0, -1),
-          inProgress: true
-        });
+        await continueOrSchedule(cascade, counter, stack.slice(0, -1));
       }
     } else {
       const updated = { ...last, edges: last.edges.slice(1) };
-      await paginate(
-        ctx,
-        entDefinitions,
-        selfRef,
-        origin,
+      await paginateOrCascade(
+        cascade,
+        counter,
         stack.slice(0, -1).concat(updated),
-        {
+        {
+          cursor: null,
+          fieldValue: last.id,
+          ...edgeArgs
+        }
       );
     }
   } else {
-    await paginate(ctx, entDefinitions, selfRef, origin, stack, last);
+    await paginateOrCascade(cascade, counter, stack, last);
   }
 }
-async function paginate(ctx, entDefinitions, selfRef, origin, stack, { table, approach, indexName, fieldValue, cursor }) {
-
-
-
-}
+var MAXIMUM_DOCUMENTS_READ = 8192 / 4;
+var MAXIMUM_BYTES_READ = 2 ** 18;
+async function paginateOrCascade(cascade, counter, stack, { table, approach, indexName, fieldValue, cursor }) {
+  const { ctx, entDefinitions } = cascade;
+  const { page, continueCursor, isDone, bytesRead } = await paginate(
+    ctx,
+    { table, indexName, fieldValue },
+    {
+      cursor,
+      ...limitsBasedOnCounter(
+        counter,
+        approach === "paginate" ? { numItems: MAXIMUM_DOCUMENTS_READ } : { numItems: 1 }
+      )
+    }
+  );
+  const updatedCounter = incrementCounter(counter, page.length, bytesRead);
   const updated = {
     approach,
     table,
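
The read budgets introduced here work out to concrete limits (arithmetic only; the limits themselves are the package's choice):

  const MAXIMUM_DOCUMENTS_READ = 8192 / 4; // 2048 documents per mutation run
  const MAXIMUM_BYTES_READ = 2 ** 18; // 262144 bytes (256 KiB) per mutation run

Note the `numItems` choice above: a "paginate" edge may consume the whole 2048-document budget in one `paginate` call, while a "cascade" edge reads one document at a time so that document's own edges can be pushed onto the stack.
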
@@ -1753,34 +1756,92 @@ async function paginate(ctx, entDefinitions, selfRef, origin, stack, { table, ap
     fieldValue
   };
   const relevantStack = cursor === null ? stack : stack.slice(0, -1);
-
+  const updatedStack = isDone && (approach === "paginate" || page.length === 0) ? relevantStack : relevantStack.concat(
+    approach === "cascade" ? [
+      updated,
+      {
+        id: page[0]._id,
+        table,
+        edges: getEdgeArgs(entDefinitions, table)
+      }
+    ] : [updated]
+  );
+  if (approach === "paginate") {
+    await Promise.all(page.map((doc) => ctx.db.delete(doc._id)));
+  }
+  await continueOrSchedule(cascade, updatedCounter, updatedStack);
+}
+async function continueOrSchedule(cascade, counter, stack) {
+  if (shouldSchedule(counter)) {
+    const { ctx, selfRef, origin } = cascade;
     await ctx.scheduler.runAfter(0, selfRef, {
       origin,
-      stack
-        updated,
-        {
-          id: page[0]._id,
-          table,
-          edges: getEdgeArgs(entDefinitions, table)
-        }
-      ]),
+      stack,
       inProgress: true
     });
   } else {
-
-
-
-
-
-
-
+    await progressScheduledDeletion(cascade, counter, stack);
+  }
+}
+function newCounter() {
+  return {
+    numDocuments: 0,
+    numBytesRead: 0
+  };
+}
+function incrementCounter(counter, numDocuments, numBytesRead) {
+  return {
+    numDocuments: counter.numDocuments + numDocuments,
+    numBytesRead: counter.numBytesRead + numBytesRead
+  };
+}
+function limitsBasedOnCounter(counter, { numItems }) {
+  return {
+    numItems: Math.max(1, numItems - counter.numDocuments),
+    maximumBytesRead: Math.max(1, MAXIMUM_BYTES_READ - counter.numBytesRead)
+  };
+}
+function shouldSchedule(counter) {
+  return counter.numDocuments >= MAXIMUM_DOCUMENTS_READ || counter.numBytesRead >= MAXIMUM_BYTES_READ;
+}
+async function paginate(ctx, {
+  table,
+  indexName,
+  fieldValue
+}, {
+  cursor,
+  numItems,
+  maximumBytesRead
+}) {
+  const query = ctx.db.query(table).withIndex(
+    indexName,
+    (q) => q.eq(indexName, fieldValue).gt(
+      "_creationTime",
+      cursor
+    )
+  );
+  let bytesRead = 0;
+  const results = [];
+  let isDone = true;
+  for await (const doc of query) {
+    if (results.length >= numItems) {
+      isDone = false;
+      break;
     }
-
-
-
-
-    }
+    const size = JSON.stringify((0, import_values2.convexToJson)(doc)).length * 8;
+    if (bytesRead + size > maximumBytesRead) {
+      isDone = false;
+      break;
+    }
+    bytesRead += size;
+    results.push(doc);
   }
+  return {
+    page: results,
+    continueCursor: results.length === 0 ? cursor : results[results.length - 1]._creationTime,
+    isDone,
+    bytesRead
+  };
 }
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
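
Taken together, the counter helpers implement a simple budget: keep deleting inline until either limit is reached, then reschedule the remaining stack via `ctx.scheduler.runAfter`. A worked example with illustrative numbers:

  let counter = newCounter(); // { numDocuments: 0, numBytesRead: 0 }
  counter = incrementCounter(counter, 500, 100000);
  limitsBasedOnCounter(counter, { numItems: 2048 });
  // => { numItems: 1548, maximumBytesRead: 162144 }
  shouldSchedule(counter); // false: both budgets still have headroom
  counter = incrementCounter(counter, 1600, 50000);
  shouldSchedule(counter); // true: 2100 >= 2048 documents read, so the next
  // batch runs in a newly scheduled mutation instead of inline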