faros-js-client 0.6.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,150 @@
1
+ import { Dictionary } from 'ts-essentials';
2
+ import { SchemaLoader } from '../schema';
3
+ import { OriginProvider } from './graphql-writer';
4
+ import { Logger, TimestampedRecord } from './types';
5
/** Minimal transport contract for a GraphQL backend used by GraphQLClient. */
export interface GraphQLBackend {
  /** Resolves when the backend is reachable/healthy; rejects otherwise. */
  healthCheck(): Promise<void>;
  /** Executes a GraphQL query or mutation with optional variables. */
  postQuery(query: any, variables?: any): Promise<any>;
}
9
/**
 * A single record to insert-or-update, plus links to the upserts it depends
 * on (its foreign keys). Upserts form a tree when a record represents a
 * nested entity (see GraphQLClient).
 */
export interface Upsert {
  // Assigned after the record has been written, so that dependent upserts in
  // later batches can reference it as a foreign key
  id?: string;
  /** Model (table) name, e.g. vcs_Branch. */
  readonly model: string;
  /** The record's fields. */
  readonly object: Dictionary<any>;
  /** Upserts this record depends on, keyed by relationship name. */
  readonly foreignKeys: Dictionary<Upsert>;
}
15
/** Buffer of pending Upserts, indexed by model name. */
export declare class UpsertBuffer {
  private readonly upsertBuffer;
  /** Adds an upsert to the buffer under its model. */
  add(upsert: Upsert): void;
  // NOTE(review): unit (records vs. models) not visible here -- confirm in
  // implementation
  size(): number;
  /** Returns the upserts for a model (presumably removing them -- confirm),
   *  or undefined if none are buffered. */
  pop(model: string): Upsert[] | undefined;
  /** Returns the upserts for a model, or undefined if none are buffered. */
  get(model: string): Upsert[] | undefined;
  private getInternal;
  /** Empties the buffer; returns a count (presumably of discarded upserts --
   *  confirm in implementation). */
  clear(): number;
}
24
/** Serializes an object to a string (exact format defined by the implementation). */
export declare function serialize(obj: Dictionary<any>): string;
25
/** Serializes a single value to a string (exact format defined by the implementation). */
export declare function serializeValue(obj: any): string;
26
/**
 * Like lodash pick with (1) null value replacement and (2) type-aware
 * normalization.
 *
 * @param obj       source object to pick from
 * @param keys      property names to keep
 * @param keyTypes  optional per-key type names used for normalization
 * @param nullValue optional replacement for null/missing values
 */
export declare function strictPick(obj: any, keys: string[], keyTypes?: Dictionary<string>, nullValue?: string): any;
31
/**
 * Groups objects by primary key and then merges all related objects into
 * a single object where last-wins for overlapping properties.
 *
 * @param objects     objects to group and merge
 * @param primaryKeys property names that together form the primary key
 * @returns one merged object per distinct primary key
 */
export declare function mergeByPrimaryKey(objects: any[], primaryKeys: string[]): any[];
36
/**
 * Execute async callback for each batch of upserts.
 *
 * @param batches  batches of upserts to process
 * @param callback async function invoked once per batch
 * @returns async iterable yielding each callback's result
 */
export declare function batchIterator<T>(batches: Upsert[][], callback: (batch: Upsert[]) => Promise<T>): AsyncIterable<T>;
40
/**
 * Separates objects into arrays of objects
 * that all have the same keys.
 */
export declare function groupByKeys(objects: any[]): any[][];
45
/**
 * Separates upserts into levels. Level 0 has no dependencies on other levels.
 * Level 1 depends on Level 0, 2 on 1 and so on. Used for self-referent
 * models (see GraphQLClient).
 */
export declare function toLevels(upserts: Upsert[]): Upsert[][];
50
/** Formats an array as a Postgres array literal string (e.g. for use in mutations). */
export declare function toPostgresArrayLiteral(value: any[]): string;
51
/**
 * Client for writing records as GraphQL mutations. The client supports 3
 * kinds of writes: Upserts, Updates and Deletes.
 *
 * For upserts (when upsertBatchSize is greater than 0), the high-level
 * algorithm is:
 *
 * > For each record, build a tree of Upserts. The tree has more than one node
 *   if the current record represents a nested entity (e.g. branch record
 *   referencing repo which, in-turn, references org).
 * > Buffer Upserts and index each by model (e.g. vcs_Branch)
 * > When batch size limit is reached, execute a single insert mutation per
 *   model. Start with leaves of Upsert tree.
 * > After inserting a batch, copy the id of each record back to the Upsert
 *   object. When inserting subsequent batches, required foreign keys will be
 *   read from the Upsert tree and copied to the appropriate batch mutation.
 *
 * Note: there is a complication in this algorithm for self-referent models
 * (i.e. org_Employee's manager relationship). For these models, we split the
 * batch into "levels". The first level consists of records with no
 * dependencies on records of the same type. The second depends on the first
 * and so on.
 *
 * For Updates and Deletes (which are much less frequent) we have a separate
 * write buffer. This buffer is flushed if it reaches capacity. There is no
 * attempt to combine these into bulk mutations (as done w/ upserts).
 */
export declare class GraphQLClient {
  private readonly logger;
  private readonly schemaLoader;
  private readonly backend;
  private schema;
  private tableNames;
  private tableDependencies;
  // NOTE(review): presumably whether the backend supports setting a session
  // context -- confirm against the implementation
  private supportsSetCtx;
  private readonly mutationBatchSize;
  private readonly upsertBatchSize;
  // Buffer for Updates and Deletes (see class doc)
  private readonly writeBuffer;
  // Buffer for Upserts, indexed by model (see class doc)
  private readonly upsertBuffer;
  // Models that reference themselves (e.g. org_Employee's manager) and are
  // therefore written in dependency "levels" (see class doc)
  private readonly selfReferentModels;
  private readonly updateResetLimit;
  private readonly resetPageSize;
  private resetLimitMillis;
  // NOTE(review): updateResetLimit is a boolean here while resetLimitMillis
  // holds the actual limit -- confirm the relationship in the implementation
  constructor(logger: Logger, schemaLoader: SchemaLoader, backend: GraphQLBackend, upsertBatchSize: number, mutationBatchSize: number, updateResetLimit?: boolean, resetPageSize?: number);
  /** Checks backend health (see GraphQLBackend.healthCheck). */
  healthCheck(): Promise<void>;
  /** Loads the GraphQL schema via the schema loader. */
  loadSchema(): Promise<void>;
  private getSchema;
  /** Resets data for the given models. */
  resetData(originProvider: OriginProvider, models: ReadonlyArray<string>, isResetSync: boolean, keepReferencedRecords: boolean): Promise<void>;
  /** Deletes records of a model by id within a session. */
  deleteById(model: string, ids: string[], session: string): Promise<void>;
  /** Writes (buffers) a single record for the given model and origin. */
  writeRecord(model: string, record: Dictionary<any>, origin: string): Promise<void>;
  private flushUpsertBuffer;
  private execUpsert;
  private doFlushUpsertBuffer;
  writeTimestampedRecord(record: TimestampedRecord): Promise<void>;
  private writeUpdateRecord;
  private writeDeletionRecord;
  private postQuery;
  /** Flushes any buffered writes to the backend. */
  flush(): Promise<void>;
  private flushWriteBuffer;
  /**
   * Constructs a gql query from an array of json mutations.
   * The output gql mutation might look like:
   *
   *   mutation {
   *     i1: insert_cicd_Artifact_one(object: {uid: "u1b"}) {
   *       id
   *       refreshedAt
   *     }
   *     i2: insert_cicd_Artifact_one(object: {uid: "u2b"}) {
   *       id
   *       refreshedAt
   *     }
   *   }
   *
   * Notable here are the i1/i2 aliases. These are required when multiple
   * operations share the same name (e.g. insert_cicd_Artifact_one) and are
   * supported in jsonToGraphQLQuery with __aliasFor directive.
   *
   * @return batch gql mutation or undefined if the input is undefined, empty
   * or doesn't contain any mutations.
   */
  static batchMutation(queries: any[]): string | undefined;
  private createWhereClause;
  private createMutationObject;
  private addUpsert;
  private objectWithForeignKeys;
  /**
   * Returns serialized version of key fields.
   */
  private serializedPrimaryKey;
  private toUpsertOps;
  private isValidField;
  private formatFieldValue;
  private createConflictClause;
  private createUpsertConflictClause;
}