edgebase_core 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/README.md +77 -0
- data/lib/edgebase_core/context_manager.rb +23 -0
- data/lib/edgebase_core/errors.rb +41 -0
- data/lib/edgebase_core/field_ops.rb +21 -0
- data/lib/edgebase_core/generated/api_core.rb +915 -0
- data/lib/edgebase_core/generated/client_wrappers.rb +268 -0
- data/lib/edgebase_core/http_client.rb +219 -0
- data/lib/edgebase_core/storage.rb +161 -0
- data/lib/edgebase_core/table_ref.rb +472 -0
- data/lib/edgebase_core.rb +10 -0
- data/llms.txt +88 -0
- metadata +60 -0
|
@@ -0,0 +1,472 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "json"
|
|
4
|
+
|
|
5
|
+
module EdgebaseCore
|
|
6
|
+
# A single query predicate as a (field, operator, value) triple.
FilterTuple = Struct.new(:field_name, :op, :value) do
  # Serialize as the three-element array form the API expects.
  # Struct#to_a yields the members in declaration order.
  def to_json_array = to_a
end
|
|
12
|
+
|
|
13
|
+
# Accumulates filter tuples that will be combined with OR semantics.
class OrBuilder
  def initialize
    @filters = []
  end

  # Append one predicate. Returns self so calls can be chained.
  def where(field_name, op, value)
    @filters.push(FilterTuple.new(field_name, op, value))
    self
  end

  # A defensive copy of the accumulated filters.
  def get_filters
    @filters.dup
  end
end
|
|
28
|
+
|
|
29
|
+
# Collection query result — one type for both offset and cursor pagination.
ListResult = Struct.new(:items, :total, :page, :per_page, :has_more, :cursor, keyword_init: true) do
  # Default every field so a bare `ListResult.new` is a valid empty result.
  def initialize(items: [], total: nil, page: nil, per_page: nil, has_more: nil, cursor: nil)
    super # bare super forwards all keyword arguments as received
  end
end
|
|
35
|
+
|
|
36
|
+
# Aggregate outcome of a batch operation (counts plus per-chunk errors).
BatchResult = Struct.new(:total_processed, :total_succeeded, :errors, keyword_init: true)

# Outcome of an upsert: the stored record plus whether it was newly inserted.
UpsertResult = Struct.new(:record, :inserted, keyword_init: true)
|
|
41
|
+
|
|
42
|
+
# A DatabaseLive change event (insert/update/delete on a table row).
DbChange = Struct.new(:event, :table, :id, :record, :old_record, keyword_init: true) do
  # Build from a decoded JSON payload. Missing or nil event/table
  # collapse to ""; the remaining fields pass through as-is.
  def self.from_json(data)
    attrs = {
      event: data["event"] || "",
      table: data["table"] || "",
      id: data["id"],
      record: data["record"],
      old_record: data["oldRecord"]
    }
    new(**attrs)
  end
end
|
|
54
|
+
|
|
55
|
+
# Compose the DatabaseLive channel name:
#   dblive:<namespace>[:<instance_id>]:<table>[:<doc_id>]
# instance_id and doc_id segments are included only when present.
def self.build_database_live_channel(namespace, table, instance_id = nil, doc_id = nil)
  segments = ["dblive", namespace]
  segments << instance_id if instance_id
  segments << table
  segments << doc_id if doc_id
  segments.join(":")
end
|
|
59
|
+
|
|
60
|
+
# ── Core dispatch helpers ──────────────────────────────────────────────────

# Routes each operation to the instanced (db_*) or single-database
# (db_single_*) generated-core method, depending on whether an
# instance_id is present.
module CoreDispatch
  module_function

  # Read dispatch for "list", "get", "count", and "search".
  # Returns nil for any other method name.
  def core_get(core, method, namespace, instance_id, table, doc_id: nil, query: nil)
    case method
    when "list"
      if instance_id
        core.db_list_records(namespace, instance_id, table, query: query)
      else
        core.db_single_list_records(namespace, table, query: query)
      end
    when "get"
      if instance_id
        core.db_get_record(namespace, instance_id, table, doc_id, query: query)
      else
        core.db_single_get_record(namespace, table, doc_id, query: query)
      end
    when "count"
      if instance_id
        core.db_count_records(namespace, instance_id, table, query: query)
      else
        core.db_single_count_records(namespace, table, query: query)
      end
    when "search"
      if instance_id
        core.db_search_records(namespace, instance_id, table, query: query)
      else
        core.db_single_search_records(namespace, table, query: query)
      end
    end
  end

  # Insert one record.
  def core_insert(core, namespace, instance_id, table, body, query = nil)
    return core.db_insert_record(namespace, instance_id, table, body, query: query) if instance_id

    core.db_single_insert_record(namespace, table, body, query: query)
  end

  # Update one record by ID.
  def core_update(core, namespace, instance_id, table, doc_id, body)
    return core.db_update_record(namespace, instance_id, table, doc_id, body) if instance_id

    core.db_single_update_record(namespace, table, doc_id, body)
  end

  # Delete one record by ID.
  def core_delete(core, namespace, instance_id, table, doc_id)
    return core.db_delete_record(namespace, instance_id, table, doc_id) if instance_id

    core.db_single_delete_record(namespace, table, doc_id)
  end

  # Batch operation with an explicit body (e.g. { "inserts" => [...] }).
  def core_batch(core, namespace, instance_id, table, body, query = nil)
    return core.db_batch_records(namespace, instance_id, table, body, query: query) if instance_id

    core.db_single_batch_records(namespace, table, body, query: query)
  end

  # Filter-scoped batch update/delete.
  def core_batch_by_filter(core, namespace, instance_id, table, body)
    return core.db_batch_by_filter(namespace, instance_id, table, body, query: nil) if instance_id

    core.db_single_batch_by_filter(namespace, table, body, query: nil)
  end
end
|
|
123
|
+
|
|
124
|
+
# ── TableRef ───────────────────────────────────────────────────────────────

# Immutable table reference with a chainable query builder.
#
# All chaining methods return a new instance — safe for reference sharing.
# All HTTP calls delegate to Generated Core (no hardcoded paths).
#
#   posts = client.db("shared").table("posts")
#   result = posts.where("status", "==", "published")
#                 .order_by("createdAt", "desc")
#                 .limit(20)
#                 .get_list
class TableRef
  # Max records per batch request (insert_many / upsert_many / batch_by_filter).
  BATCH_CHUNK_SIZE = 500
  # Safety cap on filter-batch loop iterations in batch_by_filter.
  MAX_BATCH_ITERATIONS = 100

  attr_reader :_name, :_namespace, :_instance_id, :_filters, :_or_filters,
              :_sorts, :_limit, :_offset, :_page, :_search, :_after, :_before

  def initialize(core, name, database_live: nil, namespace: "shared", instance_id: nil,
                 filters: nil, or_filters: nil, sorts: nil,
                 limit_value: nil, offset_value: nil, page_value: nil,
                 search_value: nil, after_value: nil, before_value: nil)
    @core = core
    @_name = name
    @database_live = database_live
    @_namespace = namespace
    @_instance_id = instance_id
    @_filters = filters || []
    @_or_filters = or_filters || []
    @_sorts = sorts || []
    @_limit = limit_value
    @_offset = offset_value
    @_page = page_value
    @_search = search_value
    @_after = after_value
    @_before = before_value
  end

  # ── Query Builder (immutable) ──────────────────────────────────────────

  # Add an AND filter. Returns a new TableRef.
  def where(field_name, op, value)
    clone_with(filters: [*@_filters, FilterTuple.new(field_name, op, value)])
  end

  # Collect OR filters through an OrBuilder passed to the block.
  def or_(&block)
    builder = OrBuilder.new
    block.call(builder)
    clone_with(or_filters: [*@_or_filters, *builder.get_filters])
  end

  # Add a sort key. direction is "asc" or "desc".
  def order_by(field_name, direction = "asc")
    clone_with(sorts: [*@_sorts, [field_name, direction]])
  end

  # Limit the number of records returned.
  def limit(n)
    clone_with(limit_value: n)
  end

  # Skip the first n records (offset pagination).
  def offset(n)
    clone_with(offset_value: n)
  end

  # Set page number for offset pagination (1-based).
  def page(n)
    clone_with(page_value: n)
  end

  # Set a search query; get_list will use the search endpoint instead of list.
  def search(query)
    clone_with(search_value: query)
  end

  # Set cursor for forward pagination. Mutually exclusive with offset().
  def after(cursor)
    clone_with(after_value: cursor, before_value: nil)
  end

  # Set cursor for backward pagination. Mutually exclusive with offset().
  def before(cursor)
    clone_with(before_value: cursor, after_value: nil)
  end

  # ── CRUD ───────────────────────────────────────────────────────────────

  # Execute the current query. Returns a ListResult (empty on malformed data).
  def get_list
    params = build_query_params
    data =
      if @_search
        params["search"] = @_search
        CoreDispatch.core_get(@core, "search", @_namespace, @_instance_id, @_name, query: params)
      else
        CoreDispatch.core_get(@core, "list", @_namespace, @_instance_id, @_name, query: params)
      end
    return ListResult.new(items: []) unless data.is_a?(Hash)

    ListResult.new(
      items: data["items"] || [],
      total: data["total"],
      page: data["page"],
      per_page: data["perPage"],
      has_more: data["hasMore"],
      cursor: data["cursor"]
    )
  end

  # Get a single record by ID.
  def get_one(doc_id)
    CoreDispatch.core_get(
      @core, "get", @_namespace, @_instance_id, @_name,
      doc_id: doc_id, query: {}
    )
  end

  # Insert a single record.
  def insert(record)
    CoreDispatch.core_insert(@core, @_namespace, @_instance_id, @_name, record)
  end

  # Insert-or-update a single record. `inserted` on the result reflects the
  # server's reported "action" field.
  def upsert(record, conflict_target: nil)
    data = CoreDispatch.core_insert(
      @core, @_namespace, @_instance_id, @_name, record,
      upsert_query(conflict_target)
    )
    UpsertResult.new(
      record: data.is_a?(Hash) ? data : {},
      inserted: data.is_a?(Hash) && data["action"] == "inserted"
    )
  end

  # Count records matching the current query. Returns 0 on malformed responses.
  def count
    params = build_query_params
    data = CoreDispatch.core_get(@core, "count", @_namespace, @_instance_id, @_name, query: params)
    data.is_a?(Hash) ? (data["total"] || 0) : 0
  end

  # Get the first record matching the current query conditions, or nil.
  def get_first
    limit(1).get_list.items.first
  end

  # Update a record by ID (delegates to DocRef).
  def update(doc_id, data)
    doc(doc_id).update(data)
  end

  # Delete a record by ID (delegates to DocRef).
  def delete(doc_id)
    doc(doc_id).delete
  end

  # Run raw SQL with positional params. Returns result rows ([] on failure).
  def sql(query, params = [])
    body = {
      "namespace" => @_namespace,
      "sql" => query,
      "params" => params
    }
    body["id"] = @_instance_id unless @_instance_id.nil?
    result = @core.http.post("/sql", body)
    result.is_a?(Hash) ? (result["items"] || []) : []
  end

  # ── Batch ──────────────────────────────────────────────────────────────

  # Create multiple records. Auto-chunks into BATCH_CHUNK_SIZE-item batches.
  def insert_many(records)
    chunked_inserts(records, nil)
  end

  # Upsert multiple records. Auto-chunks into BATCH_CHUNK_SIZE-item batches.
  def upsert_many(records, conflict_target: nil)
    chunked_inserts(records, upsert_query(conflict_target))
  end

  # Update records matching query builder filters.
  def update_many(update)
    raise ArgumentError, "update_many requires at least one where() filter" if @_filters.empty?

    batch_by_filter("update", update)
  end

  # Delete records matching query builder filters.
  def delete_many
    raise ArgumentError, "delete_many requires at least one where() filter" if @_filters.empty?

    batch_by_filter("delete", nil)
  end

  # ── Doc ─────────────────────────────────────────────────────────────────

  # Get a DocRef for a single document in this table.
  def doc(doc_id)
    DocRef.new(
      @core, @_name, doc_id, @database_live,
      namespace: @_namespace, instance_id: @_instance_id
    )
  end

  # ── DatabaseLive ────────────────────────────────────────────────────────

  # Subscribe to live change events on this table.
  def on_snapshot(&callback)
    raise "DatabaseLive not available" unless @database_live

    @database_live.subscribe_callback(
      EdgebaseCore.build_database_live_channel(@_namespace, @_name, @_instance_id),
      callback
    )
  end

  # ── Internal ────────────────────────────────────────────────────────────

  # Serialize builder state into HTTP query params.
  # Raises ArgumentError when offset and cursor pagination are mixed.
  def build_query_params
    has_cursor = !@_after.nil? || !@_before.nil?
    has_offset = !@_offset.nil? || !@_page.nil?
    if has_cursor && has_offset
      raise ArgumentError,
            "Cannot use page()/offset() with after()/before() — choose offset or cursor pagination"
    end

    params = {}
    params["filter"] = JSON.generate(@_filters.map(&:to_json_array)) unless @_filters.empty?
    params["orFilter"] = JSON.generate(@_or_filters.map(&:to_json_array)) unless @_or_filters.empty?
    params["sort"] = @_sorts.map { |f, d| "#{f}:#{d}" }.join(",") unless @_sorts.empty?
    params["limit"] = @_limit.to_s unless @_limit.nil?
    params["page"] = @_page.to_s unless @_page.nil?
    params["offset"] = @_offset.to_s unless @_offset.nil?
    params["after"] = @_after unless @_after.nil?
    params["before"] = @_before unless @_before.nil?
    params
  end

  private

  # Return a copy with the given builder fields replaced.
  def clone_with(**kwargs)
    TableRef.new(
      @core, @_name,
      database_live: @database_live,
      namespace: @_namespace,
      instance_id: @_instance_id,
      filters: kwargs.fetch(:filters, @_filters),
      or_filters: kwargs.fetch(:or_filters, @_or_filters),
      sorts: kwargs.fetch(:sorts, @_sorts),
      limit_value: kwargs.fetch(:limit_value, @_limit),
      offset_value: kwargs.fetch(:offset_value, @_offset),
      page_value: kwargs.fetch(:page_value, @_page),
      search_value: kwargs.fetch(:search_value, @_search),
      after_value: kwargs.fetch(:after_value, @_after),
      before_value: kwargs.fetch(:before_value, @_before)
    )
  end

  # Build the upsert query hash shared by upsert and upsert_many.
  def upsert_query(conflict_target)
    query = { "upsert" => "true" }
    query["conflictTarget"] = conflict_target if conflict_target
    query
  end

  # Shared chunking for insert_many / upsert_many: posts "inserts" batches of
  # BATCH_CHUNK_SIZE and concatenates the returned "inserted" arrays.
  # Issues no request at all for an empty input.
  def chunked_inserts(records, query)
    all_inserted = []
    records.each_slice(BATCH_CHUNK_SIZE) do |chunk|
      data = CoreDispatch.core_batch(@core, @_namespace, @_instance_id, @_name, { "inserts" => chunk }, query)
      all_inserted.concat(data["inserted"] || []) if data.is_a?(Hash)
    end
    all_inserted
  end

  # Run a filter-scoped batch action ("update" or "delete") in bounded chunks.
  # Collects per-chunk errors instead of raising; returns a BatchResult.
  def batch_by_filter(action, update)
    total_processed = 0
    total_succeeded = 0
    errors = []
    filter_json = @_filters.map(&:to_json_array)

    MAX_BATCH_ITERATIONS.times do |chunk_index|
      body = {
        "action" => action,
        "filter" => filter_json,
        "limit" => BATCH_CHUNK_SIZE
      }
      body["orFilter"] = @_or_filters.map(&:to_json_array) unless @_or_filters.empty?
      body["update"] = update if action == "update" && update

      begin
        data = CoreDispatch.core_batch_by_filter(@core, @_namespace, @_instance_id, @_name, body)
        processed = data.is_a?(Hash) ? (data["processed"] || 0) : 0
        succeeded = data.is_a?(Hash) ? (data["succeeded"] || 0) : 0
        total_processed += processed
        total_succeeded += succeeded

        break if processed.zero?
        # For "update", don't loop — updated records may still match the filter.
        break if action == "update"
      rescue StandardError => e
        errors << { "chunkIndex" => chunk_index, "chunkSize" => BATCH_CHUNK_SIZE, "error" => e.to_s }
        break
      end
    end

    BatchResult.new(
      total_processed: total_processed,
      total_succeeded: total_succeeded,
      errors: errors
    )
  end
end
|
|
433
|
+
|
|
434
|
+
# ── DocRef ───────────────────────────────────────────────────────────────────

# Reference to a single document; supports get/update/delete plus
# live snapshot subscriptions via DatabaseLive.
class DocRef
  attr_reader :table_name, :id

  def initialize(core, table_name, doc_id, database_live = nil, namespace: "shared", instance_id: nil)
    @core = core
    @table_name = table_name
    @id = doc_id
    @database_live = database_live
    @_namespace = namespace
    @_instance_id = instance_id
  end

  # Fetch the document.
  def get
    CoreDispatch.core_get(@core, "get", @_namespace, @_instance_id, @table_name, doc_id: @id, query: {})
  end

  # Apply an update to the document.
  def update(data)
    CoreDispatch.core_update(@core, @_namespace, @_instance_id, @table_name, @id, data)
  end

  # Delete the document.
  def delete
    CoreDispatch.core_delete(@core, @_namespace, @_instance_id, @table_name, @id)
  end

  # Subscribe to live change events for this document.
  def on_snapshot(&callback)
    raise "DatabaseLive not available" unless @database_live

    @database_live.subscribe_callback(
      EdgebaseCore.build_database_live_channel(@_namespace, @table_name, @_instance_id, @id),
      callback
    )
  end
end
|
|
472
|
+
end
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "edgebase_core/errors"
|
|
4
|
+
require_relative "edgebase_core/context_manager"
|
|
5
|
+
require_relative "edgebase_core/field_ops"
|
|
6
|
+
require_relative "edgebase_core/http_client"
|
|
7
|
+
require_relative "edgebase_core/generated/api_core"
|
|
8
|
+
require_relative "edgebase_core/generated/client_wrappers"
|
|
9
|
+
require_relative "edgebase_core/table_ref"
|
|
10
|
+
require_relative "edgebase_core/storage"
|
data/llms.txt
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# EdgeBase Ruby Core SDK
|
|
2
|
+
|
|
3
|
+
Use this file as a quick-reference contract for AI coding assistants working with `edgebase_core`.
|
|
4
|
+
|
|
5
|
+
## Package Boundary
|
|
6
|
+
|
|
7
|
+
Use `edgebase_core` for low-level EdgeBase building blocks.
|
|
8
|
+
|
|
9
|
+
This package is shared infrastructure for `edgebase_admin`. Most app code should install `edgebase_admin` instead of using `edgebase_core` directly.
|
|
10
|
+
|
|
11
|
+
`edgebase_core` does not provide admin auth, push, analytics, KV, D1, or Vectorize helpers.
|
|
12
|
+
|
|
13
|
+
## Source Of Truth
|
|
14
|
+
|
|
15
|
+
- Package README: https://github.com/edge-base/edgebase/blob/main/packages/sdk/ruby/packages/core/README.md
|
|
16
|
+
- SDK Overview: https://edgebase.fun/docs/sdks
|
|
17
|
+
- Database Admin SDK: https://edgebase.fun/docs/database/admin-sdk
|
|
18
|
+
- Storage docs: https://edgebase.fun/docs/storage/upload-download
|
|
19
|
+
|
|
20
|
+
If docs, snippets, and assumptions disagree, prefer the current package API over guessed patterns from another runtime.
|
|
21
|
+
|
|
22
|
+
## Canonical Examples
|
|
23
|
+
|
|
24
|
+
### Build an HTTP client
|
|
25
|
+
|
|
26
|
+
```ruby
|
|
27
|
+
require "edgebase_core"
|
|
28
|
+
|
|
29
|
+
client = EdgebaseCore::HttpClient.new(
|
|
30
|
+
"https://your-project.edgebase.fun",
|
|
31
|
+
service_key: ENV.fetch("EDGEBASE_SERVICE_KEY")
|
|
32
|
+
)
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
### Work with storage
|
|
36
|
+
|
|
37
|
+
```ruby
|
|
38
|
+
storage = EdgebaseCore::StorageClient.new(client)
|
|
39
|
+
bucket = storage.bucket("avatars")
|
|
40
|
+
bucket.upload("user-1.jpg", "binary-data", content_type: "image/jpeg")
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
### Use field operations
|
|
44
|
+
|
|
45
|
+
```ruby
|
|
46
|
+
payload = {
|
|
47
|
+
"views" => EdgebaseCore::FieldOps.increment(1),
|
|
48
|
+
"legacyField" => EdgebaseCore::FieldOps.delete_field
|
|
49
|
+
}
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## Hard Rules
|
|
53
|
+
|
|
54
|
+
- keep Service Keys on trusted servers only
|
|
55
|
+
- `HttpClient` is synchronous and server-side only
|
|
56
|
+
- `EdgebaseCore::FieldOps.increment()` and `delete_field` return marker hashes for update payloads
|
|
57
|
+
- `DbRef#table` returns a `TableRef`
|
|
58
|
+
- `TableRef#get_list`, `get_one`, and `get_first` are the main read helpers
|
|
59
|
+
- `StorageClient#bucket` returns a `StorageBucket`
|
|
60
|
+
- `StorageBucket#create_signed_upload_url(path, expires_in: 3600)` expects an integer TTL in seconds
|
|
61
|
+
- `StorageBucket#upload_string` accepts raw, base64, base64url, and data URL inputs
|
|
62
|
+
|
|
63
|
+
## Common Mistakes
|
|
64
|
+
|
|
65
|
+
- do not use `edgebase_core` when you actually need admin auth or other higher-level server features
|
|
66
|
+
- do not copy JavaScript promise-based examples into Ruby
|
|
67
|
+
- do not assume `TableRef` or `DbRef` are top-level app entry points; they are building blocks
|
|
68
|
+
- do not expose the Service Key through browser code
|
|
69
|
+
|
|
70
|
+
## Quick Reference
|
|
71
|
+
|
|
72
|
+
```text
|
|
73
|
+
EdgebaseCore::HttpClient.new(url, service_key:, bearer_token: nil) -> HttpClient
|
|
74
|
+
EdgebaseCore::StorageClient.new(client) -> StorageClient
|
|
75
|
+
storage.bucket(name) -> StorageBucket
|
|
76
|
+
EdgebaseCore::FieldOps.increment(value = 1) -> Hash
|
|
77
|
+
EdgebaseCore::FieldOps.delete_field -> Hash
|
|
78
|
+
db.table(name) -> TableRef
|
|
79
|
+
table.where(field, op, value) -> TableRef
|
|
80
|
+
table.get_list -> ListResult
|
|
81
|
+
table.get_one(id) -> Hash
|
|
82
|
+
table.get_first -> Hash or nil
|
|
83
|
+
table.doc(id) -> DocRef
|
|
84
|
+
bucket.upload(path, data, content_type: ...) -> Hash
|
|
85
|
+
bucket.download(path) -> String
|
|
86
|
+
bucket.create_signed_url(path, expires_in: "1h") -> Hash
|
|
87
|
+
bucket.create_signed_upload_url(path, expires_in: 3600) -> Hash
|
|
88
|
+
```
|
metadata
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
|
2
|
+
name: edgebase_core
|
|
3
|
+
version: !ruby/object:Gem::Version
|
|
4
|
+
version: 0.1.4
|
|
5
|
+
platform: ruby
|
|
6
|
+
authors:
|
|
7
|
+
- EdgeBase
|
|
8
|
+
autorequire:
|
|
9
|
+
bindir: bin
|
|
10
|
+
cert_chain: []
|
|
11
|
+
date: 2026-03-19 00:00:00.000000000 Z
|
|
12
|
+
dependencies: []
|
|
13
|
+
description: Core module for EdgeBase Ruby SDK. Provides HttpClient, TableRef, DocRef,
|
|
14
|
+
StorageClient, and generated API layer from OpenAPI spec.
|
|
15
|
+
email:
|
|
16
|
+
executables: []
|
|
17
|
+
extensions: []
|
|
18
|
+
extra_rdoc_files: []
|
|
19
|
+
files:
|
|
20
|
+
- LICENSE
|
|
21
|
+
- README.md
|
|
22
|
+
- lib/edgebase_core.rb
|
|
23
|
+
- lib/edgebase_core/context_manager.rb
|
|
24
|
+
- lib/edgebase_core/errors.rb
|
|
25
|
+
- lib/edgebase_core/field_ops.rb
|
|
26
|
+
- lib/edgebase_core/generated/api_core.rb
|
|
27
|
+
- lib/edgebase_core/generated/client_wrappers.rb
|
|
28
|
+
- lib/edgebase_core/http_client.rb
|
|
29
|
+
- lib/edgebase_core/storage.rb
|
|
30
|
+
- lib/edgebase_core/table_ref.rb
|
|
31
|
+
- llms.txt
|
|
32
|
+
homepage: https://edgebase.fun/docs/sdks
|
|
33
|
+
licenses:
|
|
34
|
+
- MIT
|
|
35
|
+
metadata:
|
|
36
|
+
allowed_push_host: https://rubygems.org
|
|
37
|
+
homepage_uri: https://edgebase.fun/docs/sdks
|
|
38
|
+
bug_tracker_uri: https://github.com/edge-base/edgebase/issues
|
|
39
|
+
source_code_uri: https://github.com/edge-base/edgebase/tree/main/packages/sdk/ruby/packages/core
|
|
40
|
+
documentation_uri: https://edgebase.fun/docs/sdks
|
|
41
|
+
post_install_message:
|
|
42
|
+
rdoc_options: []
|
|
43
|
+
require_paths:
|
|
44
|
+
- lib
|
|
45
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
|
46
|
+
requirements:
|
|
47
|
+
- - ">="
|
|
48
|
+
- !ruby/object:Gem::Version
|
|
49
|
+
version: '3.0'
|
|
50
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
|
51
|
+
requirements:
|
|
52
|
+
- - ">="
|
|
53
|
+
- !ruby/object:Gem::Version
|
|
54
|
+
version: '0'
|
|
55
|
+
requirements: []
|
|
56
|
+
rubygems_version: 3.0.3.1
|
|
57
|
+
signing_key:
|
|
58
|
+
specification_version: 4
|
|
59
|
+
summary: EdgeBase Core SDK for Ruby — shared HTTP client, query builder, and storage.
|
|
60
|
+
test_files: []
|