feedx 0.12.7 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +2 -37
  3. data/.golangci.yml +13 -4
  4. data/.rubocop.yml +8 -14
  5. data/.tool-versions +1 -0
  6. data/Gemfile +8 -0
  7. data/Gemfile.lock +54 -68
  8. data/Makefile +3 -3
  9. data/README.md +3 -1
  10. data/compression.go +29 -0
  11. data/compression_test.go +73 -61
  12. data/consumer.go +96 -152
  13. data/consumer_test.go +124 -60
  14. data/example_test.go +148 -0
  15. data/feedx.gemspec +2 -10
  16. data/feedx.go +16 -31
  17. data/feedx_ext_test.go +13 -3
  18. data/feedx_test.go +24 -26
  19. data/format.go +29 -19
  20. data/format_test.go +84 -56
  21. data/go.mod +11 -7
  22. data/go.sum +16 -138
  23. data/incremental.go +122 -0
  24. data/incremental_test.go +62 -0
  25. data/lib/feedx/cache/abstract.rb +3 -3
  26. data/lib/feedx/cache/value.rb +6 -6
  27. data/lib/feedx/compression/abstract.rb +2 -2
  28. data/lib/feedx/compression/gzip.rb +4 -4
  29. data/lib/feedx/consumer.rb +8 -8
  30. data/lib/feedx/format/abstract.rb +6 -6
  31. data/lib/feedx/format/json.rb +2 -2
  32. data/lib/feedx/format/protobuf.rb +6 -6
  33. data/lib/feedx/format.rb +1 -3
  34. data/lib/feedx/producer.rb +11 -11
  35. data/lib/feedx/stream.rb +2 -2
  36. data/lib/feedx.rb +2 -3
  37. data/manifest.go +65 -0
  38. data/producer.go +34 -137
  39. data/producer_test.go +46 -60
  40. data/reader.go +142 -41
  41. data/reader_test.go +86 -35
  42. data/scheduler.go +179 -0
  43. data/scheduler_test.go +171 -0
  44. data/writer.go +13 -13
  45. data/writer_test.go +61 -44
  46. metadata +12 -137
  47. data/.github/workflows/lint.yml +0 -18
  48. data/ext/parquet/decoder.go +0 -59
  49. data/ext/parquet/decoder_test.go +0 -88
  50. data/ext/parquet/encoder.go +0 -27
  51. data/ext/parquet/encoder_test.go +0 -70
  52. data/ext/parquet/go.mod +0 -12
  53. data/ext/parquet/go.sum +0 -193
  54. data/ext/parquet/parquet.go +0 -78
  55. data/ext/parquet/parquet_test.go +0 -28
  56. data/ext/parquet/testdata/alltypes_plain.parquet +0 -0
  57. data/lib/feedx/format/parquet.rb +0 -102
  58. data/spec/feedx/cache/memory_spec.rb +0 -23
  59. data/spec/feedx/cache/value_spec.rb +0 -19
  60. data/spec/feedx/compression/gzip_spec.rb +0 -17
  61. data/spec/feedx/compression/none_spec.rb +0 -15
  62. data/spec/feedx/compression_spec.rb +0 -19
  63. data/spec/feedx/consumer_spec.rb +0 -49
  64. data/spec/feedx/format/abstract_spec.rb +0 -21
  65. data/spec/feedx/format/json_spec.rb +0 -27
  66. data/spec/feedx/format/parquet_spec.rb +0 -30
  67. data/spec/feedx/format/protobuf_spec.rb +0 -23
  68. data/spec/feedx/format_spec.rb +0 -21
  69. data/spec/feedx/producer_spec.rb +0 -74
  70. data/spec/feedx/stream_spec.rb +0 -109
  71. data/spec/spec_helper.rb +0 -57
data/reader_test.go CHANGED
@@ -1,56 +1,107 @@
1
1
  package feedx_test
2
2
 
3
3
  import (
4
- "context"
5
4
  "io"
6
- "io/ioutil"
7
- "time"
5
+ "reflect"
6
+ "testing"
8
7
 
9
8
  "github.com/bsm/bfs"
10
9
  "github.com/bsm/feedx"
11
10
  "github.com/bsm/feedx/internal/testdata"
12
- . "github.com/bsm/ginkgo"
13
- . "github.com/bsm/gomega"
14
11
  )
15
12
 
16
- var _ = Describe("Reader", func() {
17
- var subject *feedx.Reader
18
- var obj *bfs.Object
19
- var ctx = context.Background()
13
+ func TestReader(t *testing.T) {
14
+ t.Run("reads", func(t *testing.T) {
15
+ r := fixReader(t)
20
16
 
21
- BeforeEach(func() {
22
- obj = bfs.NewInMemObject("path/to/file.json")
23
- Expect(writeMulti(obj, 3, time.Time{})).To(Succeed())
17
+ if data, err := io.ReadAll(r); err != nil {
18
+ t.Fatal("unexpected error", err)
19
+ } else if exp, got := 111, len(data); exp != got {
20
+ t.Errorf("expected %v, got %v", exp, got)
21
+ } else if exp, got := int64(0), r.NumRead(); exp != got {
22
+ t.Errorf("expected %v, got %v", exp, got)
23
+ }
24
+ })
24
25
 
25
- var err error
26
- subject, err = feedx.NewReader(ctx, obj, nil)
27
- Expect(err).NotTo(HaveOccurred())
26
+ t.Run("decodes", func(t *testing.T) {
27
+ r := fixReader(t)
28
+ msgs := drainReader(t, r)
29
+ if exp := seedN(3); !reflect.DeepEqual(exp, msgs) {
30
+ t.Errorf("expected %#v, got %#v", exp, msgs)
31
+ }
32
+ if exp, got := int64(3), r.NumRead(); exp != got {
33
+ t.Errorf("expected %v, got %v", exp, got)
34
+ }
28
35
  })
36
+ }
37
+
38
+ func fixReader(t *testing.T) *feedx.Reader {
39
+ t.Helper()
29
40
 
30
- AfterEach(func() {
31
- Expect(subject.Close()).To(Succeed())
41
+ obj := bfs.NewInMemObject("path/to/file.jsonz")
42
+ if err := writeN(obj, 3, 0); err != nil {
43
+ t.Fatal("unexpected error", err)
44
+ }
45
+
46
+ r, err := feedx.NewReader(t.Context(), obj, nil)
47
+ if err != nil {
48
+ t.Fatal("unexpected error", err)
49
+ }
50
+
51
+ t.Cleanup(func() {
52
+ _ = r.Close()
32
53
  })
33
54
 
34
- It("reads", func() {
35
- data, err := ioutil.ReadAll(subject)
36
- Expect(err).NotTo(HaveOccurred())
37
- Expect(len(data)).To(BeNumerically("~", 110, 20))
38
- Expect(subject.NumRead()).To(Equal(0))
55
+ return r
56
+ }
57
+
58
+ func TestMultiReader(t *testing.T) {
59
+ t.Run("reads", func(t *testing.T) {
60
+ r := fixMultiReader(t)
61
+
62
+ if data, err := io.ReadAll(r); err != nil {
63
+ t.Fatal("unexpected error", err)
64
+ } else if exp, got := 222, len(data); exp != got {
65
+ t.Errorf("expected %v, got %v", exp, got)
66
+ } else if exp, got := int64(0), r.NumRead(); exp != got {
67
+ t.Errorf("expected %v, got %v", exp, got)
68
+ }
39
69
  })
40
70
 
41
- It("decodes", func() {
42
- var msgs []*testdata.MockMessage
43
- for {
44
- var msg testdata.MockMessage
45
- err := subject.Decode(&msg)
46
- if err == io.EOF {
47
- break
48
- }
49
- Expect(err).NotTo(HaveOccurred())
50
- msgs = append(msgs, &msg)
71
+ t.Run("decodes", func(t *testing.T) {
72
+ r := fixMultiReader(t)
73
+ msgs := drainReader(t, r)
74
+ if exp := seedN(6); !reflect.DeepEqual(exp, msgs) {
75
+ t.Errorf("expected %#v, got %#v", exp, msgs)
76
+ }
77
+ if exp, got := int64(6), r.NumRead(); exp != got {
78
+ t.Errorf("expected %v, got %v", exp, got)
51
79
  }
80
+ })
81
+ }
82
+
83
+ func fixMultiReader(t *testing.T) *feedx.Reader {
84
+ t.Helper()
52
85
 
53
- Expect(msgs).To(ConsistOf(seed(), seed(), seed()))
54
- Expect(subject.NumRead()).To(Equal(3))
86
+ obj := bfs.NewInMemObject("path/to/file.jsonz")
87
+ if err := writeN(obj, 3, 0); err != nil {
88
+ t.Fatal("unexpected error", err)
89
+ }
90
+
91
+ r := feedx.MultiReader(t.Context(), []*bfs.Object{obj, obj}, nil)
92
+ t.Cleanup(func() {
93
+ _ = r.Close()
55
94
  })
56
- })
95
+
96
+ return r
97
+ }
98
+
99
+ func drainReader(t *testing.T, r interface{ Decode(any) error }) []*testdata.MockMessage {
100
+ t.Helper()
101
+
102
+ msgs, err := readMessages(r)
103
+ if err != nil {
104
+ t.Fatal("unexpected error", err)
105
+ }
106
+ return msgs
107
+ }
data/scheduler.go ADDED
@@ -0,0 +1,179 @@
1
+ package feedx
2
+
3
+ import (
4
+ "context"
5
+ "sync"
6
+ "time"
7
+ )
8
+
9
+ // BeforeHook callbacks are run before jobs are started. Each receives the local
10
+ // version before sync as an argument and may return false to abort the cycle.
11
+ type BeforeHook func(version int64) bool
12
+
13
+ // AfterHook callbacks are run after jobs have finished.
14
+ type AfterHook func(*Status, error)
15
+
16
+ // VersionCheck callbacks return the latest local version.
17
+ type VersionCheck func(context.Context) (int64, error)
18
+
19
+ // Scheduler runs cronjobs at regular intervals.
20
+ type Scheduler struct {
21
+ ctx context.Context
22
+ interval time.Duration
23
+
24
+ readerOpt *ReaderOptions
25
+ writerOpt *WriterOptions
26
+ versionCheck VersionCheck
27
+
28
+ // hooks
29
+ beforeHooks []BeforeHook
30
+ afterHooks []AfterHook
31
+ }
32
+
33
+ // Every creates a scheduler.
34
+ func Every(interval time.Duration) *Scheduler {
35
+ return &Scheduler{ctx: context.Background(), interval: interval}
36
+ }
37
+
38
+ // WithContext sets a custom context for the run.
39
+ func (s *Scheduler) WithContext(ctx context.Context) *Scheduler {
40
+ s.ctx = ctx
41
+ return s
42
+ }
43
+
44
+ // BeforeSync adds custom before hooks.
45
+ func (s *Scheduler) BeforeSync(hooks ...BeforeHook) *Scheduler {
46
+ s.beforeHooks = append(s.beforeHooks, hooks...)
47
+ return s
48
+ }
49
+
50
+ // AfterSync adds custom after hooks.
51
+ func (s *Scheduler) AfterSync(hooks ...AfterHook) *Scheduler {
52
+ s.afterHooks = append(s.afterHooks, hooks...)
53
+ return s
54
+ }
55
+
56
+ // WithReaderOptions sets custom reader options for consumers.
57
+ func (s *Scheduler) WithReaderOptions(opt *ReaderOptions) *Scheduler {
58
+ s.readerOpt = opt
59
+ return s
60
+ }
61
+
62
+ // Consume starts a consumer job.
63
+ func (s *Scheduler) Consume(csm Consumer, cfn ConsumeFunc) (*CronJob, error) {
64
+ return newCronJob(s.ctx, s.interval, func(ctx context.Context) error {
65
+ version := csm.Version()
66
+ if !s.runBeforeHooks(version) {
67
+ return nil
68
+ }
69
+
70
+ status, err := csm.Consume(ctx, s.readerOpt, cfn)
71
+ s.runAfterHooks(status, err)
72
+ return err
73
+ })
74
+ }
75
+
76
+ // WithWriterOptions sets custom writer options for producers.
77
+ func (s *Scheduler) WithWriterOptions(opt *WriterOptions) *Scheduler {
78
+ s.writerOpt = opt
79
+ return s
80
+ }
81
+
82
+ // WithVersionCheck sets a custom version check for producers.
83
+ func (s *Scheduler) WithVersionCheck(fn VersionCheck) *Scheduler {
84
+ s.versionCheck = fn
85
+ return s
86
+ }
87
+
88
+ // Produce starts a producer job.
89
+ func (s *Scheduler) Produce(pcr *Producer, pfn ProduceFunc) (*CronJob, error) {
90
+ return s.produce(func(ctx context.Context, version int64) (*Status, error) {
91
+ return pcr.Produce(ctx, version, s.writerOpt, pfn)
92
+ })
93
+ }
94
+
95
+ // ProduceIncrementally starts an incremental producer job.
96
+ func (s *Scheduler) ProduceIncrementally(pcr *IncrementalProducer, pfn IncrementalProduceFunc) (*CronJob, error) {
97
+ return s.produce(func(ctx context.Context, version int64) (*Status, error) {
98
+ return pcr.Produce(ctx, version, s.writerOpt, pfn)
99
+ })
100
+ }
101
+
102
+ func (s *Scheduler) produce(fn func(context.Context, int64) (*Status, error)) (*CronJob, error) {
103
+ return newCronJob(s.ctx, s.interval, func(ctx context.Context) error {
104
+ var version int64
105
+ if s.versionCheck != nil {
106
+ latest, err := s.versionCheck(s.ctx)
107
+ if err != nil {
108
+ s.runAfterHooks(nil, err)
109
+ return err
110
+ }
111
+ version = latest
112
+ }
113
+
114
+ if !s.runBeforeHooks(version) {
115
+ return nil
116
+ }
117
+
118
+ status, err := fn(ctx, version)
119
+ s.runAfterHooks(status, err)
120
+ return err
121
+ })
122
+ }
123
+
124
+ func (s *Scheduler) runBeforeHooks(version int64) bool {
125
+ for _, hook := range s.beforeHooks {
126
+ if !hook(version) {
127
+ return false
128
+ }
129
+ }
130
+ return true
131
+ }
132
+
133
+ func (s *Scheduler) runAfterHooks(status *Status, err error) {
134
+ for _, hook := range s.afterHooks {
135
+ hook(status, err)
136
+ }
137
+ }
138
+
139
+ // CronJob runs at regular intervals until it's stopped.
140
+ type CronJob struct {
141
+ cancel context.CancelFunc
142
+ interval time.Duration
143
+ perform func(context.Context) error
144
+ wait sync.WaitGroup
145
+ }
146
+
147
+ func newCronJob(ctx context.Context, interval time.Duration, perform func(context.Context) error) (*CronJob, error) {
148
+ if err := perform(ctx); err != nil {
149
+ return nil, err
150
+ }
151
+
152
+ ctx, cancel := context.WithCancel(ctx)
153
+ job := &CronJob{cancel: cancel, interval: interval, perform: perform}
154
+ go job.loop(ctx)
155
+ return job, nil
156
+ }
157
+
158
+ // Stop stops the job and waits until it is complete.
159
+ func (j *CronJob) Stop() {
160
+ j.cancel()
161
+ j.wait.Wait()
162
+ }
163
+
164
+ func (j *CronJob) loop(ctx context.Context) {
165
+ j.wait.Add(1)
166
+ defer j.wait.Done()
167
+
168
+ ticker := time.NewTicker(j.interval)
169
+ defer ticker.Stop()
170
+
171
+ for {
172
+ select {
173
+ case <-ctx.Done():
174
+ return
175
+ case <-ticker.C:
176
+ _ = j.perform(ctx)
177
+ }
178
+ }
179
+ }
data/scheduler_test.go ADDED
@@ -0,0 +1,171 @@
1
+ package feedx_test
2
+
3
+ import (
4
+ "context"
5
+ "errors"
6
+ "fmt"
7
+ "sync/atomic"
8
+ "testing"
9
+ "time"
10
+
11
+ "github.com/bsm/bfs"
12
+ "github.com/bsm/feedx"
13
+ )
14
+
15
+ func TestScheduler(t *testing.T) {
16
+ beforeCallbacks := new(atomic.Int32)
17
+ afterCallbacks := new(atomic.Int32)
18
+ numCycles := new(atomic.Int32)
19
+ numErrors := new(atomic.Int32)
20
+
21
+ resetCounters := func() {
22
+ beforeCallbacks.Store(0)
23
+ afterCallbacks.Store(0)
24
+ numCycles.Store(0)
25
+ numErrors.Store(0)
26
+ }
27
+
28
+ obj := bfs.NewInMemObject("file.json")
29
+ defer obj.Close()
30
+
31
+ t.Run("produce", func(t *testing.T) {
32
+ resetCounters()
33
+
34
+ pcr := feedx.NewProducerForRemote(obj)
35
+ defer pcr.Close()
36
+
37
+ job, err := feedx.Every(time.Millisecond).
38
+ BeforeSync(func(_ int64) bool {
39
+ beforeCallbacks.Add(1)
40
+ return true
41
+ }).
42
+ AfterSync(func(_ *feedx.Status, err error) {
43
+ afterCallbacks.Add(1)
44
+
45
+ if err != nil {
46
+ numErrors.Add(1)
47
+ }
48
+ }).
49
+ WithVersionCheck(func(_ context.Context) (int64, error) {
50
+ return 101, nil
51
+ }).
52
+ Produce(pcr, func(w *feedx.Writer) error {
53
+ if numCycles.Add(1)%2 == 0 {
54
+ return fmt.Errorf("failed!")
55
+ }
56
+ return nil
57
+ })
58
+ if err != nil {
59
+ t.Fatal("unexpected error", err)
60
+ }
61
+
62
+ time.Sleep(5 * time.Millisecond)
63
+ job.Stop()
64
+ time.Sleep(2 * time.Millisecond)
65
+
66
+ ranTimes := numCycles.Load()
67
+ if min, got := 4, int(ranTimes); got <= min {
68
+ t.Errorf("expected %d >= %d", got, min)
69
+ }
70
+ if exp, got := ranTimes, beforeCallbacks.Load(); exp != got {
71
+ t.Errorf("expected %d, got %d", exp, got)
72
+ }
73
+ if exp, got := ranTimes, afterCallbacks.Load(); exp != got {
74
+ t.Errorf("expected %d, got %d", exp, got)
75
+ }
76
+ if exp, got := ranTimes/2, numErrors.Load(); exp != got {
77
+ t.Errorf("expected %d, got %d", exp, got)
78
+ }
79
+
80
+ // wait a little longer, make sure job was stopped
81
+ time.Sleep(2 * time.Millisecond)
82
+ if exp, got := ranTimes, numCycles.Load(); exp != got {
83
+ t.Errorf("expected %d, got %d", exp, got)
84
+ }
85
+ })
86
+
87
+ t.Run("produce may fail", func(t *testing.T) {
88
+ resetCounters()
89
+
90
+ pcr := feedx.NewProducerForRemote(obj)
91
+ defer pcr.Close()
92
+
93
+ exp := fmt.Errorf("failed!")
94
+ _, err := feedx.Every(time.Millisecond).
95
+ Produce(pcr, func(w *feedx.Writer) error {
96
+ return exp
97
+ })
98
+ if !errors.Is(err, exp) {
99
+ t.Errorf("expected %v, got %v", exp, err)
100
+ }
101
+ })
102
+
103
+ t.Run("consume", func(t *testing.T) {
104
+ resetCounters()
105
+
106
+ csm := feedx.NewConsumerForRemote(obj)
107
+ defer csm.Close()
108
+
109
+ job, err := feedx.Every(time.Millisecond).
110
+ BeforeSync(func(_ int64) bool {
111
+ beforeCallbacks.Add(1)
112
+ return true
113
+ }).
114
+ AfterSync(func(_ *feedx.Status, err error) {
115
+ afterCallbacks.Add(1)
116
+
117
+ if err != nil {
118
+ numErrors.Add(1)
119
+ }
120
+ }).
121
+ Consume(csm, func(r *feedx.Reader) error {
122
+ if numCycles.Add(1)%2 == 0 {
123
+ return fmt.Errorf("failed!")
124
+ }
125
+ return nil
126
+ })
127
+ if err != nil {
128
+ t.Fatal("unexpected error", err)
129
+ }
130
+
131
+ time.Sleep(5 * time.Millisecond)
132
+ job.Stop()
133
+ time.Sleep(2 * time.Millisecond)
134
+
135
+ ranTimes := numCycles.Load()
136
+ if min, got := 4, int(ranTimes); got <= min {
137
+ t.Errorf("expected %d >= %d", got, min)
138
+ }
139
+ if exp, got := ranTimes, beforeCallbacks.Load(); exp != got {
140
+ t.Errorf("expected %d, got %d", exp, got)
141
+ }
142
+ if exp, got := ranTimes, afterCallbacks.Load(); exp != got {
143
+ t.Errorf("expected %d, got %d", exp, got)
144
+ }
145
+ if exp, got := ranTimes/2, numErrors.Load(); exp != got {
146
+ t.Errorf("expected %d, got %d", exp, got)
147
+ }
148
+
149
+ // wait a little longer, make sure job was stopped
150
+ time.Sleep(2 * time.Millisecond)
151
+ if exp, got := ranTimes, numCycles.Load(); exp != got {
152
+ t.Errorf("expected %d, got %d", exp, got)
153
+ }
154
+ })
155
+
156
+ t.Run("consume may fail", func(t *testing.T) {
157
+ resetCounters()
158
+
159
+ csm := feedx.NewConsumerForRemote(obj)
160
+ defer csm.Close()
161
+
162
+ exp := fmt.Errorf("failed!")
163
+ _, err := feedx.Every(time.Millisecond).
164
+ Consume(csm, func(r *feedx.Reader) error {
165
+ return exp
166
+ })
167
+ if !errors.Is(err, exp) {
168
+ t.Errorf("expected %v, got %v", exp, err)
169
+ }
170
+ })
171
+ }
data/writer.go CHANGED
@@ -3,8 +3,9 @@ package feedx
3
3
  import (
4
4
  "bufio"
5
5
  "context"
6
+ "errors"
6
7
  "io"
7
- "time"
8
+ "strconv"
8
9
 
9
10
  "github.com/bsm/bfs"
10
11
  )
@@ -19,9 +20,9 @@ type WriterOptions struct {
19
20
  // Default: auto-detected from URL path.
20
21
  Compression Compression
21
22
 
22
- // Provides an optional last modified timestamp which is stored with the remote metadata.
23
- // Default: time.Time{}.
24
- LastMod time.Time
23
+ // Provides an optional version which is stored with the remote metadata.
24
+ // Default: 0
25
+ Version int64
25
26
  }
26
27
 
27
28
  func (o *WriterOptions) norm(name string) {
@@ -38,7 +39,7 @@ type Writer struct {
38
39
  ctx context.Context
39
40
  remote *bfs.Object
40
41
  opt WriterOptions
41
- num int
42
+ num int64
42
43
 
43
44
  bw bfs.Writer
44
45
  cw io.WriteCloser // compression writer
@@ -100,7 +101,7 @@ func (w *Writer) Encode(v interface{}) error {
100
101
  }
101
102
 
102
103
  // NumWritten returns the number of written values.
103
- func (w *Writer) NumWritten() int {
104
+ func (w *Writer) NumWritten() int64 {
104
105
  return w.num
105
106
  }
106
107
 
@@ -109,7 +110,7 @@ func (w *Writer) Discard() error {
109
110
  err := w.close()
110
111
  if w.bw != nil {
111
112
  if e := w.bw.Discard(); e != nil {
112
- err = e
113
+ err = errors.Join(err, e)
113
114
  }
114
115
  }
115
116
  return err
@@ -120,7 +121,7 @@ func (w *Writer) Commit() error {
120
121
  err := w.close()
121
122
  if w.bw != nil {
122
123
  if e := w.bw.Commit(); e != nil {
123
- err = e
124
+ err = errors.Join(err, e)
124
125
  }
125
126
  }
126
127
  return err
@@ -129,17 +130,17 @@ func (w *Writer) Commit() error {
129
130
  func (w *Writer) close() (err error) {
130
131
  if w.fe != nil {
131
132
  if e := w.fe.Close(); e != nil {
132
- err = e
133
+ err = errors.Join(err, e)
133
134
  }
134
135
  }
135
136
  if w.ww != nil {
136
137
  if e := w.ww.Flush(); e != nil {
137
- err = e
138
+ err = errors.Join(err, e)
138
139
  }
139
140
  }
140
141
  if w.cw != nil {
141
142
  if e := w.cw.Close(); e != nil {
142
- err = e
143
+ err = errors.Join(err, e)
143
144
  }
144
145
  }
145
146
  return err
@@ -147,9 +148,8 @@ func (w *Writer) close() (err error) {
147
148
 
148
149
  func (w *Writer) ensureCreated() error {
149
150
  if w.bw == nil {
150
- ts := timestampFromTime(w.opt.LastMod)
151
151
  bw, err := w.remote.Create(w.ctx, &bfs.WriteOptions{
152
- Metadata: bfs.Metadata{metaLastModified: ts.String()},
152
+ Metadata: bfs.Metadata{metaVersion: strconv.FormatInt(w.opt.Version, 10)},
153
153
  })
154
154
  if err != nil {
155
155
  return err