rays-video 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.doc/ext/rays-video/native.cpp +17 -0
- data/.doc/ext/rays-video/video.cpp +257 -0
- data/.github/PULL_REQUEST_TEMPLATE.md +12 -0
- data/.github/workflows/release-gem.yml +51 -0
- data/.github/workflows/tag.yml +35 -0
- data/.github/workflows/test.yml +37 -0
- data/.github/workflows/utils.rb +127 -0
- data/CONTRIBUTING.md +7 -0
- data/ChangeLog.md +19 -0
- data/Gemfile +5 -0
- data/LICENSE +21 -0
- data/README.md +147 -0
- data/Rakefile +25 -0
- data/VERSION +1 -0
- data/ext/rays-video/defs.h +17 -0
- data/ext/rays-video/extconf.rb +23 -0
- data/ext/rays-video/native.cpp +17 -0
- data/ext/rays-video/video.cpp +282 -0
- data/include/rays/video.h +102 -0
- data/include/rays-video/ruby/video.h +47 -0
- data/include/rays-video/ruby.h +10 -0
- data/include/rays-video.h +10 -0
- data/lib/rays/video.rb +45 -0
- data/lib/rays-video/ext.rb +1 -0
- data/lib/rays-video/extension.rb +41 -0
- data/lib/rays-video.rb +3 -0
- data/rays-video.gemspec +39 -0
- data/src/ios/video.mm +633 -0
- data/src/ios/video_audio_in.h +22 -0
- data/src/ios/video_audio_in.mm +252 -0
- data/src/osx/video.mm +633 -0
- data/src/osx/video_audio_in.h +22 -0
- data/src/osx/video_audio_in.mm +252 -0
- data/src/sdl/video.cpp +86 -0
- data/src/sdl/video_audio_in.cpp +63 -0
- data/src/video.cpp +278 -0
- data/src/video.h +50 -0
- data/src/video_audio_in.h +57 -0
- data/src/win32/video.cpp +86 -0
- data/src/win32/video_audio_in.cpp +63 -0
- data/test/helper.rb +15 -0
- data/test/test_video.rb +165 -0
- metadata +145 -0
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
// -*- mode: objc -*-
|
|
2
|
+
#import "video_audio_in.h"
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
#import <AVFoundation/AVFoundation.h>
|
|
6
|
+
#include "rays/exception.h"
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
namespace Rays
|
|
10
|
+
{
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
struct VideoAudioIn::Data
{

	AVAsset* asset = nil;

	AVAssetTrack* audio_track = nil;

	// total number of samples in the audio track (duration * sample rate)
	uint nsamples = 0;

	// holds the most recently decoded chunk of PCM samples
	Beeps::Signals buffer;

	// absolute sample position (within the whole track) of the first
	// sample currently held in 'buffer'
	uint buffer_offset = 0;

	AVAssetReader* reader = nil;

	AVAssetReaderAudioMixOutput* output = nil;

	// Captures the asset and its audio track and sizes the decode buffer
	// from the track's stream description.
	// Raises via rays_error() when the track carries no usable format.
	Data (AVAsset* asset, AVAssetTrack* audio_track)
	{
		assert(asset && audio_track);

		CMFormatDescriptionRef format =
			(__bridge CMFormatDescriptionRef) audio_track.formatDescriptions.firstObject;
		if (!format)
			rays_error(__FILE__, __LINE__, "failed to get CMFormatDescription");

		const AudioStreamBasicDescription* desc =
			CMAudioFormatDescriptionGetStreamBasicDescription(format);
		if (!desc)
			rays_error(__FILE__, __LINE__, "failed to get AudioStreamBasicDescription");

		// retain only after every failure point above, so a throw cannot
		// leak the references (this file uses manual retain/release)
		this->asset       = [asset retain];
		this->audio_track = [audio_track retain];

		// cap at stereo; create_reader() requests the same channel count
		uint nchannels = std::min<uint>(desc->mChannelsPerFrame, 2);
		this->buffer   = Beeps::Signals(2048, nchannels, desc->mSampleRate);

		double duration = CMTimeGetSeconds(audio_track.timeRange.duration);
		this->nsamples  = (uint) (duration * buffer.sample_rate());
	}

	~Data ()
	{
		clear_reader();
		[audio_track release];
		[asset release];
	}

	// (Re)creates the asset reader positioned at 'offset' samples into the
	// track, requesting non-interleaved 32-bit float PCM.
	void create_reader (uint offset)
	{
		clear_reader();

		NSError* error = nil;
		AVAssetReader* reader =
			[[[AVAssetReader alloc] initWithAsset: asset error: &error] autorelease];
		if (!reader || error)
			rays_error(__FILE__, __LINE__, "failed to create AVAssetReader");

		AVAssetReaderAudioMixOutput* output = [AVAssetReaderAudioMixOutput
			assetReaderAudioMixOutputWithAudioTracks: @[audio_track]
			audioSettings: @{
				AVFormatIDKey:               @(kAudioFormatLinearPCM),
				AVLinearPCMBitDepthKey:      @32,
				AVLinearPCMIsFloatKey:       @YES,
				AVLinearPCMIsNonInterleaved: @YES,
				AVNumberOfChannelsKey:       @(buffer.nchannels()),
			}];
		if (![reader canAddOutput: output])
			rays_error(__FILE__, __LINE__, "cannot add audio output");

		[reader addOutput: output];
		// seek by restricting the time range to [offset / rate, +inf)
		reader.timeRange = CMTimeRangeMake(
			CMTimeMakeWithSeconds(
				(double) offset / buffer.sample_rate(),
				(int32_t) buffer.sample_rate()),
			kCMTimePositiveInfinity);
		if (![reader startReading])
			rays_error(__FILE__, __LINE__, "failed to start reading audio");

		this->reader        = [reader retain];
		this->output        = [output retain];
		this->buffer_offset = offset;
		this->buffer.clear();
	}

	void clear_reader ()
	{
		// cancel only while actively reading; other states need no cancel
		if (reader && reader.status == AVAssetReaderStatusReading)
			[reader cancelReading];

		[output release];
		output = nil;
		[reader release];
		reader = nil;
	}

	// Appends decoded samples to 'signals' starting at '*offset' and
	// advances '*offset' by the number of samples appended. Recreates the
	// reader when the requested offset falls outside the buffered range
	// (i.e. on seek). Returns false when nothing could be produced.
	bool read_next (Beeps::Signals* signals, uint* offset)
	{
		assert(signals && offset);

		if (
			!reader ||
			*offset < buffer_offset ||
			*offset > buffer_offset + buffer.nsamples())
		{
			create_reader(*offset);
		}

		if (*offset == buffer_offset + buffer.nsamples())
		{
			if (!read_next_buffer()) return false;
			buffer_offset = *offset;
		}

		uint size = signals->append(buffer, *offset - buffer_offset);
		*offset += size;
		return size > 0;
	}

	// Decodes the next chunk from the asset reader into 'buffer'.
	// Returns false at end of stream or on any decode problem.
	bool read_next_buffer ()
	{
		if (!reader || !output || reader.status != AVAssetReaderStatusReading)
			return false;

		// Check for NULL *before* wrapping in shared_ptr: a shared_ptr
		// constructed with a null pointer and a custom deleter still calls
		// the deleter on destruction, and CFRelease(NULL) crashes.
		// copyNextSampleBuffer returns NULL at normal end of stream, so
		// wrapping unconditionally would crash on every track completion.
		CMSampleBufferRef samples_ref = [output copyNextSampleBuffer];
		if (!samples_ref)
			return false;

		std::shared_ptr<opaqueCMSampleBuffer> samples(samples_ref, CFRelease);

		uint nsamples = (uint) CMSampleBufferGetNumSamples(samples.get());
		if (nsamples <= 0)
			return false;

		CMBlockBufferRef block = CMSampleBufferGetDataBuffer(samples.get());
		if (!block)
			return false;

		size_t size = CMBlockBufferGetDataLength(block);
		char* data  = NULL;
		// NOTE(review): assumes the block buffer is contiguous; for a
		// non-contiguous buffer the returned pointer covers less than
		// 'size' bytes — confirm, or use CMBlockBufferCreateContiguous.
		CMBlockBufferGetDataPointer(block, 0, NULL, &size, &data);
		if (!data || size <= 0)
			rays_error(__FILE__, __LINE__);

		CMFormatDescriptionRef format =
			CMSampleBufferGetFormatDescription(samples.get());
		if (!format)
			rays_error(__FILE__, __LINE__);

		const AudioStreamBasicDescription* desc =
			CMAudioFormatDescriptionGetStreamBasicDescription(format);
		if (!desc)
			rays_error(__FILE__, __LINE__);

		uint nchannels = desc->mChannelsPerFrame;
		if (nchannels != buffer.nchannels())
			rays_error(__FILE__, __LINE__);

		// non-interleaved PCM: each channel's samples are packed
		// back-to-back in the data block
		std::vector<const float*> channels(nchannels);
		for (uint ch = 0; ch < nchannels; ++ch)
			channels[ch] = (const float*) data + ch * nsamples;

		buffer.clear(nsamples);
		buffer.append(channels.data(), nsamples, nchannels, buffer.sample_rate());
		return buffer.nsamples() > 0;
	}

};// VideoAudioIn::Data
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
// Factory for VideoAudioIn::Data so that callers outside this translation
// unit (which cannot see the Data definition) can allocate one.
// Ownership of the returned pointer passes to the caller.
VideoAudioIn::Data*
VideoAudioIn_Data_create (AVAsset* asset, AVAssetTrack* audio_track)
{
	return new VideoAudioIn::Data(asset, audio_track);
}
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
// Takes ownership of 'data' (typically from VideoAudioIn_Data_create)
// via the 'self' smart-pointer member.
VideoAudioIn::VideoAudioIn (Data* data)
:	self(data)
{
}
|
|
192
|
+
|
|
193
|
+
// Out-of-line destructor: Data's own destructor (run through 'self')
// releases the reader and the retained AVFoundation objects.
VideoAudioIn::~VideoAudioIn ()
{
}
|
|
196
|
+
|
|
197
|
+
// Sample rate of the decoded audio, taken from the decode buffer
// (which was configured from the track's stream description).
double
VideoAudioIn::sample_rate () const
{
	return self->buffer.sample_rate();
}
|
|
202
|
+
|
|
203
|
+
// Number of output channels (capped at 2 when Data was constructed).
uint
VideoAudioIn::nchannels () const
{
	return self->buffer.nchannels();
}
|
|
208
|
+
|
|
209
|
+
// Total number of samples in the audio track, computed from the
// track duration at construction time.
uint
VideoAudioIn::nsamples () const
{
	return self->nsamples;
}
|
|
214
|
+
|
|
215
|
+
// Duration of the audio track in seconds (total samples / sample rate);
// 0 when the sample rate is not positive.
float
VideoAudioIn::seconds () const
{
	double rate = this->sample_rate();
	if (rate <= 0)
		return 0;

	return (float) (self->nsamples / rate);
}
|
|
223
|
+
|
|
224
|
+
// Valid only when the base object is valid and every piece of the
// audio-decoding state (asset, track, buffer) has been set up.
VideoAudioIn::operator bool () const
{
	if (!Super::operator bool())
		return false;

	return self->asset && self->audio_track && self->buffer;
}
|
|
232
|
+
|
|
233
|
+
// AVAssetReader-backed input supports arbitrary repositioning
// (read_next recreates the reader at the requested offset).
bool
VideoAudioIn::seekable () const
{
	return true;
}
|
|
238
|
+
|
|
239
|
+
// Fills 'signals' with decoded audio starting at '*offset', advancing
// '*offset' as samples are appended. Stops when the output is full or
// the track can deliver no more samples.
void
VideoAudioIn::generate (Context* context, Beeps::Signals* signals, uint* offset)
{
	Super::generate(context, signals, offset);

	while (!signals->full() && self->read_next(signals, offset))
		;
}
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
}// Rays
|