liquidsoap-prettier 1.8.2 → 1.8.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/check-formatting.yml +32 -0
- package/README.md +31 -5
- package/package.json +1 -1
- package/src/cli.js +104 -9
- package/tests/liq/audio.liq +460 -0
- package/tests/liq/autocue.liq +1081 -0
- package/tests/liq/clock.liq +14 -0
- package/tests/liq/cron.liq +74 -0
- package/tests/liq/error.liq +48 -0
- package/tests/liq/extra/audio.liq +677 -0
- package/tests/liq/extra/audioscrobbler.liq +482 -0
- package/tests/liq/extra/deprecations.liq +976 -0
- package/tests/liq/extra/externals.liq +196 -0
- package/tests/liq/extra/fades.liq +260 -0
- package/tests/liq/extra/file.liq +66 -0
- package/tests/liq/extra/http.liq +160 -0
- package/tests/liq/extra/interactive.liq +917 -0
- package/tests/liq/extra/metadata.liq +75 -0
- package/tests/liq/extra/native.liq +201 -0
- package/tests/liq/extra/openai.liq +150 -0
- package/tests/liq/extra/server.liq +177 -0
- package/tests/liq/extra/source.liq +476 -0
- package/tests/liq/extra/spinitron.liq +272 -0
- package/tests/liq/extra/telnet.liq +266 -0
- package/tests/liq/extra/video.liq +59 -0
- package/tests/liq/extra/visualization.liq +68 -0
- package/tests/liq/fades.liq +941 -0
- package/tests/liq/ffmpeg.liq +605 -0
- package/tests/liq/file.liq +387 -0
- package/tests/liq/getter.liq +74 -0
- package/tests/liq/hls.liq +329 -0
- package/tests/liq/http.liq +1048 -0
- package/tests/liq/http_codes.liq +447 -0
- package/tests/liq/icecast.liq +58 -0
- package/tests/liq/io.liq +106 -0
- package/tests/liq/liquidsoap.liq +31 -0
- package/tests/liq/list.liq +440 -0
- package/tests/liq/log.liq +47 -0
- package/tests/liq/lufs.liq +295 -0
- package/tests/liq/math.liq +23 -0
- package/tests/liq/medialib.liq +752 -0
- package/tests/liq/metadata.liq +253 -0
- package/tests/liq/nfo.liq +258 -0
- package/tests/liq/null.liq +71 -0
- package/tests/liq/playlist.liq +1347 -0
- package/tests/liq/predicate.liq +106 -0
- package/tests/liq/process.liq +93 -0
- package/tests/liq/profiler.liq +5 -0
- package/tests/liq/protocols.liq +1139 -0
- package/tests/liq/ref.liq +28 -0
- package/tests/liq/replaygain.liq +135 -0
- package/tests/liq/request.liq +467 -0
- package/tests/liq/resolvers.liq +33 -0
- package/tests/liq/runtime.liq +70 -0
- package/tests/liq/server.liq +99 -0
- package/tests/liq/settings.liq +41 -0
- package/tests/liq/socket.liq +33 -0
- package/tests/liq/source.liq +362 -0
- package/tests/liq/sqlite.liq +161 -0
- package/tests/liq/stdlib.liq +172 -0
- package/tests/liq/string.liq +476 -0
- package/tests/liq/switches.liq +197 -0
- package/tests/liq/testing.liq +37 -0
- package/tests/liq/thread.liq +161 -0
- package/tests/liq/tracks.liq +100 -0
- package/tests/liq/utils.liq +81 -0
- package/tests/liq/video.liq +918 -0
|
@@ -0,0 +1,605 @@
|
|
|
1
|
+
%ifdef track.ffmpeg.raw.encode.audio
let ffmpeg.encode = ()
let ffmpeg.raw.encode = ()

# Encode a source's audio content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param f Encoding format.
# @param s Source whose audio track should be encoded.
def ffmpeg.encode.audio(~id=null("ffmpeg.encode.audio"), f, s) =
  audio = track.ffmpeg.encode.audio(f, source.tracks(s).audio)
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio
    }
  )
end

# Encode a source's audio content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param f Encoding format.
# @param s Source whose audio track should be encoded.
def ffmpeg.raw.encode.audio(~id=null("ffmpeg.raw.encode.audio"), f, s) =
  audio = track.ffmpeg.raw.encode.audio(f, source.tracks(s).audio)
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio
    }
  )
end

# Encode a source's video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param f Encoding format.
# @param s Source whose video track should be encoded.
def ffmpeg.encode.video(~id=null("ffmpeg.encode.video"), f, s) =
  video = track.ffmpeg.encode.video(f, source.tracks(s).video)
  source(
    id=id,
    {
      track_marks = track.track_marks(video),
      metadata = track.metadata(video),
      video = video
    }
  )
end

# Encode a source's video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param f Encoding format.
# @param s Source whose video track should be encoded.
def ffmpeg.raw.encode.video(~id=null("ffmpeg.raw.encode.video"), f, s) =
  video = track.ffmpeg.raw.encode.video(f, source.tracks(s).video)
  source(
    id=id,
    {
      track_marks = track.track_marks(video),
      metadata = track.metadata(video),
      video = video
    }
  )
end

# Encode a source's audio and video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param f Encoding format.
# @param s Source whose audio and video tracks should be encoded.
def ffmpeg.encode.audio_video(~id=null("ffmpeg.encode.audio_video"), f, s) =
  let {audio, video} = source.tracks(s)
  audio = track.ffmpeg.encode.audio(f, audio)
  video = track.ffmpeg.encode.video(f, video)

  # Track marks and metadata for the resulting source are taken from the
  # encoded audio track.
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio,
      video = video
    }
  )
end

# Encode a source's audio and video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param f Encoding format.
# @param s Source whose audio and video tracks should be encoded.
def ffmpeg.raw.encode.audio_video(
  ~id=null("ffmpeg.raw.encode.audio_video"),
  f,
  s
) =
  let {audio, video} = source.tracks(s)
  audio = track.ffmpeg.raw.encode.audio(f, audio)
  video = track.ffmpeg.raw.encode.video(f, video)

  # Track marks and metadata for the resulting source are taken from the
  # encoded audio track.
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio,
      video = video
    }
  )
end
%endif
|
|
99
|
+
|
|
100
|
+
%ifdef track.ffmpeg.decode.audio
let ffmpeg.decode = ()
let ffmpeg.raw.decode = ()

# Decode a source's audio content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param s Source whose audio track should be decoded.
def ffmpeg.decode.audio(~id=null("ffmpeg.decode.audio"), s) =
  audio = track.ffmpeg.decode.audio(source.tracks(s).audio)
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio
    }
  )
end

# Decode a source's audio content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param s Source whose audio track should be decoded.
def ffmpeg.raw.decode.audio(~id=null("ffmpeg.raw.decode.audio"), s) =
  audio = track.ffmpeg.raw.decode.audio(source.tracks(s).audio)
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio
    }
  )
end

# Decode a source's video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param s Source whose video track should be decoded.
def ffmpeg.decode.video(~id=null("ffmpeg.decode.video"), s) =
  video = track.ffmpeg.decode.video(source.tracks(s).video)
  source(
    id=id,
    {
      track_marks = track.track_marks(video),
      metadata = track.metadata(video),
      video = video
    }
  )
end

# Decode a source's video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param s Source whose video track should be decoded.
def ffmpeg.raw.decode.video(~id=null("ffmpeg.raw.decode.video"), s) =
  video = track.ffmpeg.raw.decode.video(source.tracks(s).video)
  source(
    id=id,
    {
      track_marks = track.track_marks(video),
      metadata = track.metadata(video),
      video = video
    }
  )
end

# Decode a source's audio and video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param s Source whose audio and video tracks should be decoded.
def ffmpeg.decode.audio_video(~id=null("ffmpeg.decode.audio_video"), s) =
  let {audio, video} = source.tracks(s)
  audio = track.ffmpeg.decode.audio(audio)
  video = track.ffmpeg.decode.video(video)

  # Track marks and metadata for the resulting source are taken from the
  # decoded audio track.
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio,
      video = video
    }
  )
end

# Decode a source's audio and video content
# @category Source / Conversion
# @param ~id Force the value of the source ID.
# @param s Source whose audio and video tracks should be decoded.
def ffmpeg.raw.decode.audio_video(
  ~id=null("ffmpeg.raw.decode.audio_video"),
  s
) =
  let {audio, video} = source.tracks(s)
  audio = track.ffmpeg.raw.decode.audio(audio)
  video = track.ffmpeg.raw.decode.video(video)

  # Track marks and metadata for the resulting source are taken from the
  # decoded audio track.
  source(
    id=id,
    {
      track_marks = track.track_marks(audio),
      metadata = track.metadata(audio),
      audio = audio,
      video = video
    }
  )
end
%endif
|
|
197
|
+
|
|
198
|
+
%ifdef input.ffmpeg
# Stream from a video4linux2 input device, such as a webcam.
# @category Source / Input / Active
# @param ~id Force the value of the source ID.
# @param ~max_buffer Maximum data buffer in seconds
# @param ~device V4L2 device to use.
def input.v4l2(~id=null, ~max_buffer=0.5, ~device="/dev/video0") =
  (input.ffmpeg(id=id, format="v4l2", max_buffer=max_buffer, device) :
    source(video=canvas)
  )
end

# A test video source, which generates various patterns.
# @category Source / Video processing
# @param ~id Force the value of the source ID.
# @param ~pattern Pattern drawn in the video: `"testsrc"`, `"testsrc2"`, `"smptebars"`, `"pal75bars"`, `"pal100bars"`, `"smptehdbars"`, `"yuvtestsrc"` or `"rgbtestsrc"` and more. Support any of the patterns supported by `ffmpeg`.
# @param ~max_buffer Maximum data buffer in seconds
# @param ~duration Duration of the source.
def video.testsrc.ffmpeg(
  ~id=null,
  ~pattern="testsrc",
  ~max_buffer=0.5,
  ~duration=null
) =
  # Build a lavfi source description of the form
  # "PATTERN=size=WxH:rate=R[:duration=D]" from the frame settings.
  size = "size=#{settings.frame.video.width()}x#{settings.frame.video.height()}"
  rate = "rate=#{settings.frame.video.framerate()}"
  duration = if null.defined(duration) then ":duration=#{duration}" else "" end
  src = "#{pattern}=#{size}:#{rate}#{duration}"
  (input.ffmpeg(id=id, max_buffer=max_buffer, format="lavfi", src) :
    source(video=canvas)
  )
end

# Read an RTMP stream.
# @category Source / Input / Active
# @param ~id Force the value of the source ID.
# @param ~max_buffer Maximum data buffer in seconds
# @param ~listen Act as a RTMP server and wait for incoming connection
# @param url URL to read RTMP from, in the form `rtmp://IP:PORT/ENDPOINT`
def input.rtmp(~id=null, ~max_buffer=5., ~listen=true, url) =
  input.ffmpeg(
    id=id,
    max_buffer=max_buffer,
    format="live_flv",
    self_sync=true,
    int_args=[("listen", listen ? 1 : 0)],
    url
  )
end
%endif
|
|
246
|
+
|
|
247
|
+
%ifdef ffmpeg.filter.drawtext
let video.add_text.ffmpeg = ()
let video.add_text.ffmpeg.raw = ()

# Display a text. Use this operator inside ffmpeg filters with a ffmpeg video input
# Returns a ffmpeg video output with `on_change` and `on_metadata` methods to be used
# to update the output text.
# @category Source / Video processing
# @param ~color Text color (in 0xRRGGBB format).
# @param ~cycle Cycle text when it reaches left boundary.
# @param ~font Path to ttf font file.
# @param ~metadata Change text on a particular metadata (empty string means disabled).
# @param ~size Font size.
# @param ~speed Horizontal speed in pixels per second (0 means no scrolling and update \
#               according to x and y in case they are variable).
# @param ~graph a ffmpeg filter graph to attach this filter to.
# @param ~x x offset.
# @param ~y y offset.
# @params Text to display.
# @method on_change Method to call when parameters have changed to update the filter's rendered out, including when text changes.
# @method on_metadata Method to call on new metadata.
def video.add_text.ffmpeg.raw.filter(
  ~color=0xffffff,
  ~cycle=true,
  ~font=null,
  ~metadata=null,
  ~size=18,
  ~speed=70,
  ~x=getter(10),
  ~y=getter(10),
  ~graph,
  d=getter(""),
  s
) =
  color = "0x" ^ string.hex_of_int(pad=6, color)

  # When scrolling (speed != 0), replace x with a time-based getter that moves
  # the effective position left by `speed` pixels per second and resets it
  # whenever the underlying x getter changes.
  x =
    if
      speed != 0
    then
      last_time = ref(time())
      changed = getter.changes(x)
      effective_x = ref(getter.get(x))
      getter(
        {
          begin
            cur_time = time()
            traveled_to = int(float(speed) * (cur_time - last_time()))
            last_time := cur_time
            if
              changed()
            then
              effective_x := getter.get(x)
            else
              effective_x := effective_x() - traveled_to
            end

            # Once the text has scrolled past the left boundary, bring it back
            # on the right side of the frame.
            if
              effective_x() < 0
            then
              effective_x := settings.frame.video.width() - effective_x()
            end

            effective_x()
          end
        }
      )
    else
      x
    end

  filter =
    ffmpeg.filter.drawtext.create(
      fontfile=font,
      fontsize="#{size}",
      x="#{getter.get(x)}",
      y="#{getter.get(y)}",
      fontcolor=color,
      text=getter.get(d),
      graph
    )

  # Escape the characters that are special in drawtext option values before
  # interpolating the text into the filter arguments.
  def escape =
    def special_char(~encoding:_, s) =
      string.contains(substring=s, "(',%,\\,:,{,})")
    end

    def escape_char(~encoding:_, s) =
      "\\#{s}"
    end

    fun (s) ->
      string.escape(special_char=special_char, escape_char=escape_char, s)
  end

  def escaped_text() =
    escape(getter.get(d))
  end

  # Each entry pairs a drawtext filter instance with a getter producing the
  # argument string used to re-initialize it.
  filters =
    [
      {
        args =
          getter(
            {"x=#{getter.get(x)}:y=#{getter.get(y)}:text=#{escaped_text()}"}
          ),
        filter = filter
      }
    ]

  # With cycling enabled, add a second drawtext instance whose x position is
  # shifted by the frame/text width, so the text re-enters while scrolling out.
  filters =
    if
      cycle
    then
      x = getter({"min(#{getter.get(x)}-w,#{getter.get(x)}-text_w)"})
      [
        ...filters,
        {
          args =
            getter(
              {"x=#{getter.get(x)}:y=#{getter.get(y)}:text=#{escaped_text()}"}
            ),
          filter =
            ffmpeg.filter.drawtext.create(
              fontfile=font,
              fontsize="#{size}",
              x="#{getter.get(x)}",
              y="#{getter.get(y)}",
              fontcolor=color,
              text=getter.get(d),
              graph
            )
        }
      ]
    else
      filters
    end

  # Track changes of (x, y, text) so on_change only re-sends arguments when
  # something actually changed.
  changed =
    getter.changes(getter({(getter.get(x), getter.get(y), getter.get(d))}))

  # Push the current arguments to every drawtext instance via the filter's
  # "reinit" process command.
  def on_change() =
    ignore(getter.get(x))
    ignore(getter.get(y))
    ignore(getter.get(d))
    if
      changed()
    then
      list.iter(
        (
          fun (el) ->
            ignore(el.filter.process_command("reinit", getter.get(el.args)))
        ),
        filters
      )
    end
  end

  # When a metadata key is configured, update the displayed text from each
  # non-empty value of that key.
  def on_metadata(m) =
    if
      null.defined(metadata)
    then
      meta = (null.get(metadata) : string)
      d = escape(m[meta])
      if
        d != ""
      then
        log(
          level=3,
          label="ffmpeg.filter.drawtext",
          "Setting new text #{d} from metadata #{meta}"
        )

        list.iter(
          (
            fun (el) -> ignore(el.filter.process_command("reinit", "text=#{d}"))
          ),
          filters
        )
      end
    end
  end

  # Chain the drawtext instances: feed s into the first filter and each
  # filter's output into the next.
  v =
    list.fold(
      (
        fun (cur, el) ->
          begin
            el.filter.set_input(cur)
            el.filter.output
          end
      ),
      s,
      filters
    )

  v.{on_change = on_change, on_metadata = on_metadata}
end
|
|
444
|
+
|
|
445
|
+
# Display a text. Use this operator inside ffmpeg filters with a input source
# @category Source / Video processing
# @param ~color Text color (in 0xRRGGBB format).
# @param ~cycle Cycle text when it reaches left boundary.
# @param ~font Path to ttf font file.
# @param ~metadata Change text on a particular metadata (empty string means disabled).
# @param ~size Font size.
# @param ~speed Horizontal speed in pixels per second (0 means no scrolling and update \
#               according to x and y in case they are variable).
# @param ~graph a ffmpeg filter graph to attach this filter to.
# @param ~x x offset.
# @param ~y y offset.
# @params Text to display.
def replaces video.add_text.ffmpeg.raw(
  ~color=0xffffff,
  ~cycle=true,
  ~font=null,
  ~metadata=null,
  ~size=18,
  ~speed=70,
  ~x=getter(10),
  ~y=getter(10),
  ~graph,
  d=getter(""),
  s
) =
  # Placeholder callbacks: they are registered on the source now and wired to
  # the filter's on_change/on_metadata methods once the filter exists below.
  on_frame = ref(fun () -> ())
  on_metadata = ref(fun (_) -> ())
  s.on_metadata(
    synchronous=true,
    fun (m) ->
      begin
        fn = on_metadata()
        fn(m)
      end
  )

  s.on_frame(
    synchronous=true,
    fun () ->
      begin
        fn = on_frame()
        fn()
      end
  )

  s = ffmpeg.filter.video.input(graph, source.tracks(s).video)
  v =
    video.add_text.ffmpeg.raw.filter(
      color=color,
      cycle=cycle,
      font=font,
      metadata=metadata,
      size=size,
      speed=speed,
      x=x,
      y=y,
      graph=graph,
      d,
      s
    )

  # Wire the source callbacks registered above to the filter's update methods.
  on_frame := v.on_change
  on_metadata := v.on_metadata
  ffmpeg.filter.null(graph, v)
end
|
|
511
|
+
|
|
512
|
+
# Internal implementation behind `video.add_text.ffmpeg`: encodes the source
# to ffmpeg raw audio/video, applies the drawtext filter graph (copying the
# audio through unchanged), then decodes back to internal content.
# @param ~id Force the value of the source ID.
# @param ~duration When set, limit the resulting source with `max_duration`.
def video.add_text.ffmpeg.internal(
  ~id=null,
  ~color=0xffffff,
  ~cycle=true,
  ~font=null,
  ~duration=null,
  ~metadata=null,
  ~size=18,
  ~speed=70,
  ~x=getter(10),
  ~y=getter(10),
  d,
  s
) =
  id = string.id.default(default="video.add_text.ffmpeg", id)
  s = ffmpeg.raw.encode.audio_video(%ffmpeg(%audio.raw, %video.raw), s)

  def mkfilter(graph) =
    v =
      video.add_text.ffmpeg.raw(
        color=color,
        cycle=cycle,
        font=font,
        metadata=metadata,
        size=size,
        speed=speed,
        x=x,
        y=y,
        graph=graph,
        d,
        s
      )

    v = ffmpeg.filter.video.output(graph, v)

    # Pass the audio through the graph untouched via acopy.
    a = ffmpeg.filter.audio.input(graph, source.tracks(s).audio)
    a = ffmpeg.filter.acopy(graph, a)
    a = ffmpeg.filter.audio.output(graph, a)

    source(
      id=id,
      {
        audio = a,
        video = v,
        metadata = track.metadata(a),
        track_marks = track.track_marks(a)
      }
    )
  end

  s = ffmpeg.filter.create(mkfilter)
  s = ffmpeg.raw.decode.audio_video(s)
  null.defined(duration) ? max_duration(null.get(duration), s) : s
end
|
|
566
|
+
|
|
567
|
+
# Display a text.
# @category Source / Video processing
# @param ~id Force the value of the source ID.
# @param ~color Text color (in 0xRRGGBB format).
# @param ~cycle Cycle text when it reaches left boundary.
# @param ~font Path to ttf font file.
# @param ~metadata Change text on a particular metadata (empty string means disabled).
# @param ~size Font size.
# @param ~speed Horizontal speed in pixels per second (0 means no scrolling and update \
#               according to x and y in case they are variable).
# @param ~x x offset.
# @param ~y y offset.
# @params Text to display.
def replaces video.add_text.ffmpeg(
  %argsof(video.add_text.ffmpeg.internal),
  d,
  s
) =
  # Thin public wrapper: forward all arguments to the internal implementation.
  video.add_text.ffmpeg.internal(%argsof(video.add_text.ffmpeg.internal), d, s)
end
%endif
|
|
588
|
+
|
|
589
|
+
# video.add_text.available := [("ffmpeg", video.add_text.ffmpeg.internal), ...video.add_text.available()]
|
|
590
|
+
|
|
591
|
+
# if settings.video.add_text() != "sdl" then
|
|
592
|
+
# settings.video.add_text.set("ffmpeg")
|
|
593
|
+
# end
|
|
594
|
+
%ifdef ffmpeg.filter.video.output
let ffmpeg.filter.audio_video = ()

# Return a source with audio and video from a filter's output.
# @category Source / Output
# @param ~id Force the value of the source ID.
def ffmpeg.filter.audio_video.output(~id=null, graph, audio, video) =
  audio = ffmpeg.filter.audio.output(graph, audio.tracks().audio)
  video = ffmpeg.filter.video.output(graph, video.tracks().video)
  source(id=id, {audio = audio, video = video})
end
%endif
|