earl 0.3.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/ruby-tests.yml +32 -0
- data/.gitignore +5 -0
- data/.rubocop.yml +35 -0
- data/.rubocop_todo.yml +22 -0
- data/.ruby-gemset +1 -0
- data/.ruby-version +1 -0
- data/Gemfile +13 -1
- data/Guardfile +15 -0
- data/LICENSE +2 -2
- data/README.md +127 -25
- data/Rakefile +10 -2
- data/earl.gemspec +19 -14
- data/lib/earl/earl.rb +172 -0
- data/lib/earl/scraper.rb +92 -0
- data/lib/earl/version.rb +4 -2
- data/lib/earl.rb +11 -20
- data/spec/fixtures/bicycles.html +490 -0
- data/spec/fixtures/bicycles_without_description.html +489 -0
- data/spec/fixtures/bicycles_without_images.html +457 -0
- data/spec/fixtures/cassettes/feed/is_atom_feed.yml +2298 -0
- data/spec/fixtures/cassettes/feed/is_rss_feed.yml +48 -0
- data/spec/fixtures/cassettes/feed/no_feed.yml +69 -0
- data/spec/fixtures/cassettes/feed/with_atom_and_rss_feed.yml +1471 -0
- data/spec/fixtures/cassettes/feed/with_rss_feed.yml +47 -0
- data/spec/fixtures/cassettes/oembed/no_oembed.yml +101 -0
- data/spec/fixtures/cassettes/oembed/youtube_oembed.yml +129 -0
- data/spec/fixtures/page_as_atom.html +161 -0
- data/spec/fixtures/page_as_rss.html +151 -0
- data/spec/fixtures/page_with_atom_feed.html +39 -0
- data/spec/fixtures/page_with_rss_and_atom_feeds.html +40 -0
- data/spec/fixtures/page_with_rss_feed.html +39 -0
- data/spec/fixtures/page_without_feeds.html +36 -0
- data/spec/fixtures/youtube.html +1839 -0
- data/spec/integration/feed_spec.rb +78 -0
- data/spec/integration/oembed_spec.rb +36 -0
- data/spec/spec_helper.rb +21 -29
- data/spec/support/fixtures.rb +15 -0
- data/spec/support/vcr.rb +9 -0
- data/spec/unit/earl/earl_spec.rb +15 -0
- data/spec/unit/earl/feed_spec.rb +62 -0
- data/spec/unit/earl/oembed_spec.rb +50 -0
- data/spec/unit/earl/scraper_spec.rb +49 -0
- data/spec/unit/earl_spec.rb +74 -0
- metadata +90 -62
- data/.rvmrc +0 -48
- data/lib/earl/email_assembler.rb +0 -11
- data/lib/earl/email_entity.rb +0 -27
- data/lib/earl/email_parser.tt +0 -58
- data/lib/earl/entity_base.rb +0 -37
- data/lib/earl/hash_inquirer.rb +0 -16
- data/lib/earl/string_inquirer.rb +0 -11
- data/lib/earl/url_assembler.rb +0 -15
- data/lib/earl/url_entity.rb +0 -23
- data/lib/earl/url_parser.tt +0 -163
- data/spec/earl/earl_spec.rb +0 -17
- data/spec/earl/email_entity_spec.rb +0 -31
- data/spec/earl/email_parser_spec.rb +0 -29
- data/spec/earl/entity_base_spec.rb +0 -39
- data/spec/earl/hash_inquirer_spec.rb +0 -24
- data/spec/earl/string_inquirer_spec.rb +0 -9
- data/spec/earl/url_entity_spec.rb +0 -45
- data/spec/earl/url_parser_spec.rb +0 -189
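
The removed Treetop grammars (`email_parser.tt`, `url_parser.tt`) and entity classes, together with the added `scraper.rb`, feed/oEmbed specs, and VCR cassettes, suggest the gem shifts from email/URL parsing to page scraping with feed and oEmbed detection, tested against recorded HTTP traffic. The sketch below shows how a cassette layout like `spec/fixtures/cassettes/feed/is_atom_feed.yml` plus `spec/support/vcr.rb` is conventionally wired with VCR and WebMock. It is an illustration under those assumptions, not code taken from the gem, and `Earl.call` is a placeholder for whatever entry point earl 2.0.0 actually exposes.

```ruby
# A conventional VCR + WebMock setup matching the cassette layout in this diff.
# Sketch only: the configuration values and the Earl call are assumptions.
require "vcr"
require "webmock/rspec"

VCR.configure do |config|
  config.cassette_library_dir = "spec/fixtures/cassettes"  # e.g. feed/is_atom_feed.yml
  config.hook_into :webmock                                # intercept HTTP during specs
  config.configure_rspec_metadata!                         # enable `:vcr` example metadata
end

# Replaying the recorded Atom-feed request shown in the cassette below:
VCR.use_cassette("feed/is_atom_feed") do
  Earl.call("https://0xfe.blogspot.com/feeds/posts/default")  # hypothetical API
end
```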
data/spec/fixtures/cassettes/feed/is_atom_feed.yml

@@ -0,0 +1,2298 @@
+---
+http_interactions:
+- request:
+    method: get
+    uri: https://0xfe.blogspot.com/feeds/posts/default
+    body:
+      encoding: US-ASCII
+      string: ''
+    headers:
+      Accept-Encoding:
+      - gzip;q=1.0,deflate;q=0.6,identity;q=0.3
+      Accept:
+      - "*/*"
+      User-Agent:
+      - Ruby
+  response:
+    status:
+      code: 200
+      message: OK
+    headers:
+      Cross-Origin-Opener-Policy-Report-Only:
+      - same-origin; report-to=coop_reporting
+      Report-To:
+      - '{"group":"blogger-renderd","max_age":2592000,"endpoints":[{"url":"https://csp.withgoogle.com/csp/report-to/httpsserver2/blogger-renderd"}]}'
+      Content-Security-Policy-Report-Only:
+      - script-src 'none';form-action 'none';frame-src 'none'; report-uri https://csp.withgoogle.com/csp/httpsserver2/blogger-renderd
+      Cross-Origin-Resource-Policy:
+      - cross-origin
+      Server:
+      - blogger-renderd
+      X-Content-Type-Options:
+      - nosniff
+      X-Xss-Protection:
+      - '0'
+      Content-Length:
+      - '242401'
+      X-Frame-Options:
+      - SAMEORIGIN
+      Date:
+      - Tue, 29 Jul 2025 09:55:38 GMT
+      Expires:
+      - Tue, 29 Jul 2025 09:55:39 GMT
+      Cache-Control:
+      - public, must-revalidate, proxy-revalidate, max-age=1
+      Last-Modified:
+      - Mon, 09 Jun 2025 12:31:35 GMT
+      Etag:
+      - W/"25062ffaf404d65f81e2ad9745e540e9e672dadd034afd042f1aeba8f35ccb2b"
+      Content-Type:
+      - application/atom+xml; charset=UTF-8
+      Age:
+      - '0'
+      Alt-Svc:
+      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+    body:
+      encoding: ASCII-8BIT
+      string: "<?xml version='1.0' encoding='UTF-8'?><?xml-stylesheet href=\"http://www.blogger.com/styles/atom.css\"
[response body continues: the recorded Atom feed XML from 0xfe.blogspot.com; the 2,298-line cassette is truncated in this view]
|
602
|
+
layers, including one right at the input which also doubles as an ad-hoc noise
|
603
|
+
generator.<br />\n<br />\n<script src="https://gist.github.com/0xfe/56f14b33ac8e09e94d65e77f50a61280.js"></script>\n\n<br
|
604
|
+
/>\n<div>\n<br /></div>\nAlthough we use <a href="https://en.wikipedia.org/wiki/Mean_squared_error">mean-squared-error</a>
|
605
|
+
as our loss function, it's the <a href="https://en.wikipedia.org/wiki/Mean_absolute_error">mean-absolute-error</a>
|
606
|
+
that we need to watch, since it's easier to reason about. Let's take
|
607
|
+
a look at the model summary.<br />\n<br />\n<div class="separator"
|
608
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjLUdV6YW8fwFUwMsC0TGRx55VxkPmtXTC8mmDgu8iqKbGveU6Yz3pyyNTSRpX5rvgA2dtL6948KcbyiO1-h52ryC_R7KXom92l89UwACMadN_J9EmD7fB5_i72_ai6QtLRDGufTg/s1600/Screen+Shot+2020-02-22+at+5.08.33+PM.png"
|
609
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
610
|
+
border="0" data-original-height="1364" data-original-width="1124"
|
611
|
+
height="640" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjLUdV6YW8fwFUwMsC0TGRx55VxkPmtXTC8mmDgu8iqKbGveU6Yz3pyyNTSRpX5rvgA2dtL6948KcbyiO1-h52ryC_R7KXom92l89UwACMadN_J9EmD7fB5_i72_ai6QtLRDGufTg/s640/Screen+Shot+2020-02-22+at+5.08.33+PM.png"
|
612
|
+
width="524" /></a></div>\n<br />\n<br />\nWow,
|
613
|
+
<b>12 million</b> parameters! Feels like a lot for an experiment,
|
614
|
+
but it turns out we can build a model in less than 10 minutes on a modern
|
615
|
+
GPU. Let's start training.<br />\n<br />\n<script src="https://gist.github.com/0xfe/6a7234fe6e368bbd346f5fdddd88fa12.js"></script>\n\n<br
|
616
|
+
/>\nAfter 100 epochs, we can achieve a validation MSE of 0.002, and a validation
|
617
|
+
MAE of 0.03.<br />\n<br />\n<div class="separator"
|
618
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjW4345UjhQ7l9tyvSRWymJ0Ic-usaJeM-C6VcBKpNU_Al2ur3xoDRFSjz-xtEOsfqiCUbTxn1yd91VDmWVvZAOWbS-0dGfV4gUtnH5sxDSHAdeZ_ToAK4c8L_kYPEDBZ9yS-MIYA/s1600/Screen+Shot+2020-02-22+at+11.03.15+PM.png"
|
619
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
620
|
+
border="0" data-original-height="1066" data-original-width="814"
|
621
|
+
height="400" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjW4345UjhQ7l9tyvSRWymJ0Ic-usaJeM-C6VcBKpNU_Al2ur3xoDRFSjz-xtEOsfqiCUbTxn1yd91VDmWVvZAOWbS-0dGfV4gUtnH5sxDSHAdeZ_ToAK4c8L_kYPEDBZ9yS-MIYA/s400/Screen+Shot+2020-02-22+at+11.03.15+PM.png"
|
622
|
+
width="305" /></a></div>\n<br />\nYou may be
|
623
|
+
wondering why the validation MAE is so much better than the training MAE.
|
624
|
+
This is because of the aggressive dropout regularization. Dropout layers are
|
625
|
+
only activated during training, not prediction.<br />\n<br />\nThese
|
626
|
+
results are quite promising for an experiment! For classification problems,
|
627
|
+
we could use <a href="https://en.wikipedia.org/wiki/Confusion_matrix">confusion
|
628
|
+
matrices</a> to see where the models mispredict. For regression problems
|
629
|
+
(like this one), we can explore the losses a bit more by plotting a graph
|
630
|
+
of errors by pitch.<br />\n<br />\n<script src="https://gist.github.com/0xfe/3e4d90436dce46ec121b9f94948001a1.js"></script>\n\n<br
|
631
|
+
/>\n<div class="separator" style="clear: both; text-align:
|
632
|
+
center;">\n<br /></div>\n<div class="separator"
|
633
|
+
style="clear: both; text-align: center;">\n<br /></div>\n<table
|
634
|
+
align="center" cellpadding="0" cellspacing="0"
|
635
|
+
class="tr-caption-container" style="margin-left: auto; margin-right:
|
636
|
+
auto; text-align: center;"><tbody>\n<tr><td style="text-align:
|
637
|
+
center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhFvd_SBwOkcp64POT5Y_z7-tn1eNFhP4vMU8nPBGWtXkAv-Cn8XvQjzPc4VuFLgHNKSPSpPDJXB5MbwS0RmijcQoRiUtGZ4HZPAr_QM1mdBYaerG7V1OYCNXviTT0iJvDFF_6jGg/s1600/Screen+Shot+2020-02-22+at+10.25.46+PM.png"
|
638
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
639
|
+
border="0" data-original-height="512" data-original-width="786"
|
640
|
+
height="260" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhFvd_SBwOkcp64POT5Y_z7-tn1eNFhP4vMU8nPBGWtXkAv-Cn8XvQjzPc4VuFLgHNKSPSpPDJXB5MbwS0RmijcQoRiUtGZ4HZPAr_QM1mdBYaerG7V1OYCNXviTT0iJvDFF_6jGg/s400/Screen+Shot+2020-02-22+at+10.25.46+PM.png"
|
641
|
+
width="400" /></a></td></tr>\n<tr><td
|
642
|
+
class="tr-caption" style="text-align: center;">Prediction
|
643
|
+
Errors by Pitch</td></tr>\n</tbody></table>\n<br
|
644
|
+
/>\nAlready, we can see that the prediction errors are on the highest octaves.
|
645
|
+
This is very likely due to our downsampling to 16khz, causing <a href="https://en.wikipedia.org/wiki/Aliasing">aliasing</a>
|
646
|
+
in the harmonics and confusing the model.<br />\n<br />\nAfter
|
647
|
+
discarding the last octave, we can take the mean of the prediction error,
|
648
|
+
and what do we see?<br />\n<div style="background-color: #fffffe;
|
649
|
+
font-family: monospace, Menlo, Monaco, &quot;Courier New&quot;, monospace;
|
650
|
+
font-size: 14px; line-height: 19px; white-space: pre;">\nnp.mean(np.nan_to_num(errors_by_key[<span
|
651
|
+
style="color: #09885a;">0</span>:<span style="color:
|
652
|
+
#09885a;">80</span>]))</div>\n<span style="background-color:
|
653
|
+
white; color: #212121; font-family: monospace; font-size: 14px; white-space:
|
654
|
+
pre;">19.244542657486097</span><br />\n<br />\nPretty
|
655
|
+
much exactly the resolution of the FFT we used. It's very hard to do better
|
656
|
+
given the inputs.<br />\n<br />\n<h3 style="text-align:
|
657
|
+
left;">\nThe Real Test</h3>\n<div>\n<br /></div>\nSo,
|
658
|
+
how does this perform in the wild? To answer this question, I recorded a few
|
659
|
+
samples of myself playing single notes on the guitar, and pulled some youtube
|
660
|
+
videos of various instruments and sliced them up for analysis. I also crossed
|
661
|
+
my fingers and sacrificed a dozen goats.<br />\n<br />\nAs hoped,
|
662
|
+
the predictions were <b><i>right within the tolerances</i></b>
|
663
|
+
of the model. Try it yourself and let me know how it works out.<br />\n<br
|
664
|
+
/>\n<h3 style="text-align: left;">\nImprovements and Variations</h3>\n<br
|
665
|
+
/>\nThere's a few things we can do to improve what we have -- larger
|
666
|
+
FFT and window sizes, higher sample rates, better data, etc. We can also turn
|
667
|
+
this into a classification problem by using <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax</a>
|
668
|
+
at the bottom layer and training directly on musical pitches instead of frequencies.<br
|
669
|
+
/>\n<br />\nThis experiment was part of a whole suite of models I
|
670
|
+
built for music recognition. In a future post I'll describe a more complex
|
671
|
+
set of models I built to recognize roots, intervals, and 2-4 note chords.<br
|
672
|
+
/>\n<br />\nUntil then, hope you enjoyed this post. If you did, drop
|
673
|
+
me a note at <a href="https://twitter.com/11111110b">@11111110b</a>.<br
|
674
|
+
/>\n<br />\nAll the source code for these experiments will be available
|
675
|
+
on <a href="https://github.com/0xfe">my Github page</a>
|
676
|
+
as soon as it's in slightly better shape.<br />\n<br />\n<br
|
677
|
+
/></div>\n</div>\n</content><link rel='replies' type='application/atom+xml'
|
678
|
+
href='https://0xfe.blogspot.com/feeds/1894211342385136711/comments/default'
|
679
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2020/02/pitch-detection-with-convolutional.html#comment-form'
|
680
|
+
title='0 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/1894211342385136711'/><link
|
681
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/1894211342385136711'/><link
|
682
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2020/02/pitch-detection-with-convolutional.html'
|
683
|
+
title='Pitch Detection with Convolutional Networks'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
684
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
685
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhbd4u91YSFmMe3uUXT-tsjfSVygsHoRChlZKlFKRMkN11BNIFJsqrVhRRRxnC7hCeKEmkgiv3OYFjRo395zqpz_0zUBfl9R64ryL8Qdllg22ONbb4raIC5T1zW01BA3nOtzJG7Tw/s72-c/Screen+Shot+2020-02-22+at+9.14.50+PM.png\"
|
686
|
+
height=\"72\" width=\"72\"/><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-541596065311835171</id><published>2020-02-19T13:26:00.005-05:00</published><updated>2020-02-20T08:38:01.568-05:00</updated><title
|
687
|
+
type='text'>No Servers, Just Buckets: Hosting Static Websites on the Cloud</title><content
|
688
|
+
type='html'><div dir="ltr" style="text-align: left;"
|
689
|
+
trbidi="on">\n<br />\nFor over two decades, I've hosted
|
690
|
+
websites on managed servers. Starting with web hosting providers, going to
|
691
|
+
dedicated machines, then dedicated VMs, then cloud VMs. Maintaining these
|
692
|
+
servers tend to come at a high cognitive cost -- machine and network setup,
|
693
|
+
OS patches, web server configuration, replication and high-availability, TLS
|
694
|
+
and cert management, security... the list goes on.<br />\n<br />\nLast
|
695
|
+
year, I moved [<a href="https://pitchy.ninja/">almost</a>]
|
696
|
+
[<a href="https://muthanna.com/">all</a>]&nbsp;[<a
|
697
|
+
href="https://float64.dev/">my</a>]&nbsp;[<a href="https://vexflow.com/">websites</a>]
|
698
|
+
to cloud buckets, and it has been amazing! Life just got simpler. With just
|
699
|
+
a few commands I got:<br />\n<br />\n<ul style="text-align:
|
700
|
+
left;">\n<li>A HTTP(s) web-server hosting my content.</li>\n<li>Managed
|
701
|
+
TLS certificates.</li>\n<li>Compression, Caching, and Content
|
702
|
+
Delivery.</li>\n<li>Replication and High availability.</li>\n<li>IPv6!</li>\n<li>Fewer
|
703
|
+
headaches, and more spending money. :-)</li>\n</ul>\n<br />\nIf
|
704
|
+
you don't need tight control over how your data is served, I would strongly
|
705
|
+
recommend that you host your sites on Cloud Buckets. (Yes, of course, servers
|
706
|
+
are still involved, you just don't need to worry about them.)<br />\n<br
|
707
|
+
/>\nIn this post, I'll show you how I got the&nbsp;<a href="https://float64.dev/">float64
|
708
|
+
website</a>&nbsp;up and serving in almost no time.<br />\n<br
|
709
|
+
/>\n<h3 style="text-align: left;">\nWhat are Cloud Buckets?</h3>\n<br
|
710
|
+
/>\nBuckets are a storage abstraction for blobs of data offered by cloud
|
711
|
+
providers. E.g., <a href="https://cloud.google.com/storage">Google
|
712
|
+
Cloud Storage</a> or <a href="https://aws.amazon.com/s3/">Amazon
|
713
|
+
S3</a>. Put simply, they're a place in the cloud where you can store
|
714
|
+
directories of files (typically called objects.)<br />\n<br />\nData
|
715
|
+
in buckets are managed by cloud providers -- they take care of all the heavy
|
716
|
+
lifting around storing the data, replicating, backing up, and serving. You
|
717
|
+
can access this data with command line tools, via language APIs, or from the
|
718
|
+
browser. You can also manage permissions, ownership, replication, retention,
|
719
|
+
encryption, and audit controls.<br />\n<br />\n<h3 style="text-align:
|
720
|
+
left;">\n</h3>\n<h3 style="text-align: left;">\nHosting
|
721
|
+
Websites on Cloud Buckets</h3>\n<br />\nMany cloud providers now
|
722
|
+
allow you to serve files (sometimes called bucket objects) over the web, and
|
723
|
+
let you distribute content over their respective CDNs. For this post, we'll
|
724
|
+
upload a website to a <a href="https://cloud.google.com/storage">Google
|
725
|
+
Cloud Storage</a> bucket and serve it over the web.<br />\n<br
|
726
|
+
/>\nMake sure you have your <a href="https://cloud.google.com/">Google
|
727
|
+
Cloud</a>&nbsp;account setup, <a href="https://cloud.google.com/sdk">command-line
|
728
|
+
tools installed</a>, and are logged in on your terminal.<br />\n<br
|
729
|
+
/>\n<code>\ngcloud auth login<br />\ngcloud config set project
|
730
|
+
&lt;your-project-id&gt;</code><br />\n<code><br
|
731
|
+
/></code>\n\nCreate your storage bucket with <a href="https://cloud.google.com/storage/docs/creating-buckets">gsutil
|
732
|
+
mb</a>. Bucket names must be globally unique, so you'll have to
|
733
|
+
pick something no one else has used. Here I'm using <i>float64</i>
|
734
|
+
as my bucket name.<br />\n<br />\n<code>gsutil mb gs://float64</code>\n<br
|
735
|
+
/>\n<code><br /></code>\nCopy your website content over
|
736
|
+
to the bucket. We specify '-<a href="https://cloud.google.com/storage/docs/gsutil/commands/cp">a
|
737
|
+
public-read</a>' to make the objects world-readable.<br />\n<br
|
738
|
+
/>\n<code>gsutil cp -a public-read index.html style.css index.AF4C.js
|
739
|
+
gs://float64</code>\n<br />\n<code><br /></code>\nThat's
|
740
|
+
it. Your content is now available at <i>https://storage.googleapis.com/&lt;BUCKET&gt;/index.html</i>.
|
741
|
+
Like mine is here:&nbsp;<a href="https://storage.googleapis.com/float64/index.html">https://storage.googleapis.com/float64/index.html</a>.<br
|
742
|
+
/>\n<br />\n<h3 style="text-align: left;">\n</h3>\n<h3
|
743
|
+
style="text-align: left;">\nUsing your own Domain</h3>\n<br
|
744
|
+
/>\nTo serve data over your own domain using HTTPS, you need to create
|
745
|
+
a <a href="https://cloud.google.com/load-balancing">Cloud
|
746
|
+
Load Balancer</a> (or use an existing one.) Go to the <a href="https://console.cloud.google.com/net-services/loadbalancing">Load
|
747
|
+
Balancer Console</a>, click "Create Load Balancer", and select
|
748
|
+
the HTTP/HTTPS option.<br />\n<br />\n<div class="separator"
|
749
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhipsKbza7Qsm2JCe2LJwWdOlpRexWoh8IwF0JVLK9BsUaaiV9jaUctCtiSiG6FNjoHpspbjVPED27FoXPYh8L671CzC-azbBzqp3LQ0LyJtMvQw78R3yLdjX963KIRUSd87b3Azg/s1600/Screen+Shot+2020-02-19+at+8.28.36+AM.png"
|
750
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
751
|
+
border="0" data-original-height="652" data-original-width="878"
|
752
|
+
height="295" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhipsKbza7Qsm2JCe2LJwWdOlpRexWoh8IwF0JVLK9BsUaaiV9jaUctCtiSiG6FNjoHpspbjVPED27FoXPYh8L671CzC-azbBzqp3LQ0LyJtMvQw78R3yLdjX963KIRUSd87b3Azg/s400/Screen+Shot+2020-02-19+at+8.28.36+AM.png"
|
753
|
+
width="400" /></a></div>\n<br />\nThe balancer
|
754
|
+
configuration has three main parts: backend, routing rules, and frontend.<br
|
755
|
+
/>\n<br />\nFor the backend, select "backend buckets",
|
756
|
+
and pick the bucket that you just created. Check the 'Enable CDN'
|
757
|
+
box if you want your content cached and delivered over Google's worldwide
|
758
|
+
Content Delivery Network.<br />\n<br />\n<div class="separator"
|
759
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh9YZ38AmPpWnQ2YCL3PGI9xzJSbPD4GgyN776Ha5bZ82BubM7CjwmJ7qexivA9WywQpWBsEmKoFYRLrlvIMA3tfRkt95YDp7bZUnQbMC_8vw_OxGg5xq0J7MGNC8FyLZyT5jq3wQ/s1600/Screen+Shot+2020-02-19+at+8.31.48+AM.png"
|
760
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
761
|
+
border="0" data-original-height="100" data-original-width="280"
|
762
|
+
height="71" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh9YZ38AmPpWnQ2YCL3PGI9xzJSbPD4GgyN776Ha5bZ82BubM7CjwmJ7qexivA9WywQpWBsEmKoFYRLrlvIMA3tfRkt95YDp7bZUnQbMC_8vw_OxGg5xq0J7MGNC8FyLZyT5jq3wQ/s200/Screen+Shot+2020-02-19+at+8.31.48+AM.png"
|
763
|
+
width="200" /></a></div>\n<br />\n<br />\nFor
|
764
|
+
the routing rules, simply use your domain name (float64.dev) in the host field,
|
765
|
+
your bucket (float64) in the backends field, and <code><b>/*</b></code>
|
766
|
+
in Paths to say that all paths get routed to your bucket.<br />\n<br
|
767
|
+
/>\nFinally, for the frontend, add a new IP address, and point your domain's
|
768
|
+
<i>A</i> record at it. If you're with the times, you can also
|
769
|
+
add an IPv6 address, and point your domain's <i>AAAA</i> record
|
770
|
+
at it.<br />\n<br />\n<div class="separator" style="clear:
|
771
|
+
both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjZYmJHBFv8QyXiXv30K_XVUf62MvxDLSmb4GijnBsHnc2vouiVM2bAqEtM0qpjfoj7_cCcg1p17ZVXntlwmSzctH0cWHKXAFRa38IfK5yS28Am_UCy1B140zhdfmzkB6AtPM0cKA/s1600/Screen+Shot+2020-02-19+at+8.29.55+AM.png"
|
772
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
773
|
+
border="0" data-original-height="272" data-original-width="962"
|
774
|
+
height="179" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjZYmJHBFv8QyXiXv30K_XVUf62MvxDLSmb4GijnBsHnc2vouiVM2bAqEtM0qpjfoj7_cCcg1p17ZVXntlwmSzctH0cWHKXAFRa38IfK5yS28Am_UCy1B140zhdfmzkB6AtPM0cKA/s640/Screen+Shot+2020-02-19+at+8.29.55+AM.png"
|
775
|
+
width="640" /></a></div>\n<br />\n<br />\nIf
|
776
|
+
you're serving over HTTPS, you can create a new <a href="https://cloud.google.com/load-balancing/docs/ssl-certificates">managed
|
777
|
+
certificate</a>. These certs are issued by Let's Encrypt and managed
|
778
|
+
by Google (i.e., Google takes care of attaching, verifying, and renewing them.)
|
779
|
+
The certificates take about 30 minutes to propagate.<br />\n<br />\nSave
|
780
|
+
and apply your changes, and your custom HTTPS website is up! A few more odds
|
781
|
+
and ends before we call it a day.<br />\n<br />\n<h3 style="text-align:
|
782
|
+
left;">\nSetup Index and Error Pages</h3>\n<div>\n<br
|
783
|
+
/></div>\n<div>\nYou probably don't want your users typing
|
784
|
+
in the name of the index HTML file (<a href="https://float64.dev/index.html">https://float64.dev/index.html</a>)
|
785
|
+
every time they visit your site. You also probably want invalid URLs showing
|
786
|
+
a pretty error page.</div>\n<br />\nYou can use <a href="https://cloud.google.com/storage/docs/gsutil/commands/web"><b>gsutil
|
787
|
+
web</b></a>&nbsp;to configure the index and 404 pages for
|
788
|
+
the bucket.<br />\n<br />\n<code>gsutil web set gs://my-super-bucket
|
789
|
+
-m index.html -e 404.html</code><br />\n<br />\n<h3 style="text-align:
|
790
|
+
left;">\nCaching, Compression, and Content Delivery</h3>\n<br
|
791
|
+
/>\nTo take advantage of Google's CDN (or even simply to improve bandwidth
|
792
|
+
usage and latency), you should set the <a href="https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control">Cache-Control
|
793
|
+
headers</a> on your files. I like to keep the expiries for the index
|
794
|
+
page short, and everything else long (of course, also adding content hashes
|
795
|
+
to frequently modified files.)<br />\n<br />\nWe also want to
|
796
|
+
make sure that text files are served with <i>gzip</i> compression
|
797
|
+
enabled. The <code><b>-z</b></code> flag compresses
|
798
|
+
the file, and sets the <a href="https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding">content-encoding</a>
|
799
|
+
to <i>gzip</i> while serving over HTTP(s).<br />\n<br
|
800
|
+
/>\n<code>\ngsutil -h "Cache-control:public,max-age=86400"
|
801
|
+
-m \\</code><br />\n<code>&nbsp; cp -a public-read -z
|
802
|
+
js,map,css,svg \\<br />\n&nbsp; &nbsp; $DIST/*.js $DIST/*.map
|
803
|
+
$DIST/*.css \\<br />\n&nbsp; &nbsp; $DIST/*.jpg $DIST/*.svg
|
804
|
+
$DIST/*.png $DIST/*.ico \\<br />\n&nbsp; &nbsp; gs://float64</code><br
|
805
|
+
/>\n<code><br />\ngsutil -h "Cache-control:public,max-age=300"
|
806
|
+
-m \\</code><br />\n<code>&nbsp; cp -a public-read -z
|
807
|
+
html \\</code><br />\n<code>&nbsp; $DIST/index.html
|
808
|
+
gs://float64</code><br />\n<code><br /></code>\nIf
|
809
|
+
you've made it this far, you now have a (nearly) production-ready website
|
810
|
+
up and running. Congratulations!<br />\n<br />\n<h3 style="text-align:
|
811
|
+
left;">\nSo, how much does it cost?</h3>\n<br />\nI have
|
812
|
+
about 8 different websites running on different domains, all using managed
|
813
|
+
certificates and the CDN, and I pay about $20 a month.<br />\n<br
|
814
|
+
/>\nI use a single load balancer ($18/mo) and one IP address ($2/mo) for
|
815
|
+
all of them. I get about 10 - 20k requests a day across all my sites, and
|
816
|
+
bandwidth costs are in the pennies.<br />\n<br />\nNot cheap,
|
817
|
+
but not expensive either given the cognitive savings. And there are cheaper
|
818
|
+
options (as you'll see in the next section).<br />\n<br />\n<h3
|
819
|
+
style="text-align: left;">\nAlternatives</h3>\n<div>\n<br
|
820
|
+
/></div>\nThere are many ways to serve web content out of storage
|
821
|
+
buckets, and this is just one. Depending on your traffic, the number of sites
|
822
|
+
you're running, and what kinds of tradeoffs you're willing to make,
|
823
|
+
you can optimize costs further.<br />\n<br />\n<a href="https://firebase.google.com/docs/hosting">Firebase
|
824
|
+
Hosting</a>&nbsp;sticks all of this into one pretty package, with
|
825
|
+
a lower upfront cost (however, the bandwidth costs are higher as your traffic
|
826
|
+
increases.)<br />\n<br />\n<a href="https://www.cloudflare.com/">Cloudflare</a>&nbsp;has
|
827
|
+
a&nbsp;free plan and lets you stick an SSL server and CDN in front of
|
828
|
+
your Cloud Storage bucket. However if you want dedicated certificates, they
|
829
|
+
charge you $5 each. Also, the minimum TTL on the free plan is 2 hours, which
|
830
|
+
is not great if you're building static Javascript applications.<br
|
831
|
+
/>\n<br />\nAnd there's <a href="https://aws.amazon.com/cloudfront/">CloudFront</a>,
|
832
|
+
<a href="http://www.fastly.com/">Fastly</a>, <a href="https://netlify.com/">Netlify</a>,
|
833
|
+
all of which provide various levels of managed infrastructure, still all better
|
834
|
+
than running your own servers.<br />\n<br />\n<h3 style="text-align:
|
835
|
+
left;">\nCaveats</h3>\n<div>\n<br /></div>\n<div>\nObviously,
|
836
|
+
there's no free lunch, and good engineering requires making tradeoffs,
|
837
|
+
and here are a few things to consider before you decide to migrate from servers
|
838
|
+
to buckets:</div>\n<div>\n<br /></div>\n<div>\n<ul
|
839
|
+
style="text-align: left;">\n<li><b>Vendor lock-in.</b>
|
840
|
+
Are you okay with using proprietary technologies for your stack. If not, you're
|
841
|
+
better off running your own servers.</li>\n<li><b>Control
|
842
|
+
and Flexibility.</b> Do you want advanced routing, URL rewriting, or
|
843
|
+
other custom behavior? If so you're better off running your own servers.</li>\n<li><b>Cost
|
844
|
+
transparency.</b> Although both Google and Amazon do great jobs with
|
845
|
+
billing and detailed price breakdowns, they are super complicated and can
|
846
|
+
change on a whim.</li>\n</ul>\n<div>\nFor a lot of what
|
847
|
+
I do, these downsides are well worth it. The vendor lock-in troubles me the
|
848
|
+
most, however it's not hard to migrate this stuff to other providers if
|
849
|
+
I need to.</div>\n</div>\n<div>\n<br /></div>\n<div>\nIf
|
850
|
+
you liked this, check out some of <a href="https://0xfe.blogspot.com/">my
|
851
|
+
other stuff</a> on this blog.</div>\n<br />\n<br />\n<br
|
852
|
+
/></div>\n</content><link rel='replies' type='application/atom+xml'
|
853
|
+
href='https://0xfe.blogspot.com/feeds/541596065311835171/comments/default'
|
854
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2020/02/no-servers-just-buckets-hosting-static.html#comment-form'
|
855
|
+
title='1 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/541596065311835171'/><link
|
856
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/541596065311835171'/><link
|
857
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2020/02/no-servers-just-buckets-hosting-static.html'
|
858
|
+
title='No Servers, Just Buckets: Hosting Static Websites on the Cloud'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
859
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
860
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhipsKbza7Qsm2JCe2LJwWdOlpRexWoh8IwF0JVLK9BsUaaiV9jaUctCtiSiG6FNjoHpspbjVPED27FoXPYh8L671CzC-azbBzqp3LQ0LyJtMvQw78R3yLdjX963KIRUSd87b3Azg/s72-c/Screen+Shot+2020-02-19+at+8.28.36+AM.png\"
|
861
|
+
height=\"72\" width=\"72\"/><thr:total>1</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-421690605020819859</id><published>2016-07-12T09:44:00.000-04:00</published><updated>2016-07-12T10:23:04.730-04:00</updated><title
|
862
|
+
type='text'>New in VexFlow: ES6, Visual Regression Tests, and more!</title><content
|
863
|
+
type='html'><div dir="ltr" style="text-align: left;"
|
864
|
+
trbidi="on">\nLots of developments since the last time I posted
|
865
|
+
about VexFlow.<br />\n<h3 style="text-align: left;">\n</h3>\n<div>\n<br
|
866
|
+
/></div>\n<h3 style="text-align: left;">\nVexFlow
|
867
|
+
is ES6</h3>\n<div>\n<br /></div>\nThanks to the heroics
|
868
|
+
of <a href="http://github.com/SilverWolf90">SilverWolf90</a>
|
869
|
+
and <a href="http://github.com/AaronMars">AaronMars</a>,
|
870
|
+
and the help from many others, VexFlow's entire <code>src/</code>
|
871
|
+
tree has been migrated to ES6. This is a huge benefit to the project and to
|
872
|
+
the health of the codebase. Some of the wins are:<br />\n<br />\n<ul
|
873
|
+
style="text-align: left;">\n<li>Real modules, which allows
|
874
|
+
us to extract explicit dependency information and generate graphs like <a
|
875
|
+
href="https://github.com/0xfe/vexflow/wiki/VexFlow-Dependency-Graph">this</a>.</li>\n<li>Const-correctness
|
876
|
+
and predictable variable scoping with <code>const</code> and <code>let</code>.</li>\n<li>Classes,
|
877
|
+
lambda functions, and lots of other structural enhancements that vastly improve
|
878
|
+
the clarity and conciseness of the codebase.</li>\n</ul>\n<div>\n<div
|
879
|
+
class="separator" style="clear: both; text-align: center;">\n<a
|
880
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi3aiqkRZhH7qzKgGfHCA1yXvmcc4CvQwetLQEHvrQej3ugBVywShEIL3tPMIFw93rJU4Qf7yyerY2Ha60AgIvadGwq98fZLMrTLbzRg0h0y9v1XG_sIyFh4lVEmi2tq-HbikKDMg/s1600/Screen+Shot+2016-07-12+at+10.06.20+AM.png"
|
881
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
882
|
+
border="0" height="390" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi3aiqkRZhH7qzKgGfHCA1yXvmcc4CvQwetLQEHvrQej3ugBVywShEIL3tPMIFw93rJU4Qf7yyerY2Ha60AgIvadGwq98fZLMrTLbzRg0h0y9v1XG_sIyFh4lVEmi2tq-HbikKDMg/s640/Screen+Shot+2016-07-12+at+10.06.20+AM.png"
|
883
|
+
width="640" /></a></div>\n<br />\n<br />\nPart
|
884
|
+
of the migration effort also involved making everything lint-clean, improving
|
885
|
+
the overall style and consistency of the codebase -- see <a href="http://github.com/SilverWolf90">SilverWolf90</a>'s
|
886
|
+
brief document on how <a href="https://github.com/0xfe/vexflow/wiki/Migrating-to-ESLint">here</a>.</div>\n<h3
|
887
|
+
style="text-align: left;">\n</h3>\n<div>\n<br
|
888
|
+
/></div>\n<h3 style="text-align: left;">\nVisual
|
889
|
+
Regression Tests</h3>\n<div>\n<br /></div>\nVexFlow
|
890
|
+
now has a visual regression test system, and all image-generating QUnit tests
|
891
|
+
are automatically included.<br />\n<br />\nThe goal of this system
|
892
|
+
is to detect differences in the rendered output without having to rely on
|
893
|
+
human eyeballs, especially given the huge number of tests that exist today.
|
894
|
+
It does this by calculating a perceptual hash (PHASH) of each test image and
|
895
|
+
comparing it with the hash of a good known blessed image. The larger the arithmetic
|
896
|
+
distance between the hashes, the more different are the two images.<br
|
897
|
+
/>\n<br />\nThe system also generates a diff image, which is an overlay
|
898
|
+
of the two images, with the differences highlighted, to ease debugging. Here's
|
899
|
+
an example of a failing test:<br />\n<br />\n<div class="separator"
|
900
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhxeptv0TDTSQB4_RFZOumVUFEhBNkP8P3mIiTWAJSVescGm8Fx5x0-qqZYZiPSfK8Vfrc9GiNxsjhdTzalnb243CDZ_LmqI8Y-TvxblFvAxOKT_aK_j7I4q6SZ8DcdUqIGuZlSvA/s1600/Screen+Shot+2016-07-12+at+9.46.16+AM.png"
|
901
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
902
|
+
border="0" height="640" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhxeptv0TDTSQB4_RFZOumVUFEhBNkP8P3mIiTWAJSVescGm8Fx5x0-qqZYZiPSfK8Vfrc9GiNxsjhdTzalnb243CDZ_LmqI8Y-TvxblFvAxOKT_aK_j7I4q6SZ8DcdUqIGuZlSvA/s640/Screen+Shot+2016-07-12+at+9.46.16+AM.png"
|
903
|
+
width="600" /></a></div>\n<br />\n<br />\n<div
|
904
|
+
class="separator" style="clear: both; text-align: center;">\n</div>\nThese
|
905
|
+
tests are run automatically for all PRs, commits, and releases. Props to <a
|
906
|
+
href="http://github.com/panarch">Taehoon Moon</a> for migrating
|
907
|
+
the regression tests from NodeJS to SlimerJS, giving us headless support and
|
908
|
+
Travis CI integration. To find out more, read the Wiki page on <a href="https://github.com/0xfe/vexflow/wiki/Visual-Regression-Tests">Visual
|
909
|
+
Regression Tests</a>.<br />\n<h3 style="text-align: left;">\n</h3>\n<div>\n<br
|
910
|
+
/></div>\n<h3 style="text-align: left;">\nNative
|
911
|
+
SVG</h3>\n<div>\n<br /></div>\nThanks to the awesome
|
912
|
+
contribution of <a href="http://github.com/gristow">Gregory
|
913
|
+
Ristow</a>, VexFlow now has a native SVG rendering backend, and the
|
914
|
+
<a href="http://raphaeljs.com/">RaphaelJS</a> backend
|
915
|
+
has been deprecated. This not only reduces the overall size and bloat, but
|
916
|
+
also hugely improves rendering performance.<br />\n<br />\nThe
|
917
|
+
new backend is called <code>Rendering.Backends.SVG</code> with
|
918
|
+
the code at&nbsp;<a href="https://github.com/0xfe/vexflow/blob/master/src/svgcontext.js">Vex.Flow.SVGContext</a>.
|
919
|
+
Here is a quick example of how to use the new backend:&nbsp;<a href="https://jsfiddle.net/nL0cn3vL/2/">https://jsfiddle.net/nL0cn3vL/2/</a>.<br
|
920
|
+
/>\n<br />\n<h3 style="text-align: left;">\nImproved
|
921
|
+
Microtonal Support</h3>\n<br />\nVexFlow now has better support
|
922
|
+
for Arabic, Turkish, and other microtonal music via accidentals and key signatures.
|
923
|
+
Thanks to <a href="http://github.com/infojunkie">infojunkie</a>
|
924
|
+
for a lot of the heavy lifting here, and to all the contributors in the <a
|
925
|
+
href="https://github.com/0xfe/vexflow/issues/318">GitHub issue</a>.<br
|
926
|
+
/>\n<br />\n<div class="separator" style="clear:
|
927
|
+
both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhKqsOnWIHkdB5VVqFfq6gj2wgiwJXzN1Sk8XR2qJX_6EZ-2xnrzBYL5eZy23MiDbm5GCvgNA_TBzjFQ8JOCoA2JlOVMD7rw3jQxk4YphClucw6TQGtnc1-ZJgNhWYtX18BKHKMcQ/s1600/Screen+Shot+2016-07-12+at+9.50.40+AM.png"
|
928
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
929
|
+
border="0" height="212" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhKqsOnWIHkdB5VVqFfq6gj2wgiwJXzN1Sk8XR2qJX_6EZ-2xnrzBYL5eZy23MiDbm5GCvgNA_TBzjFQ8JOCoA2JlOVMD7rw3jQxk4YphClucw6TQGtnc1-ZJgNhWYtX18BKHKMcQ/s640/Screen+Shot+2016-07-12+at+9.50.40+AM.png"
|
930
|
+
width="640" /></a></div>\n<br />\n<div class="separator"
|
931
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgYh59DufskkVVEVorHw4Of9VzlaEjU2i2GG8p8q3-PpWfFZrbEcWs8rvfyiEvJ1gfJwvRoEjzkW88AzO4O2LUo-Vs3PU97AmA6ZINyvz5zm4vwfU_s89qNFNrcf56tys6hDXVN6w/s1600/Screen+Shot+2016-07-12+at+9.50.52+AM.png"
|
932
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
933
|
+
border="0" height="272" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgYh59DufskkVVEVorHw4Of9VzlaEjU2i2GG8p8q3-PpWfFZrbEcWs8rvfyiEvJ1gfJwvRoEjzkW88AzO4O2LUo-Vs3PU97AmA6ZINyvz5zm4vwfU_s89qNFNrcf56tys6hDXVN6w/s640/Screen+Shot+2016-07-12+at+9.50.52+AM.png"
|
934
|
+
width="640" /></a></div>\n<br />\nMicrotonal
|
935
|
+
support is by no means complete, but this is a noteworthy step forward in
|
936
|
+
the space.<br />\n<br />\n<h3 style="text-align: left;">\nOther
|
937
|
+
Stuff</h3>\n<br />\nLots of other stuff worth mentioning:<br
|
938
|
+
/>\n<br />\n<ul style="text-align: left;">\n<li>Support
|
939
|
+
for user interactivity in SVG notation. You can attach event-handlers to elements
|
940
|
+
(or groups of elements) and dynamically modify various properties of the score.</li>\n<li>Improved
|
941
|
+
bounding-box support.</li>\n<li>Alignment of clef, timesignature,
|
942
|
+
and other stave modifiers during mid-measure changes.</li>\n<li>Lots
|
943
|
+
of improvements to the build system and Travis CI integration.</li>\n<li>Lots
|
944
|
+
of bug fixes related to beaming, tuplets, annotations, etc.</li>\n</ul>\n<div>\n<br
|
945
|
+
/></div>\n<div>\nMany thanks to all the contributors involved!</div>\n</div>\n</content><link
|
946
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/421690605020819859/comments/default'
|
947
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2016/07/new-in-vexflow-es6-visual-regression.html#comment-form'
|
948
|
+
title='2 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/421690605020819859'/><link
|
949
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/421690605020819859'/><link
|
950
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2016/07/new-in-vexflow-es6-visual-regression.html'
|
951
|
+
title='New in VexFlow: ES6, Visual Regression Tests, and more!'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
952
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
953
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi3aiqkRZhH7qzKgGfHCA1yXvmcc4CvQwetLQEHvrQej3ugBVywShEIL3tPMIFw93rJU4Qf7yyerY2Ha60AgIvadGwq98fZLMrTLbzRg0h0y9v1XG_sIyFh4lVEmi2tq-HbikKDMg/s72-c/Screen+Shot+2016-07-12+at+10.06.20+AM.png\"
|
954
|
+
height=\"72\" width=\"72\"/><thr:total>2</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-2970930225553638408</id><published>2014-05-02T11:18:00.000-04:00</published><updated>2014-05-02T11:18:34.873-04:00</updated><title
|
955
|
+
type='text'>New in VexFlow (May 2014)</title><content type='html'>Lots of
|
956
|
+
commits into the repository lately. Thanks to Cyril Silverman for many of these.
|
957
|
+
Here are some of the highlights:\n\n<p/>\n\n<h3>Chord Symbols</h3>\nThis
|
958
|
+
includes subscript/superscript support in <code>TextNote</code>
|
959
|
+
and support for common symbols (dim, half-dim, maj7, etc.)\n<p/>\n<div
|
960
|
+
class="separator" style="clear: both; text-align: center;"><a
|
961
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhARWln9YaHdtVL5iBAiFKZHcGI85eDMyNLhN3USobfFRbFuq6FAZPsXK746YLJHrXEcb0Hpkbza1giiZ8sCb6I-1BVwuPTCNwkY8boODT0d1x7hYEnbys1OzHbdVRmwGwEYMSaSw/s1600/Screen+Shot+2014-05-02+at+10.51.12+AM.png"
|
962
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
963
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhARWln9YaHdtVL5iBAiFKZHcGI85eDMyNLhN3USobfFRbFuq6FAZPsXK746YLJHrXEcb0Hpkbza1giiZ8sCb6I-1BVwuPTCNwkY8boODT0d1x7hYEnbys1OzHbdVRmwGwEYMSaSw/s400/Screen+Shot+2014-05-02+at+10.51.12+AM.png"
|
964
|
+
/></a></div>\n\n<p/>\n\n<h3>Stave Line Arrows</h3>\nThis
|
965
|
+
is typically used in instructional material.\n<p/>\n<div class="separator"
|
966
|
+
style="clear: both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgNZTalVSVd5H_a4RBTaURK2A7MhCwpKTZDwqEC6j3ZALkCB3TFv4jfVQyX6GoiALkwsTkxnRK_Ntw7HuRm3S7n6ehmw8M1xa1qqysNo7HVxG06zc1y2TNE09PNp41kaGqiHo32tg/s1600/Screen+Shot+2014-05-02+at+10.50.57+AM.png"
|
967
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
968
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgNZTalVSVd5H_a4RBTaURK2A7MhCwpKTZDwqEC6j3ZALkCB3TFv4jfVQyX6GoiALkwsTkxnRK_Ntw7HuRm3S7n6ehmw8M1xa1qqysNo7HVxG06zc1y2TNE09PNp41kaGqiHo32tg/s400/Screen+Shot+2014-05-02+at+10.50.57+AM.png"
|
969
|
+
/></a></div>\n\n\n<div class="separator" style="clear:
|
970
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgdoHUP3nNvIHRvV0nF_w0sv1dI7Lrir4jLiyzMNMQgMUrsgH-HkATiIUUjoYYprV9rOSCkX8-UyUCRT-331MsHJ-BTkjuBh3k-Zqq019RbKHpYP3vOjZcd3mKY7p_xiUmPkJlK5Q/s1600/Screen+Shot+2014-05-02+at+10.51.06+AM.png"
|
971
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
972
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgdoHUP3nNvIHRvV0nF_w0sv1dI7Lrir4jLiyzMNMQgMUrsgH-HkATiIUUjoYYprV9rOSCkX8-UyUCRT-331MsHJ-BTkjuBh3k-Zqq019RbKHpYP3vOjZcd3mKY7p_xiUmPkJlK5Q/s400/Screen+Shot+2014-05-02+at+10.51.06+AM.png"
|
973
|
+
/></a></div>\n\n<p/>\n\n<h3>Slurs</h3>\nFinally,
|
974
|
+
we have slurs. This uses a new VexFlow class called <code>Curve</code>.
|
975
|
+
Slurs are highly configurable.\n<p/>\n<div class="separator"
|
976
|
+
style="clear: both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiwVjQOCOtb9K1roaI9Fkpdjer-LJGK_ItJNjilN5fxykR6nkpjO_ZG_Ows3HH20ix8sIOZrzb0jm5Z-VmqP2JjwAB_KuRQgvIXF618OUC7R4T6r5WfPzFq_E4ogC_Lak_wYGIkhg/s1600/Screen+Shot+2014-05-02+at+10.51.24+AM.png"
|
977
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
978
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiwVjQOCOtb9K1roaI9Fkpdjer-LJGK_ItJNjilN5fxykR6nkpjO_ZG_Ows3HH20ix8sIOZrzb0jm5Z-VmqP2JjwAB_KuRQgvIXF618OUC7R4T6r5WfPzFq_E4ogC_Lak_wYGIkhg/s400/Screen+Shot+2014-05-02+at+10.51.24+AM.png"
|
979
|
+
/></a></div>\n\n<div class="separator" style="clear:
|
980
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhatECDRLVFWQyBskqVOcVUOZg66Jcywok5t9qyAfIVcqHoUfCZa1J2Ga1Gxrp3dpiFA7Zbm6I1XGGRrWBMX6fcZ5oZDxlWZu0hR1UVipMdAgmEf-96gHbWgppln4Dp2DOsBhIAxg/s1600/Screen+Shot+2014-05-02+at+10.51.29+AM.png"
|
981
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
982
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhatECDRLVFWQyBskqVOcVUOZg66Jcywok5t9qyAfIVcqHoUfCZa1J2Ga1Gxrp3dpiFA7Zbm6I1XGGRrWBMX6fcZ5oZDxlWZu0hR1UVipMdAgmEf-96gHbWgppln4Dp2DOsBhIAxg/s400/Screen+Shot+2014-05-02+at+10.51.29+AM.png"
|
983
|
+
/></a></div>\n\n\n<div class="separator" style="clear:
|
984
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgc_eVlyXPeFIevIo9au9bCa75PKtHctY2oRecgdQl15Gspn0s_i7uZK_7t6UclD-UAqCXFCJrozYETXUqg45BobYrgRGs0EZkTpCMriR21P0CdLfaH5sDfSVljgqHokOQwHM7Kbw/s1600/Screen+Shot+2014-05-02+at+10.51.36+AM.png"
|
985
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
986
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgc_eVlyXPeFIevIo9au9bCa75PKtHctY2oRecgdQl15Gspn0s_i7uZK_7t6UclD-UAqCXFCJrozYETXUqg45BobYrgRGs0EZkTpCMriR21P0CdLfaH5sDfSVljgqHokOQwHM7Kbw/s400/Screen+Shot+2014-05-02+at+10.51.36+AM.png"
|
987
|
+
/></a></div>\n\n\n<div class="separator" style="clear:
|
988
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiedlG-ypZuwWHvQl6PNXpcjXV-qnrHI4QbHCX4b-xPrLfwm7i9JJjhDTORtXX2WYL-N5hzGEuGK0PFvdifZmnqU-QjRA9L1HeZ2LDC5XLe0SpiprIkafTO1_CS-66aWyGiSZpNsw/s1600/Screen+Shot+2014-05-02+at+10.51.45+AM.png"
|
989
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
990
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiedlG-ypZuwWHvQl6PNXpcjXV-qnrHI4QbHCX4b-xPrLfwm7i9JJjhDTORtXX2WYL-N5hzGEuGK0PFvdifZmnqU-QjRA9L1HeZ2LDC5XLe0SpiprIkafTO1_CS-66aWyGiSZpNsw/s400/Screen+Shot+2014-05-02+at+10.51.45+AM.png"
|
991
|
+
/></a></div>\n\n<p/>\n\n<h3>Improved auto-positioning
|
992
|
+
of Annotations and Articulations</h3>\nAnnotations and Articulations
|
993
|
+
now self-position based on note, stem, and beam configuration.\n<p/>\n<div
|
994
|
+
class="separator" style="clear: both; text-align: center;"><a
|
995
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgU13FCOX7XCOL9mnXofqMUGuOmGJbJvbCtUIixCdxqASZmSCLZNmPQ773x2hY5lWiPHorbJak3YtUg1BuhcV1HajHha3ryLrlId4AczhzrqALlOhdvm2wyBiz7ha8SZwRliBj11A/s1600/Screen+Shot+2014-05-02+at+10.54.03+AM.png"
|
996
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
997
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgU13FCOX7XCOL9mnXofqMUGuOmGJbJvbCtUIixCdxqASZmSCLZNmPQ773x2hY5lWiPHorbJak3YtUg1BuhcV1HajHha3ryLrlId4AczhzrqALlOhdvm2wyBiz7ha8SZwRliBj11A/s400/Screen+Shot+2014-05-02+at+10.54.03+AM.png"
|
998
|
+
/></a></div>\n\n<p/>\n\n<h3>Grace Notes</h3>\nVexFlow
|
999
|
+
now has full support for Grace Notes. Grace Note groups can contain complex
|
1000
|
+
rhythmic elements, and are formatted using the same code as regular notes.\n\n<p/>\n<div
|
1001
|
+
class="separator" style="clear: both; text-align: center;"><a
|
1002
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhSotGzQBCA_ljoJpIQM6N011kQTZ5AhkdoLgqO8ORq42h05hh82Rs_q9HBUN5lEkkba8_TbMNhx04aJ48K0GyPzsH1BHXG-1cNljOjvZgVfz7qGhcHD-N8KhS1juvDqSu7ECcd6g/s1600/Screen+Shot+2014-05-02+at+10.54.30+AM.png"
|
1003
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1004
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhSotGzQBCA_ljoJpIQM6N011kQTZ5AhkdoLgqO8ORq42h05hh82Rs_q9HBUN5lEkkba8_TbMNhx04aJ48K0GyPzsH1BHXG-1cNljOjvZgVfz7qGhcHD-N8KhS1juvDqSu7ECcd6g/s400/Screen+Shot+2014-05-02+at+10.54.30+AM.png"
|
1005
|
+
/></a></div>\n\n<div class="separator" style="clear:
|
1006
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhZOxD9_LTKefUh0Vs9iqFpzlLI55B7tYSYKx79Zv43GL0BhyMqmfi63i7ulWmY1FrXmN_5HnFmORvMqCNA0yUX-jQEi6MJOv4cO3HBBvde5m1VOeRPzEXxvEO_FwR2qceVl1Zixg/s1600/Screen+Shot+2014-05-02+at+10.54.37+AM.png"
|
1007
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1008
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhZOxD9_LTKefUh0Vs9iqFpzlLI55B7tYSYKx79Zv43GL0BhyMqmfi63i7ulWmY1FrXmN_5HnFmORvMqCNA0yUX-jQEi6MJOv4cO3HBBvde5m1VOeRPzEXxvEO_FwR2qceVl1Zixg/s400/Screen+Shot+2014-05-02+at+10.54.37+AM.png"
|
1009
|
+
/></a></div>\n\n<p/>\n\n<h3>Auto-Beam Improvements</h3>\nLots
|
1010
|
+
more beaming options, including beaming over rests, stemlet rendering, and
|
1011
|
+
time-signature aware beaming.\n\n<p/>\n<div class="separator"
|
1012
|
+
style="clear: both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjULM-Ju99byoC-ygScvma7aMv67b6ruuqb27v6rZiaQf-kvPep0n_nn8SSmm1XFFdML16g8vhXjFNaJu3gfNDCqiQ5UD69eWnvx0IFj9lfC6RRhHLcmv_v8C0MeHdwizCb01XJTg/s1600/Screen+Shot+2014-05-02+at+10.54.58+AM.png"
|
1013
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1014
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjULM-Ju99byoC-ygScvma7aMv67b6ruuqb27v6rZiaQf-kvPep0n_nn8SSmm1XFFdML16g8vhXjFNaJu3gfNDCqiQ5UD69eWnvx0IFj9lfC6RRhHLcmv_v8C0MeHdwizCb01XJTg/s400/Screen+Shot+2014-05-02+at+10.54.58+AM.png"
|
1015
|
+
/></a></div>\n\n<div class="separator" style="clear:
|
1016
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgovZ5gaOWJyLcc9DxyQX0yI54NK-hy3hSgukICa5-gMrGTaFbVR1OXhoWjS7TkX5CDtoiBRQC4S88QBNv_IHsnRU5U8nKB0H8xHPEuU2o3goALytt_ayQgkMyCNicq7dCJ8qmgQg/s1600/Screen+Shot+2014-05-02+at+10.55.15+AM.png"
|
1017
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1018
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgovZ5gaOWJyLcc9DxyQX0yI54NK-hy3hSgukICa5-gMrGTaFbVR1OXhoWjS7TkX5CDtoiBRQC4S88QBNv_IHsnRU5U8nKB0H8xHPEuU2o3goALytt_ayQgkMyCNicq7dCJ8qmgQg/s400/Screen+Shot+2014-05-02+at+10.55.15+AM.png"
|
1019
|
+
/></a></div>\n\n<div class="separator" style="clear:
|
1020
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEihdUTDYtKZhcd-HQjXBxzNPfL0VycfORZBI7xoQWZ3Re4VpaLb9Ka59EL1WivBVfmt1cERK5RhN7zs0mbvxFwzbucMkfL50UYk7epEViA7L3UDk5ihFNJIGVdHmNTgZQ0gjPGrHg/s1600/Screen+Shot+2014-05-02+at+10.55.25+AM.png"
|
1021
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1022
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEihdUTDYtKZhcd-HQjXBxzNPfL0VycfORZBI7xoQWZ3Re4VpaLb9Ka59EL1WivBVfmt1cERK5RhN7zs0mbvxFwzbucMkfL50UYk7epEViA7L3UDk5ihFNJIGVdHmNTgZQ0gjPGrHg/s400/Screen+Shot+2014-05-02+at+10.55.25+AM.png"
|
1023
|
+
/></a></div>\n\n<div class="separator" style="clear:
|
1024
|
+
both; text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg7hzcAqJ2cabV3VXA9JCKovDSwx5yKTJB8lKEOmOmHZ_VKoM9uTktsiIR6f2ulKVShwrOm7v7dUuPvNPM3E5EZ-YupxVdfz9EXHRr_FMaOlhsoP5At99tKHoVBmU9CX9-YpIMjhA/s1600/Screen+Shot+2014-05-02+at+10.55.35+AM.png"
|
1025
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1026
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg7hzcAqJ2cabV3VXA9JCKovDSwx5yKTJB8lKEOmOmHZ_VKoM9uTktsiIR6f2ulKVShwrOm7v7dUuPvNPM3E5EZ-YupxVdfz9EXHRr_FMaOlhsoP5At99tKHoVBmU9CX9-YpIMjhA/s400/Screen+Shot+2014-05-02+at+10.55.35+AM.png"
|
1027
|
+
/></a></div>\n\n<p/>\n<h3>Tab-Stem Features</h3>\n\nYou
|
1028
|
+
can (optionally) render Tab Stems through stave lines.\n<p/>\n\n<div
|
1029
|
+
class="separator" style="clear: both; text-align: center;"><a
|
1030
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjfsQjfpN66Lpyq_IC4dTlXCypbUy3s8wEOE1Tf-j0hRUS3JkAYk2C5topEC4P0cA2ZgQ2A7gxP1djMAp8ecedWzJgaO_lHOELzrFZq8EebqIO3Y7QZxa67UAKluCDHNhv-QgNkWQ/s1600/Screen+Shot+2014-05-02+at+10.57.42+AM.png"
|
1031
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1032
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjfsQjfpN66Lpyq_IC4dTlXCypbUy3s8wEOE1Tf-j0hRUS3JkAYk2C5topEC4P0cA2ZgQ2A7gxP1djMAp8ecedWzJgaO_lHOELzrFZq8EebqIO3Y7QZxa67UAKluCDHNhv-QgNkWQ/s400/Screen+Shot+2014-05-02+at+10.57.42+AM.png"
|
1033
|
+
/></a></div>\n\n<p/>\n\nThat's all, Folks!</content><link
|
1034
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/2970930225553638408/comments/default'
|
1035
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2014/05/new-in-vexflow.html#comment-form'
|
1036
|
+
title='19 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/2970930225553638408'/><link
|
1037
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/2970930225553638408'/><link
|
1038
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2014/05/new-in-vexflow.html'
|
1039
|
+
title='New in VexFlow (May 2014)'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1040
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1041
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhARWln9YaHdtVL5iBAiFKZHcGI85eDMyNLhN3USobfFRbFuq6FAZPsXK746YLJHrXEcb0Hpkbza1giiZ8sCb6I-1BVwuPTCNwkY8boODT0d1x7hYEnbys1OzHbdVRmwGwEYMSaSw/s72-c/Screen+Shot+2014-05-02+at+10.51.12+AM.png\"
|
1042
|
+
height=\"72\" width=\"72\"/><thr:total>19</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-6880188793332322250</id><published>2012-01-02T13:34:00.000-05:00</published><updated>2012-01-02T13:34:24.100-05:00</updated><title
|
1043
|
+
type='text'>More K-Means Clustering Experiments on Images</title><content
|
1044
|
+
type='html'>I spent a little more time experimenting with <a href="http://0xfe.blogspot.com/2011/12/k-means-clustering-and-art.html">k-means
|
1045
|
+
clustering</a> on images and realized that I could use these clusters
|
1046
|
+
to recolor the image in interesting ways.\n<p/>\nI wrote the function
|
1047
|
+
<code>save_recolor</code> to replace pixels from the given clusters
|
1048
|
+
(<code>replacements</code>) with new ones of equal intensity,
|
1049
|
+
as specified by the <code>rgb_factors</code> vector. For example,
|
1050
|
+
the following code will convert pixels of the first two clusters to greyscale.\n<p/>\n<pre
|
1051
|
+
class="prettyprint">\n&gt; save_recolor("baby.jpeg",
|
1052
|
+
"baby_new.jpg", replacements=c(1,2),\n rgb_factors=c(1/3,
|
1053
|
+
1/3, 1/3))\n</pre>\n<p/>\nIt's greyscale because the <code>rgb_factors</code>
|
1054
|
+
distributes the pixel intensity evenly among the channels. A factor of <code>c(20/100,
|
1055
|
+
60/100, 20/100)</code> would make pixels from the cluster 60% more green.\n<p/>\nLet's
|
1056
|
+
get to some examples. Here's an unprocessed image, alongside its color
|
1057
|
+
clusters. I picked <code>k=10</code>. You can set <code>k</code>
|
1058
|
+
by specifying the <code>palette_size</code> parameter to <code>save_recolor</code>.\n\n<div
|
1059
|
+
class="separator" style="clear: both; text-align: center;">\n<a
|
1060
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjnRdj14UpXHdCnqTwLonrOWUuqLfH09kzSImmQpt8gaPgq8udoZZCtbCcvejK2mvW0u9RKzC355jFpINb2sxNvMccNBy5z8j477Vx0HLeTB87Mww1nldLP6aQ7qZn5mOalIi7-Tw/s1600/Screen+shot+2012-01-01+at+12.16.45+PM.png"
|
1061
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1062
|
+
border="0" height="274" width="400" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjnRdj14UpXHdCnqTwLonrOWUuqLfH09kzSImmQpt8gaPgq8udoZZCtbCcvejK2mvW0u9RKzC355jFpINb2sxNvMccNBy5z8j477Vx0HLeTB87Mww1nldLP6aQ7qZn5mOalIi7-Tw/s400/Screen+shot+2012-01-01+at+12.16.45+PM.png"
|
1063
|
+
/></a></div>\n\n<p/>\nHere's what happens when I
|
1064
|
+
remove the red (the first cluster).\n\n<div class="separator"
|
1065
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj21ggbKmvK0T2hmbK5tWykQeThSTgl-n-NtJnxUF0v7xQf0DAQV6IXutPz2DuPxFVqBNim_dAvmKQ3xWcGOXrN1tc9NSb-LM2rj-1sAMVV_W6YLkX3dVST_CnlzaE6D4IzfwMUFw/s1600/arkin_recolor_nored.jpg"
|
1066
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1067
|
+
border="0" height="400" width="300" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj21ggbKmvK0T2hmbK5tWykQeThSTgl-n-NtJnxUF0v7xQf0DAQV6IXutPz2DuPxFVqBNim_dAvmKQ3xWcGOXrN1tc9NSb-LM2rj-1sAMVV_W6YLkX3dVST_CnlzaE6D4IzfwMUFw/s400/arkin_recolor_nored.jpg"
|
1068
|
+
/></a></div>\n\n<pre class="prettyprint">\n&gt;
|
1069
|
+
save_recolor("baby.jpeg", "baby_new.jpg", replacements=1)\n</pre>\n\n\n<p/>\nIn
|
1070
|
+
the next image, I keep the red, and remove everything else.\n\n<div class="separator"
|
1071
|
+
style="clear: both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhA27bcS661NHy3BNusm97m9j8-jR-DYdF-rRYXgjwfZcSE08rY8qwFakOGzHk35tbuanWAJBf4QRySbcDvzE0vmDvlM0S4l1AOGHb8J6NIjCeBWAaDIZNfp8WH3puF_uqPN7K4DA/s1600/arkin_recolor_red.jpeg"
|
1072
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1073
|
+
border="0" height="400" width="300" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhA27bcS661NHy3BNusm97m9j8-jR-DYdF-rRYXgjwfZcSE08rY8qwFakOGzHk35tbuanWAJBf4QRySbcDvzE0vmDvlM0S4l1AOGHb8J6NIjCeBWAaDIZNfp8WH3puF_uqPN7K4DA/s400/arkin_recolor_red.jpeg"
|
1074
|
+
/></a></div>\n\n<pre class="prettyprint">\n&gt;
|
1075
|
+
save_recolor("baby.jpeg", "baby_new.jpg", replacements=2:10)\n</pre>\n\n\n<p/>\nBelow,
|
1076
|
+
I replace the red cluster pixels, with green ones of corresponding intensity.\n\n<div
|
1077
|
+
class="separator" style="clear: both; text-align: center;">\n<a
|
1078
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiwxVeNhcer-um9mLCAKIdpVqaI5GsohQzeHECRjZ7bI6gDEHzsp45SuCy7GjANE9IYAXdI0qBmf4ZEO8yWwgPWWy8MOW_O8MGnr1BDR1A3P4U90SXaYy1fmBlQU8pRiSNHXZdKug/s1600/arkin_recolor_green.jpg"
|
1079
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1080
|
+
border="0" height="400" width="300" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiwxVeNhcer-um9mLCAKIdpVqaI5GsohQzeHECRjZ7bI6gDEHzsp45SuCy7GjANE9IYAXdI0qBmf4ZEO8yWwgPWWy8MOW_O8MGnr1BDR1A3P4U90SXaYy1fmBlQU8pRiSNHXZdKug/s400/arkin_recolor_green.jpg"
|
1081
|
+
/></a></div>\n\n<pre class="prettyprint">\n&gt;
|
1082
|
+
save_recolor("baby.jpeg", "baby_new.jpg", replacements=1,\n
|
1083
|
+
\ rgb_factors=c(10/100, 80/100, 10/100))\n</pre>\n\n<p/>\nAnd
|
1084
|
+
this is a fun one: Get rid of everything, keep just the grass.\n\n<div
|
1085
|
+
class="separator" style="clear: both; text-align: center;">\n<a
|
1086
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhSuSiJ8ulDFAstlCFQkYTAJIRIhYSVl_jrUYMaC-G4D9hmpIEBMnkl3DI4B54Kvi28b5q4qJPBS8M9w0OBPqLebGd5-nNN32MUJioCg-cFx3hhoEwARmexWb9kLyd20u8NIAu_Aw/s1600/arkin_recolor_grass.jpg"
|
1087
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1088
|
+
border="0" height="400" width="300" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhSuSiJ8ulDFAstlCFQkYTAJIRIhYSVl_jrUYMaC-G4D9hmpIEBMnkl3DI4B54Kvi28b5q4qJPBS8M9w0OBPqLebGd5-nNN32MUJioCg-cFx3hhoEwARmexWb9kLyd20u8NIAu_Aw/s400/arkin_recolor_grass.jpg"
|
1089
|
+
/></a></div>\n\n\n<pre class="prettyprint">\n&gt;
|
1090
|
+
save_recolor("baby.jpeg", "baby_new.jpg", replacements=c(1,3:10))\n</pre>\n\n<p/>\nI
|
1091
|
+
tried this on various images, using different cluster sizes, replacements,
|
1092
|
+
and RGB factors, with lots of interesting results. Anyhow, you should experiment
|
1093
|
+
with this yourselves and let me know what you find.\n<p/>\nI should
|
1094
|
+
point out that nothing here is novel or new -- it's all well known in
|
1095
|
+
image processing circles. It's still pretty impressive what you can do
|
1096
|
+
when you apply simple machine learning algorithms to other areas.\n\n<p>\nOkay,
|
1097
|
+
as in all my posts, the code is available in my <a href="http://github.com/0xfe">GitHub
|
1098
|
+
repository</a>:\n<p/>\n<a href="https://github.com/0xfe/experiments/blob/master/r/recolor.rscript">https://github.com/0xfe/experiments/blob/master/r/recolor.rscript</a>\n\n<p/>\nHappy
|
1099
|
+
new year!</content><link rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/6880188793332322250/comments/default'
|
1100
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2012/01/more-k-means-clustering-experiments-on.html#comment-form'
|
1101
|
+
title='3 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6880188793332322250'/><link
|
1102
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6880188793332322250'/><link
|
1103
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2012/01/more-k-means-clustering-experiments-on.html'
|
1104
|
+
title='More K-Means Clustering Experiments on Images'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1105
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1106
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjnRdj14UpXHdCnqTwLonrOWUuqLfH09kzSImmQpt8gaPgq8udoZZCtbCcvejK2mvW0u9RKzC355jFpINb2sxNvMccNBy5z8j477Vx0HLeTB87Mww1nldLP6aQ7qZn5mOalIi7-Tw/s72-c/Screen+shot+2012-01-01+at+12.16.45+PM.png\"
|
1107
|
+
height=\"72\" width=\"72\"/><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-8650024762996646704</id><published>2011-12-31T10:27:00.000-05:00</published><updated>2011-12-31T14:14:34.568-05:00</updated><title
|
1108
|
+
type='text'>K-Means Clustering and Art</title><content type='html'><i>Cross
|
1109
|
+
posted from <a href="https://plus.google.com/u/0/111867441083313519234/posts/dxp5w3R7ts3">Google+</a>.</i>\n<p/>\nMy
|
1110
|
+
coworker at Google, Tony Rippy, has for a while been working on a fascinating
|
1111
|
+
problem. Take all the pixels of a photograph, and rearrange them so that the
|
1112
|
+
final image looks like an artist's palette -- something to which you can
|
1113
|
+
take a paintbrush and recreate the original image.\n<p/>\nHe's got
|
1114
|
+
some really good looking solutions which he might post if you ask him nicely.
|
1115
|
+
:-)\n<p/>\nThis turns out to be a tricky problem, and its hard to come
|
1116
|
+
up with an objective measure of the quality of any given solution. In fact,
|
1117
|
+
the quality is very subjective.\n<p/>\nAnyhow, while studying the <a
|
1118
|
+
href="http://en.wikipedia.org/wiki/K-means_clustering">K-means
|
1119
|
+
clustering algorithm</a> from <a href="http://www.ml-class.org">ml-class</a>,
|
1120
|
+
it struck me that <i>k-means</i> could be used to help with extracting
|
1121
|
+
a small palette of colors from an image. For example, by using each of the
|
1122
|
+
RGB channels as features, and euclidian distance as the similarity metric,
|
1123
|
+
one could run stock <i>k-means</i> to generate clusters of similar
|
1124
|
+
colors.\n<p/>\nI coded up a quick R script to test this and got some
|
1125
|
+
interesting results. Here is an example of an image with its potential palette.
|
1126
|
+
Recall that the second image is simply the first image with the pixels rearranged.\n<p/>\n\n<div
|
1127
|
+
class="separator" style="clear: both; text-align: center;">\n<a
|
1128
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjxNbcvY4I9RIAU91lzjK2m8HR9V5o9fhVa8AYncnYU1uhzq6uHAB3r0NgMlndNIw_TZG5S0gqcZmf-cV6A0A5ll8v66DqrHqoC9nPSWGVpfdAW1qGMape3yvvrC5_q019LmHJ74g/s1600/Screen+shot+2011-12-30+at+1.58.13+PM.png"
|
1129
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1130
|
+
border="0" height="180" width="400" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjxNbcvY4I9RIAU91lzjK2m8HR9V5o9fhVa8AYncnYU1uhzq6uHAB3r0NgMlndNIw_TZG5S0gqcZmf-cV6A0A5ll8v66DqrHqoC9nPSWGVpfdAW1qGMape3yvvrC5_q019LmHJ74g/s400/Screen+shot+2011-12-30+at+1.58.13+PM.png"
|
1131
|
+
/></a></div>\n\n\n<p/>\nI experimented with various values
|
1132
|
+
of <i>k</i> (number of clusters) for the different images. It
|
1133
|
+
turns out that it's pretty hard to algorithmically pre-determine this
|
1134
|
+
number (although there are various techniques that do exist.) The water villa
|
1135
|
+
pic above has 15 clusters, the nursery pic below has 20, and the cartoon has
|
1136
|
+
6.\n<p/>\n<div class="separator" style="clear: both;
|
1137
|
+
text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh-MFHMYHmP27nm5qteBSwvHnYI6jfyLCVledC8UBk9t8tjffNwLd4jldB691bTfQmNuBkQaQXQ-bEuEFYo-Z09908pU09B1VrhQmccYFw9hyphenhyphen0WL69_XQXRn2GnnWppDi64dVW_lg/s1600/Screen+shot+2011-12-30+at+1.57.12+PM.png"
|
1138
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1139
|
+
border="0" height="235" width="400" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh-MFHMYHmP27nm5qteBSwvHnYI6jfyLCVledC8UBk9t8tjffNwLd4jldB691bTfQmNuBkQaQXQ-bEuEFYo-Z09908pU09B1VrhQmccYFw9hyphenhyphen0WL69_XQXRn2GnnWppDi64dVW_lg/s400/Screen+shot+2011-12-30+at+1.57.12+PM.png"
|
1140
|
+
/></a></div>\n\n<p/>\nNote that this is only one subproblem
|
1141
|
+
of the original one; there is also the subproblem of placement, which I skirted
|
1142
|
+
around by simply arranging the colors in vertical bands across the final image.
|
1143
|
+
I'm pretty sure no artist's palette looks like this.\n<p/>\n<div
|
1144
|
+
class="separator" style="clear: both; text-align: center;">\n<a
|
1145
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhlwZAhtmIefeK5li5WDXi9R8ieeu1d9aKww6_JScrZ9IbPnQqT1um3HKhpVcsH7X7yG-T0j873HOt-t0kOz3W4UryfCA_qzFFYNzbVv66X0zBGUzx_rP7dZiKnSXd8RkWNhGreQg/s1600/Screen+shot+2011-12-30+at+1.57.49+PM.png"
|
1146
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1147
|
+
border="0" height="182" width="400" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhlwZAhtmIefeK5li5WDXi9R8ieeu1d9aKww6_JScrZ9IbPnQqT1um3HKhpVcsH7X7yG-T0j873HOt-t0kOz3W4UryfCA_qzFFYNzbVv66X0zBGUzx_rP7dZiKnSXd8RkWNhGreQg/s400/Screen+shot+2011-12-30+at+1.57.49+PM.png"
|
1148
|
+
/></a></div>\n\n\n<p/>\nAlso, these palettes aren't
|
1149
|
+
very "clean". Since the original pictures themselves are noisy,
|
1150
|
+
some of this noise arbitrarily creep into the various clusters. Working with
|
1151
|
+
a filtered version of the picture would be cheating, so we won't do that.
|
1152
|
+
But we might be able to extract the noisy pixels, put them in a special cluster,
|
1153
|
+
and run <i>k-means</i> on the remaining pixels.\n<p/>\nOkay,
|
1154
|
+
enough talk. Here's the code: <a href="https://github.com/0xfe/experiments/blob/master/r/palette.rscript">https://github.com/0xfe/experiments/blob/master/r/palette.rscript</a>\n<p/>\nFirst
|
1155
|
+
install <code>cclust</code> and <code>ReadImages</code>
|
1156
|
+
packages from <a href="http://cran.r-project.org">CRAN</a>,
|
1157
|
+
and try out the algorithm in an R console:\n<p/>\n<pre class="prettyprint">\n&gt;
|
1158
|
+
source('/path/to/palette.rscript')\n&gt; plot_palette('/path/to/some/image.jpg')\n</pre>\n<p/>\nThis
|
1159
|
+
will produce a plot with the original image and the transformed one next to
|
1160
|
+
each other, like the attached pics below. It uses 10 clusters by default,
|
1161
|
+
for a palette of 10 colors. You can change this by passing the cluster count
|
1162
|
+
as the second parameter to <code>plot_palette</code>.\n<p/>\n<pre
|
1163
|
+
class="prettyprint">\n&gt; plot_palette('/path/to/some/image.jpg',
|
1164
|
+
20)\n</pre>\n<p/>\nThat's all folks!</content><link rel='replies'
|
1165
|
+
type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/8650024762996646704/comments/default'
|
1166
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/12/k-means-clustering-and-art.html#comment-form'
|
1167
|
+
title='18 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/8650024762996646704'/><link
|
1168
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/8650024762996646704'/><link
|
1169
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/12/k-means-clustering-and-art.html'
|
1170
|
+
title='K-Means Clustering and Art'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1171
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1172
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjxNbcvY4I9RIAU91lzjK2m8HR9V5o9fhVa8AYncnYU1uhzq6uHAB3r0NgMlndNIw_TZG5S0gqcZmf-cV6A0A5ll8v66DqrHqoC9nPSWGVpfdAW1qGMape3yvvrC5_q019LmHJ74g/s72-c/Screen+shot+2011-12-30+at+1.58.13+PM.png\"
|
1173
|
+
height=\"72\" width=\"72\"/><thr:total>18</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-7852661428465635210</id><published>2011-08-21T11:15:00.000-04:00</published><updated>2011-08-21T21:37:53.266-04:00</updated><category
|
1174
|
+
scheme=\"http://www.blogger.com/atom/ns#\" term=\"webaudio\"/><title type='text'>A
|
1175
|
+
Web Audio Spectrum Analyzer</title><content type='html'>In my last post, I
|
1176
|
+
went over some of the <a href="http://0xfe.blogspot.com/2011/08/generating-tones-with-web-audio-api.html">basics
|
1177
|
+
of the Web Audio API</a> and showed you how to generate <a href="http://0xfe.muthanna.com/tone">sine
|
1178
|
+
waves</a> of various frequencies and amplitudes. We were introduced
|
1179
|
+
to some key <a href="https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html">Web
|
1180
|
+
Audio classes</a>, such as <code>AudioContext</code>, <code>AudioNode</code>,
|
1181
|
+
and <code>JavaScriptAudioNode</code>.\n<p/>\nThis time,
|
1182
|
+
I'm going to go take things a little further and build a realtime spectrum
|
1183
|
+
analyzer with Web Audio and HTML5 Canvas. The final product plays a remote
|
1184
|
+
music file, and displays the frequency spectrum overlaid with a time domain
|
1185
|
+
graph.\n<p/>\n\n<div class="separator" style="clear:
|
1186
|
+
both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgpX14OzEMC3DnQ74e2TZUz2RSTwgECiV182MM7scAFYjkJTdyT5xMNYngRGYwOy-FaaGIcnO-geVMsVzpfgcG0smJChWAWouNtkZMxUCWIT7LJdIq7JojMIpsAmfMOD5MMIx0lAg/s1600/Screen+shot+2011-08-20+at+11.47.10+AM.png"
|
1187
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1188
|
+
border="0" height="234" width="400" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgpX14OzEMC3DnQ74e2TZUz2RSTwgECiV182MM7scAFYjkJTdyT5xMNYngRGYwOy-FaaGIcnO-geVMsVzpfgcG0smJChWAWouNtkZMxUCWIT7LJdIq7JojMIpsAmfMOD5MMIx0lAg/s400/Screen+shot+2011-08-20+at+11.47.10+AM.png"
|
1189
|
+
/></a></div>\n\n<p/>\nThe demo is here: <a href="http://0xfe.muthanna.com/wavebox">JavaScript
|
1190
|
+
Spectrum Analyzer</a>. The code for the demo is in my <a href="https://github.com/0xfe/experiments/tree/master/www/wavebox">GitHub</a>
|
1191
|
+
repository.\n<p/>\n<h3>The New Classes</h3>\n<p/>\nIn
|
1192
|
+
this post we introduce three new Web Audio classes: <code>AudioBuffer</code>,
|
1193
|
+
<code>AudioBufferSourceNode</code>, and <code>RealtimeAnalyzerNode</code>.\n<p/>\nAn
|
1194
|
+
<code>AudioBuffer</code> represents an in-memory audio asset.
|
1195
|
+
It is usually used to store short audio clips and can contain multiple channels.\n<p/>\nAn
|
1196
|
+
<code>AudioBufferSourceNode</code> is a specialization of <code>AudioNode</code>
|
1197
|
+
that serves audio from <code>AudioBuffer</code>s.\n<p/>\nA
|
1198
|
+
<code>RealtimeAnalyzerNode</code> is an <code>AudioNode</code>
|
1199
|
+
that returns time- and frequency-domain analysis information in real time.\n\n<p/>\n<h3>The
|
1200
|
+
Plumbing</h3>\n<p/>\n\nTo begin, we need to acquire some audio.
|
1201
|
+
The API supports a number of different formats, including MP3 and raw PCM-encoded
|
1202
|
+
audio. In our demo, we retrieve a remote audio asset (an MP3 file) using AJAX,
|
1203
|
+
and use it to populate a new <code>AudioBuffer</code>. This is
|
1204
|
+
implemented in the <code>RemoteAudioPlayer</code> class (<a
|
1205
|
+
href="https://github.com/0xfe/experiments/blob/master/www/wavebox/js/remoteaudioplayer.js">js/remoteaudioplayer.js</a>)
|
1206
|
+
like so:\n\n<pre class="prettyprint">\nRemoteAudioPlayer.prototype.load
|
1207
|
+
= function(callback) {\n var request = new XMLHttpRequest();\n var that
|
1208
|
+
= this;\n request.open("GET", this.url, true);\n request.responseType
|
1209
|
+
= "arraybuffer";\n request.onload = function() {\n that.buffer
|
1210
|
+
= that.context.createBuffer(request.response, true);\n that.reload();\n
|
1211
|
+
\ callback(request.response);\n }\n\n request.send();\n}\n</pre>\n\nNotice
|
1212
|
+
that the <i>jQuery</i>'s AJAX calls aren't used here.
|
1213
|
+
This is because jQuery does not support the <i>arraybuffer</i>
|
1214
|
+
response type, which is required for loading binary data from the server.
|
1215
|
+
The <code>AudioBuffer</code> is created with the <code>AudioContext</code>'s
|
1216
|
+
<code>createBuffer</code> function. The second parameter, <code>true</code>,
|
1217
|
+
tells it to mix down all the channels to a single mono channel.\n\n<p/>\nThe
|
1218
|
+
<code>AudioBuffer</code> is then provided to an <code>AudioBufferSourceNode</code>,
|
1219
|
+
which will be the context's audio source. This source node is then connected
|
1220
|
+
to a <code>RealTimeAnalyzerNode</code>, which in turn is connected
|
1221
|
+
to the context's destination, i.e, the computer's output device.\n\n<pre
|
1222
|
+
class="prettyprint">\nvar source_node = context.createBufferSource();\nsource_node.buffer
|
1223
|
+
= audio_buffer;\n\nvar analyzer = context.createAnalyser();\nanalyzer.fftSize
|
1224
|
+
= 2048; // 2048-point FFT\nsource_node.connect(analyzer);\nanalyzer.connect(context.destination);\n</pre>\n\nTo
|
1225
|
+
start playing the music, call the <code>noteOn</code> method of
|
1226
|
+
the source node. <code>noteOn</code> takes one parameter: a timestamp
|
1227
|
+
indicating when to start playing. If set to <code>0</code>, it
|
1228
|
+
plays immediately. To start playing the music 0.5 seconds from now, you can
|
1229
|
+
use <code>context.currentTime</code> to get the reference point.\n\n<pre
|
1230
|
+
class="prettyprint">\n// Play music 0.5 seconds from now\nsource_node.noteOn(context.currentTime
|
1231
|
+
+ 0.5);\n</pre>\n\nIt's also worth noting that we specified the
|
1232
|
+
granularity of the FFT to 2048 by setting the <code>analyzer.fftSize</code>
|
1233
|
+
variable. For those unfamiliar with DSP theory, this breaks the frequency
|
1234
|
+
spectrum of the audio into 2048 points, each point representing the magnitude
|
1235
|
+
of the <i>n/2048th</i> frequency bin.\n\n<p/>\n\n<div
|
1236
|
+
class="separator" style="clear: both; text-align: center;">\n<a
|
1237
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi_sZlkl6rt9eyhJSC6_Veh4Q5eYipuxoAac_7G56fMnENz4dVXrfYXwQsjKX6ewUoj5GEvnE5JIpAh9iMuhFyR2OJw3AoQAe7vkd7XclMC7Mo0NO_QwSIDaQVmYa2QEnhrjLv5_A/s1600/Screen+shot+2011-08-20+at+11.38.45+AM.png"
|
1238
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1239
|
+
border="0" height="150" width="400" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi_sZlkl6rt9eyhJSC6_Veh4Q5eYipuxoAac_7G56fMnENz4dVXrfYXwQsjKX6ewUoj5GEvnE5JIpAh9iMuhFyR2OJw3AoQAe7vkd7XclMC7Mo0NO_QwSIDaQVmYa2QEnhrjLv5_A/s400/Screen+shot+2011-08-20+at+11.38.45+AM.png"
|
1240
|
+
/></a></div>\n\n\n<p/>\n<h3>The Pretty Graphs</h3>\n<p/>\n\nOkay,
|
1241
|
+
it's now all wired up -- how do I get the pretty graphs? The general strategy
|
1242
|
+
is to poll the analyzer every few milliseconds (e.g., with <code>window.setInterval</code>),
|
1243
|
+
request the time- or frequency-domain data, and then render it onto a HTML5
|
1244
|
+
Canvas element. The analyzer exports a few different methods to access the
|
1245
|
+
analysis data: <code>getFloatFrequencyData</code>, <code>getByteFrequencyData</code>,
|
1246
|
+
<code>getByteTimeDomainData</code>. Each of these methods populate
|
1247
|
+
a given <code>ArrayBuffer</code> with the appropriate analysis
|
1248
|
+
data.\n\n<p/>\nIn the below snippet, we schedule an <code>update()</code>
|
1249
|
+
function every 50ms, which breaks the frequency-domain data points into 30
|
1250
|
+
bins, and renders a bar representing the average magnitude of the points in
|
1251
|
+
each bin.\n\n<pre class="prettyprint">\ncanvas = document.getElementById(canvas_id);\ncanvas_context
|
1252
|
+
= canvas.getContext("2d");\n\nfunction update() {\n // This graph
|
1253
|
+
has 30 bars.\n var num_bars = 30;\n\n // Get the frequency-domain data\n
|
1254
|
+
\ var data = new Uint8Array(2048);\n analyzer.getByteFrequencyData(data);\n\n
|
1255
|
+
\ // Clear the canvas\n canvas_context.clearRect(0, 0, this.width, this.height);\n\n
|
1256
|
+
\ // Break the samples up into bins\n var bin_size = Math.floor(length /
|
1257
|
+
num_bars);\n for (var i=0; i < num_bars; ++i) {\n var sum = 0;\n for
|
1258
|
+
(var j=0; j < bin_size; ++j) {\n sum += data[(i * bin_size) + j];\n
|
1259
|
+
\ }\n\n // Calculate the average frequency of the samples in the bin\n
|
1260
|
+
\ var average = sum / bin_size;\n\n // Draw the bars on the canvas\n
|
1261
|
+
\ var bar_width = canvas.width / num_bars;\n var scaled_average = (average
|
1262
|
+
/ 256) * canvas.height;\n\n canvas_context.fillRect(i * bar_width, canvas.height,
|
1263
|
+
bar_width - 2,\n -scaled_average);\n}\n\n// Render
|
1264
|
+
every 50ms\nwindow.setInterval(update, 50);\n\n// Start the music\nsource_node.noteOn(0);\n</pre>\n\nA
|
1265
|
+
similar strategy can be employed for time-domain data, except for a few minor
|
1266
|
+
differences: Time-domain data is usually rendered as waves, so you might want
|
1267
|
+
to use lot more bins and plot pixels instead of drawing bars. The code that
|
1268
|
+
renders the time and frequency domain graphs in the demo is encapsulated in
|
1269
|
+
the <code>SpectrumBox</code> class in <a href="https://github.com/0xfe/experiments/blob/master/www/wavebox/js/spectrum.js">js/spectrum.js</a>.\n\n<p/>\n<h3>The
|
1270
|
+
Minutiae</h3>\n<p/>\nI glossed over a number of things in this
|
1271
|
+
post, mostly with respect to the details of the demo. You can learn it all
|
1272
|
+
from the source code, but here's a summary for the impatient: \n<p/>\nThe
|
1273
|
+
graphs are actually two HTML5 Canvas elements overlaid using CSS absolute
|
1274
|
+
positioning. Each element is used by its own <code>SpectrumBox</code>
|
1275
|
+
class, one which displays the frequency spectrum, the other which displays
|
1276
|
+
the time-domain wave. \n<p/>\nThe routing of the nodes is done in the
|
1277
|
+
<code>onclick</code> handler to the <code>#play</code>
|
1278
|
+
button -- it takes the <code>AudioSourceNode</code> from the <code>RemoteAudioPlayer</code>,
|
1279
|
+
routes it to node of the frequency analyzer, routes <i>that</i>
|
1280
|
+
to the node of the time-domain analyzer, and then finally to the destination.\n<p/>\n<h3>Bonus:
|
1281
|
+
Another Demo</h3>\n<p/>\n\nThat's all folks! You now have
|
1282
|
+
the knowhow to build yourself a fancy new graphical spectrum analyzer. If
|
1283
|
+
all you want to do is play with the waves and stare at the graphs, check out
|
1284
|
+
my other demo: The <a href="http://0xfe.muthanna.com/analyzer">Web
|
1285
|
+
Audio Tone Analyzer</code> (<a href="https://github.com/0xfe/experiments/tree/master/www/analyzer">source</a>).
|
1286
|
+
This is really just the same spectrum analyzer from the first demo, connected
|
1287
|
+
to the tone generator from the <a href="https://github.com/0xfe/experiments/tree/master/www/wavebox">last
|
1288
|
+
post</a>.\n\n<p/>\n<div class="separator" style="clear:
|
1289
|
+
both; text-align: center;">\n<a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjBgJGcbHueNFzV57A0hNmHy5pgk8CujXroKhFGrDgfpHQhVqkpcJbA3n11VSR-BJw6voEuyJubfEoUQXhyeisdk4h9Mwgap66UU4ivhRiJY_mKPESde1aquV7NsQt1gcbsY5ALdg/s1600/Screen+shot+2011-08-20+at+10.13.15+AM.png"
|
1290
|
+
imageanchor="1" style="margin-left:1em; margin-right:1em"><img
|
1291
|
+
border="0" height="127" width="320" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjBgJGcbHueNFzV57A0hNmHy5pgk8CujXroKhFGrDgfpHQhVqkpcJbA3n11VSR-BJw6voEuyJubfEoUQXhyeisdk4h9Mwgap66UU4ivhRiJY_mKPESde1aquV7NsQt1gcbsY5ALdg/s320/Screen+shot+2011-08-20+at+10.13.15+AM.png"
|
1292
|
+
/></a></div>\n\n<p/>\n\n<h3>References</h3>\n</p>\n\nAs
|
1293
|
+
a reminder, all the code for my posts is available at my GitHub repository:
|
1294
|
+
<a href="http://github.com/0xfe">github.com/0xfe</a>.\n\n<p/>\nThe
|
1295
|
+
audio track used in the demo is a discarded take of <a href="http://captainstarr.bandcamp.com/track/who-da-man">Who-Da-Man</a>,
|
1296
|
+
which I recorded with my previous band <a href="http://captainstarr.bandcamp.com/album/ep">Captain
|
1297
|
+
Starr</a> many many years ago.\n<p/>\nFinally, don't forget
|
1298
|
+
to read the <a href="https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html">Web
|
1299
|
+
Audio API</a> draft specification for more information.\n<p/>\nEnjoy!</content><link
|
1300
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/7852661428465635210/comments/default'
|
1301
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/08/web-audio-spectrum-analyzer.html#comment-form'
|
1302
|
+
title='9 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7852661428465635210'/><link
|
1303
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7852661428465635210'/><link
|
1304
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/08/web-audio-spectrum-analyzer.html'
|
1305
|
+
title='A Web Audio Spectrum Analyzer'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1306
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1307
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgpX14OzEMC3DnQ74e2TZUz2RSTwgECiV182MM7scAFYjkJTdyT5xMNYngRGYwOy-FaaGIcnO-geVMsVzpfgcG0smJChWAWouNtkZMxUCWIT7LJdIq7JojMIpsAmfMOD5MMIx0lAg/s72-c/Screen+shot+2011-08-20+at+11.47.10+AM.png\"
|
1308
|
+
height=\"72\" width=\"72\"/><thr:total>9</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-6394151762251429282</id><published>2011-08-13T20:58:00.005-04:00</published><updated>2011-08-22T07:12:12.828-04:00</updated><title
|
1309
|
+
type='text'>Generating Tones with the Web Audio API</title><content type='html'>The
|
1310
|
+
Web Audio API is a W3C draft standard interface for building in-browser audio
|
1311
|
+
applications. Although the draft is well specified, it is almost impossible
|
1312
|
+
to find useful documentation on building applications with it. <p/>In
|
1313
|
+
my quest to deeper understand HTML5 audio, I spent some time figuring out
|
1314
|
+
how the API works, and decided to write up this quick tutorial on doing useful
|
1315
|
+
things with it. <p/>We will build a sine wave tone-generator entirely
|
1316
|
+
in JavaScript. The final product looks like this: <a href="http://0xfe.muthanna.com/tone">Web
|
1317
|
+
Audio Tone Generator</a>. <p/>The full code is available in my
|
1318
|
+
GitHub repository: <a href="https://github.com/0xfe/experiments/tree/master/www/tone">https://github.com/0xfe/experiments/tree/master/www/tone</a>
|
1319
|
+
<p/><h3>\nCaveats</h3>\n<p/>The Web Audio API is draft,
|
1320
|
+
is likely to change, and does not work on all browsers. Right now, only the
|
1321
|
+
latest versions of Chrome and Safari support it. <p/><h3>\nOnwards
|
1322
|
+
We Go</h3>\n<p/>Getting started making sounds with the Web Audio
|
1323
|
+
API is straightforward so long as you take the time to study the plumbing,
|
1324
|
+
most of which exists to allow for real-time audio processing and synthesis.
|
1325
|
+
The complete specification is available on the <a href="https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html">W3C
|
1326
|
+
Web Audio API</a> page, and I'd strongly recommend that you read
|
1327
|
+
it thoroughly if you're interested in building advanced applications with
|
1328
|
+
the API. <p/>To produce any form of sound, you need an <code>AudioContext</code>
|
1329
|
+
and a few <code>AudioNode</code>s. The <code>AudioContext</code>
|
1330
|
+
is sort of like an environment for audio processing -- it's where various
|
1331
|
+
attributes such as the sample rate, the clock status, and other environment-global
|
1332
|
+
state reside. Most applications will need no more than a single instance of
|
1333
|
+
<code>AudioContext</code>. <p/>The <code>AudioNode</code>
|
1334
|
+
is probably the most important component in the API, and is responsible for
|
1335
|
+
synthesizing or processing audio. An <code>AudioNode</code> instance
|
1336
|
+
can be an input source, an output destination, or a mid-stream processor.
|
1337
|
+
These nodes can be linked together to form processing pipelines to render
|
1338
|
+
a complete audio stream. <p/>One kind of <code>AudioNode</code>
|
1339
|
+
is <code>JavaScriptAudioNode</code>, which is used to generate
|
1340
|
+
sounds in JavaScript. This is what what we will use in this tutorial to build
|
1341
|
+
a tone generator. <p/>Let us begin by instantiating an <code>AudioContext</code>
|
1342
|
+
and creating a <code>JavaScriptAudioNode</code>. <pre class="prettyprint">var
|
1343
|
+
context = new webkitAudioContext();\nvar node = context.createJavaScriptNode(1024,
|
1344
|
+
1, 1);\n</pre>\nThe parameters to <code>createJavaScriptNode</code>
|
1345
|
+
refer to the buffer size, the number of input channels, and the number of
|
1346
|
+
output channels. The buffer size must be in units of sample frames, i.e.,
|
1347
|
+
one of: 256, 512, 1024, 2048, 4096, 8192, or 16384. It controls the frequency
|
1348
|
+
of callbacks asking for a buffer refill. Smaller sizes allow for lower latency
|
1349
|
+
and higher for better overall quality. <p/>We're going to use the
|
1350
|
+
<code>JavaScriptNode</code> as the source node along with a bit
|
1351
|
+
of code to create sine waves. To actually <i>hear</i>&nbsp;anything,
|
1352
|
+
it must be connected to an output node. It turns out that <code>context.destination</code>
|
1353
|
+
gives us just that -- a node that maps to the speaker on your machine. <p/><h3>\nThe
|
1354
|
+
SineWave Class</h3>\n<p/>To start off our tone generator, we create
|
1355
|
+
a <code>SineWave</code> class, which wraps the <code>AudioNode</code>
|
1356
|
+
and wave generation logic into one cohesive package. This class will be responsible
|
1357
|
+
for creating the <code>JavaScriptNode</code> instances, generating
|
1358
|
+
the sine waves, and managing the connection to the destination node. <pre
|
1359
|
+
class="prettyprint">SineWave = function(context) {\n var that
|
1360
|
+
= this;\n this.x = 0; // Initial sample number\n this.context = context;\n
|
1361
|
+
\ this.node = context.createJavaScriptNode(1024, 1, 1);\n this.node.onaudioprocess
|
1362
|
+
= function(e) { that.process(e) };\n}\n\nSineWave.prototype.process = function(e)
|
1363
|
+
{\n var data = e.outputBuffer.getChannelData(0);\n for (var i = 0; i &lt;
|
1364
|
+
data.length; ++i) {\n data[i] = Math.sin(this.x++);\n }\n}\n\nSineWave.prototype.play
|
1365
|
+
= function() {\n this.node.connect(this.context.destination);\n}\n\nSineWave.prototype.pause
|
1366
|
+
= function() {\n this.node.disconnect();\n}\n</pre>\nUpon instantiation,
|
1367
|
+
this class creates a <code>JavaScriptAudioNode</code> and attaches
|
1368
|
+
an event handler to <code>onaudioprocess</code> for buffer refills.
|
1369
|
+
The event handler requests a reference to the output buffer for the first
|
1370
|
+
channel, and fills it with a sine wave. Notice that the handler does not know
|
1371
|
+
the buffer size in advance, and gets it from <code>data.length</code>.
|
1372
|
+
<p/>The buffer is of type <code>ArrayBuffer</code> which
|
1373
|
+
is a JavaScript Typed Array. These arrays allow for high throughput processing
|
1374
|
+
of raw binary data. To learn more about Typed Arrays, check out the <a
|
1375
|
+
href="https://developer.mozilla.org/en/javascript_typed_arrays">Mozilla
|
1376
|
+
Developer Documentation on Typed Arrays</a>. <p/>To try out a
|
1377
|
+
quick demo of the <code>SineWave</code> class, add the following
|
1378
|
+
code to the <code>onload</code> handler for your page: <pre
|
1379
|
+
class="prettyprint">var context = new webkitAudioContext();\nvar
|
1380
|
+
sinewave = new SineWave(context);\nsinewave.play();\n</pre>\n<div>\nNotice
|
1381
|
+
that <code>sinewave.play()</code> works by wiring up the node
|
1382
|
+
to the <code>AudioContext</code>'s destination (the speakers).
|
1383
|
+
To stop the tone, call <code>sinewave.pause()</code>, which unplugs
|
1384
|
+
this connection.</div>\n<p/><div>\n<h3>\nGenerating
|
1385
|
+
Specific Tones</h3>\n</div>\n<p/> <div>\nSo, now you
|
1386
|
+
have yourself a tone. Are we done yet?</div>\n\n<p/>\n<div>\nNot
|
1387
|
+
quite. How does one know what the frequency of the generated wave is? How
|
1388
|
+
does one generate tones of arbitrary frequencies?</div>\n<p/>\n<div>\nTo
|
1389
|
+
answer these questions, we must find out the sample rate of the audio. Each
|
1390
|
+
data value we stuff into the buffer in out handler is a sample, and the sample
|
1391
|
+
rate is the number of samples processed per second. We can calculate the frequency
|
1392
|
+
of the tone by dividing the sample rate by the length of a full wave cycle.</div>\n<p/>\n<div>\nHow
|
1393
|
+
do we get the sample rate? Via the <code>getSampleRate()</code>
|
1394
|
+
method of <code>AudioContext</code>. On my machine, the default
|
1395
|
+
sample rate is 44KHz, i.e., 44100 samples per second. This means that the
|
1396
|
+
frequency of the generated tone in our above code is:</div>\n<pre
|
1397
|
+
class="prettyprint">freq = context.getSampleRate() / 2 * Math.PI\n</pre>\n<div>\nThat's
|
1398
|
+
about 7KHz. Ouch! Lets use our newfound knowledge to generate less spine-curdling
|
1399
|
+
tones. To generate a tone of a specific frequency, you can change <code>SineWave.process</code>
|
1400
|
+
to:</div>\n<pre class="prettyprint">SineWave.prototype.process
|
1401
|
+
= function(e) {\n var data = e.outputBuffer.getChannelData(0);\n for (var
|
1402
|
+
i = 0; i &lt; data.length; ++i) {\n data[i] = Math.sin(this.x++ / (this.sample_rate
|
1403
|
+
/ 2 * Math.PI * this.frequency));\n }\n}\n</pre>\nAlso make sure you
|
1404
|
+
add the following two lines to <code>SineWave</code>'s constructor:
|
1405
|
+
\ <pre class="prettyprint">this.sample_rate = this.context.getSampleRate();\nthis.frequency
|
1406
|
+
= 440;\n</pre>\nThis initializes the frequency to <i>pitch standard
|
1407
|
+
A440</i>, i.e., the A above <i>middle C.</i> <p/><h3>\nThe
|
1408
|
+
Theremin Effect</h3>\n<p/>Now that we can generate tones of arbitrary
|
1409
|
+
frequencies, it's only natural that we connect our class to some sort
|
1410
|
+
of slider widget so we can experience the entire spectrum right in our browsers.
|
1411
|
+
Turns out that <a href="http://jqueryui.com/">JQueryUI</a>
|
1412
|
+
already has such a <a href="http://jqueryui.com/demos/slider/">slider</a>,
|
1413
|
+
leaving us only a little plumbing to do. <p/>We add a setter function
|
1414
|
+
to our <code>SineWave</code> class, and call it from our slider
|
1415
|
+
widget's change handler. <pre class="prettyprint">SineWave.prototype.setFrequency
|
1416
|
+
= function(freq) {\n this.next_frequency = freq;\n}\n</pre>\nA JQueryUI
|
1417
|
+
snippet would look like this: <pre class="prettyprint">$("#slider").slider({\n
|
1418
|
+
\ value: 440,\n min: 1,\n max: 2048,\n slide: function(event, ui)
|
1419
|
+
{ sinewave.setFrequency(ui.value); }\n});\n</pre>\n<h3>\nGoing
|
1420
|
+
up to Eleven</h3>\n<p/>Adding support for volume is straightforward.
|
1421
|
+
Add an amplitude member to the <code>SineWave</code> constructor
|
1422
|
+
along with a setter method, just like we did for frequency, and change <code>SineWave.process</code>
|
1423
|
+
to: <pre class="prettyprint">SineWave.prototype.process =
|
1424
|
+
function(e) {\n var data = e.outputBuffer.getChannelData(0);\n for (var
|
1425
|
+
i = 0; i &lt; data.length; ++i) {\n data[i] = this.amplitude * Math.sin(this.x++
|
1426
|
+
/ (this.sample_rate / 2 * Math.PI * this.frequency));\n }\n}\n</pre>\nFolks,
|
1427
|
+
we now have a full fledged sine wave generator! <p/><h3>\nBoo
|
1428
|
+
Hiss Crackle</h3>\n<p/>But, we're not done yet. You've
|
1429
|
+
probably noticed that changing the frequency causes mildly annoying crackling
|
1430
|
+
sounds. This happens because when the frequency changes, discontinuity occurs
|
1431
|
+
in the wave, causing a high-frequency <i>pop</i>&nbsp;in the
|
1432
|
+
audio stream. <p/><table align="center" cellpadding="0"
|
1433
|
+
cellspacing="0" class="tr-caption-container" style="margin-left:
|
1434
|
+
auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1435
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhalpMImRBOQHBmOscmOwmDAlyeLBL3bJ-unGATwmWuxUWy1OggewBKAyp4ckF67vY4RODDhgK1J0inVUaE0gcE4SG9s-bbQE7kOm5sdtIcTjvHi7jl99iV4XPXhnv-QqqehGBKPQ/s1600/Screen+shot+2011-08-13+at+8.45.06+PM.png"
|
1436
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1437
|
+
border="0" height="137" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhalpMImRBOQHBmOscmOwmDAlyeLBL3bJ-unGATwmWuxUWy1OggewBKAyp4ckF67vY4RODDhgK1J0inVUaE0gcE4SG9s-bbQE7kOm5sdtIcTjvHi7jl99iV4XPXhnv-QqqehGBKPQ/s320/Screen+shot+2011-08-13+at+8.45.06+PM.png"
|
1438
|
+
width="320" /></a></td></tr>\n<tr><td
|
1439
|
+
class="tr-caption" style="text-align: center;">Discontinuity
|
1440
|
+
when Changing Frequencies</td></tr>\n</tbody></table>\n<p/>We
|
1441
|
+
try to eliminate the discontinuity by only shifting frequencies when the cycle
|
1442
|
+
of the previous frequency completes, i.e., the sample value is (approximately)
|
1443
|
+
zero. (There are better ways to do this, e.g., windowing, LPFs, etc., but
|
1444
|
+
these techniques are out of the scope of this tutorial.) <p/>Although
|
1445
|
+
this complicates the code a little bit, waiting for the cycle to end significantly
|
1446
|
+
reduces the noise upon frequency shifts. <pre class="prettyprint">SineWave.prototype.setFrequency
|
1447
|
+
= function(freq) {\n this.next_frequency = freq;\n}\n\nSineWave.prototype.process
|
1448
|
+
= function(e) {\n // Get a reference to the output buffer and fill it up.\n
|
1449
|
+
\ var data = e.outputBuffer.getChannelData(0);\n\n // We need to be careful
|
1450
|
+
about filling up the entire buffer and not\n // overflowing.\n for (var
|
1451
|
+
i = 0; i &lt; data.length; ++i) {\n data[i] = this.amplitude * Math.sin(\n
|
1452
|
+
\ this.x++ / (this.sampleRate / (this.frequency * 2 * Math.PI)));\n\n
|
1453
|
+
\ // This reduces high-frequency blips while switching frequencies. It works\n
|
1454
|
+
\ // by waiting for the sine wave to hit 0 (on it's way to positive
|
1455
|
+
territory)\n // before switching frequencies.\n if (this.next_frequency
|
1456
|
+
!= this.frequency) {\n // Figure out what the next point is.\n next_data
|
1457
|
+
= this.amplitude * Math.sin(\n this.x / (this.sampleRate / (this.frequency
|
1458
|
+
* 2 * Math.PI)));\n\n // If the current point approximates 0, and the
|
1459
|
+
direction is positive,\n // switch frequencies.\n if (data[i] &lt;
|
1460
|
+
0.001 &amp;&amp; data[i] &gt; -0.001 &amp;&amp; data[i]
|
1461
|
+
&lt; next_data) {\n this.frequency = this.next_frequency;\n this.x
|
1462
|
+
= 0;\n }\n }\n }\n}\n</pre>\n<p/><div>\n<h3>\nThe
|
1463
|
+
End</h3>\n</div>\n<div>\n<p/></div>\n<div>\nAs
|
1464
|
+
mentioned in the beginning of this tutorial, a demo of the full code is available
|
1465
|
+
at&nbsp;<a href="http://0xfe.muthanna.com/tone/">http://0xfe.muthanna.com/tone/</a>&nbsp;and
|
1466
|
+
the entire source code is available at&nbsp;<a href="https://github.com/0xfe/experiments/tree/master/www/tone">https://github.com/0xfe/experiments/tree/master/www/tone</a>.</div>\n<p/>
|
1467
|
+
<div>\nDo check out the <a href="https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html">W3C
|
1468
|
+
Web Audio API</a> specification. Do check out the <a href="https://developer.mozilla.org/en/javascript_typed_arrays">Mozilla
|
1469
|
+
document on JavaScript Typed Arrays</a>.</div>\n<p/> <div>\nComments,
|
1470
|
+
criticism, and error reports welcome. Enjoy!</div>\n</content><link
|
1471
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/6394151762251429282/comments/default'
|
1472
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/08/generating-tones-with-web-audio-api.html#comment-form'
|
1473
|
+
title='11 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6394151762251429282'/><link
|
1474
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6394151762251429282'/><link
|
1475
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/08/generating-tones-with-web-audio-api.html'
|
1476
|
+
title='Generating Tones with the Web Audio API'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1477
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1478
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhalpMImRBOQHBmOscmOwmDAlyeLBL3bJ-unGATwmWuxUWy1OggewBKAyp4ckF67vY4RODDhgK1J0inVUaE0gcE4SG9s-bbQE7kOm5sdtIcTjvHi7jl99iV4XPXhnv-QqqehGBKPQ/s72-c/Screen+shot+2011-08-13+at+8.45.06+PM.png\"
|
1479
|
+
height=\"72\" width=\"72\"/><thr:total>11</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-1841038669655438377</id><published>2011-03-31T13:50:00.000-04:00</published><updated>2011-03-31T13:50:44.471-04:00</updated><title
|
1480
|
+
type='text'>A Music Theory API</title><content type='html'>Most of my work
|
1481
|
+
last week consisted of writing music theory code. <a href="http://vexflow.com/">VexFlow</a>
|
1482
|
+
now has a neat little music theory API, that gives you answers to questions
|
1483
|
+
like the following:<br />\n<br />\n<ul><li>What note
|
1484
|
+
is a minor 3rd above a B?</li>\n<li>What are the scale tones of
|
1485
|
+
a Gb Harmonic Minor?</li>\n<li>What relation is the C# note to
|
1486
|
+
an A Major scale? (Major 3rd)</li>\n<li>What accidentals should
|
1487
|
+
be displayed for the perfect 4th note of a G Major scale?</li>\n<li>etc.</li>\n</ul><br
|
1488
|
+
/>\nThe API is part of VexFlow, and can be used independently of the rendering
|
1489
|
+
API. Take a look at <a href="https://github.com/0xfe/vexflow/blob/master/src/music.js">music.js</a>
|
1490
|
+
in the <a href="http://github.com/0xfe/vexflow">VexFlow GitHub
|
1491
|
+
repository</a> for the complete reference. There's also a handy
|
1492
|
+
key management library for building scores in <a href="https://github.com/0xfe/vexflow/blob/master/src/keymanager.js">keymanager.js</a>.<br
|
1493
|
+
/>\n<br />\nI'm currently working on updating the&nbsp;<a
|
1494
|
+
href="http://vexflow.com/docs/tutorial.html">VexFlow Tutorial</a>&nbsp;with
|
1495
|
+
a quickstart on the music theory API, but meanwhile, here are some teasers
|
1496
|
+
(pulled straight out of the&nbsp;<a href="https://github.com/0xfe/vexflow/blob/master/tests/music_tests.js">tests</a>).<br
|
1497
|
+
/>\n<br />\n<pre class="prettyprint">// What does
|
1498
|
+
C note consist of?\nvar parts = music.getNoteParts("c");\nequals(parts.root,
|
1499
|
+
"c");\nequals(parts.accidental, null);\n\n// What does C# note consist
|
1500
|
+
of?\nvar parts = music.getNoteParts("c#");\nequals(parts.root, "c");\nequals(parts.accidental,
|
1501
|
+
"#");\n\n// What is a flat-5th above C?\nvar value = music.getRelativeNoteValue(music.getNoteValue("c"),\n
|
1502
|
+
\ music.getIntervalValue("b5"));\nequals(value,
|
1503
|
+
music.getNoteValue("gb");\nequals(value, music.getNoteValue("f#");\n\n//
|
1504
|
+
What is the C quality of a Db?\nequals(music.getRelativeNoteName("c",
|
1505
|
+
music.getNoteValue("db")), "c#");\n\n// What are the tones
|
1506
|
+
of a C major scale?\nvar c_major = music.getScaleTones(\n music.getNoteValue("c"),
|
1507
|
+
Vex.Flow.Music.scales.major);\n// result: ["c", "d", "e",
|
1508
|
+
"f", "g", "a", "b"]\n\n// What is
|
1509
|
+
the interval between a C and a D?\nequals(music.getCanonicalIntervalName(music.getIntervalBetween(\n
|
1510
|
+
\ music.getNoteValue("c"), music.getNoteValue("d"))),
|
1511
|
+
"M2");\n</pre><br />\n<div><span class="Apple-style-span"
|
1512
|
+
style="line-height: 16px;"><br />\n</span></div>Thanks
|
1513
|
+
to the theory support, we now have smarter Accidentals in the standard notation
|
1514
|
+
stave that VexTab generates.<br />\n<br />\n<table align="center"
|
1515
|
+
cellpadding="0" cellspacing="0" class="tr-caption-container"
|
1516
|
+
style="margin-left: auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1517
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjN1ilwcTQy81IgS1-R_nl7tp4oG0agi_Q3jvBZvwvFH3qq3Ktm7ikP87TY-S_YqrZx8cfd6-I21BeYfkqe2cidfztdCK1YLOC4Kb_z0OX2oKsVgdOlDsobtYqiSvBEc-9_lGAp4A/s1600/Screen+shot+2011-03-31+at+1.03.07+PM.png"
|
1518
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1519
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjN1ilwcTQy81IgS1-R_nl7tp4oG0agi_Q3jvBZvwvFH3qq3Ktm7ikP87TY-S_YqrZx8cfd6-I21BeYfkqe2cidfztdCK1YLOC4Kb_z0OX2oKsVgdOlDsobtYqiSvBEc-9_lGAp4A/s1600/Screen+shot+2011-03-31+at+1.03.07+PM.png"
|
1520
|
+
/></a></td></tr>\n<tr><td class="tr-caption"
|
1521
|
+
style="text-align: center;">Smarter Accidentals</td></tr>\n</tbody></table><br
|
1522
|
+
/>\nNotice how the accidentals are correctly picked according to the rules
|
1523
|
+
of standard notation? Yep, so do I.<br />\n<br />\nWe also have
|
1524
|
+
lots more tests -- over 750 of them! <a href="http://vexflow.com/tests">Try
|
1525
|
+
running them on your browser</a> and tell me how long it takes.<br
|
1526
|
+
/>\n<br />\nThat's all folks!</content><link rel='replies' type='application/atom+xml'
|
1527
|
+
href='https://0xfe.blogspot.com/feeds/1841038669655438377/comments/default'
|
1528
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/03/music-theory-api.html#comment-form'
|
1529
|
+
title='19 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/1841038669655438377'/><link
|
1530
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/1841038669655438377'/><link
|
1531
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/03/music-theory-api.html'
|
1532
|
+
title='A Music Theory API'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1533
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1534
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjN1ilwcTQy81IgS1-R_nl7tp4oG0agi_Q3jvBZvwvFH3qq3Ktm7ikP87TY-S_YqrZx8cfd6-I21BeYfkqe2cidfztdCK1YLOC4Kb_z0OX2oKsVgdOlDsobtYqiSvBEc-9_lGAp4A/s72-c/Screen+shot+2011-03-31+at+1.03.07+PM.png\"
|
1535
|
+
height=\"72\" width=\"72\"/><thr:total>19</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-6538091093974991528</id><published>2011-03-27T14:48:00.000-04:00</published><updated>2011-03-27T14:48:07.944-04:00</updated><title
|
1536
|
+
type='text'>Prettier Tablature</title><content type='html'>Spot the difference:<br
|
1537
|
+
/>\n<br />\n<table align="center" cellpadding="0"
|
1538
|
+
cellspacing="0" class="tr-caption-container" style="margin-left:
|
1539
|
+
auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1540
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhvDxctz41t8lkakd87vv6buAIPudM5HtxgjG12vGWh6yZ3Qe3keTOSaVtvJHYwxOHmqpm911ybd8t4jW-A4SFn6MzBOdCOaK1ZsOA29iPPif-UudHxuIpN5WJFLUfb_b9WIILBtA/s1600/Screen+shot+2011-03-27+at+2.34.14+PM.png"
|
1541
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1542
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhvDxctz41t8lkakd87vv6buAIPudM5HtxgjG12vGWh6yZ3Qe3keTOSaVtvJHYwxOHmqpm911ybd8t4jW-A4SFn6MzBOdCOaK1ZsOA29iPPif-UudHxuIpN5WJFLUfb_b9WIILBtA/s1600/Screen+shot+2011-03-27+at+2.34.14+PM.png"
|
1543
|
+
/></a></td></tr>\n<tr><td class="tr-caption"
|
1544
|
+
style="text-align: center;">Before</td></tr>\n</tbody></table><br
|
1545
|
+
/>\n<table align="center" cellpadding="0" cellspacing="0"
|
1546
|
+
class="tr-caption-container" style="margin-left: auto; margin-right:
|
1547
|
+
auto; text-align: center;"><tbody>\n<tr><td style="text-align:
|
1548
|
+
center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi3EiDHCEJfygzSLmLpnBqYC8yjLl4LMjkrlNFDhqQi5HO6TOpSUrOsOi2I3u1BRrNfVeGg8cz-NMIsdednVAHu-lxEA5yRwunWuoE06EbEyeNXYy_VBRqrw3gvJ2GOT7PHvn8rkA/s1600/Screen+shot+2011-03-27+at+2.32.23+PM.png"
|
1549
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1550
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi3EiDHCEJfygzSLmLpnBqYC8yjLl4LMjkrlNFDhqQi5HO6TOpSUrOsOi2I3u1BRrNfVeGg8cz-NMIsdednVAHu-lxEA5yRwunWuoE06EbEyeNXYy_VBRqrw3gvJ2GOT7PHvn8rkA/s1600/Screen+shot+2011-03-27+at+2.32.23+PM.png"
|
1551
|
+
/></a></td></tr>\n<tr><td class="tr-caption"
|
1552
|
+
style="text-align: center;">After</td></tr>\n</tbody></table><div>Still
|
1553
|
+
can't tell? Let me help you out. We have:</div><div><ul><li>Slightly
|
1554
|
+
greater spacing between the tablature stave lines. This makes it more consistent
|
1555
|
+
in appearance with printed tablature.</li>\n<li>Stave lines are
|
1556
|
+
cleared before fret numbers are rendered, vastly improving readability.</li>\n<li>Font
|
1557
|
+
sizes for fret numbers and annotations are bigger.</li>\n<li>Associated
|
1558
|
+
notation and tablature staves are connected with a vertical bar on the left.</li>\n<li>Micro-changes
|
1559
|
+
in spacing between fret numbers, effects, annotations, etc.</li>\n</ul></div><div>All
|
1560
|
+
these changes have been incorporated into <a href="http://vexflow.com/tabdiv">TabDiv</a>,
|
1561
|
+
and pushed to <a href="http://github.com/0xfe/vexflow">GitHub</a>.
|
1562
|
+
See more on the <a href="http://vexflow.com/vextab/tutorial.html">VexTab
|
1563
|
+
Tutorial</a> page. Enjoy!</div></content><link rel='replies' type='application/atom+xml'
|
1564
|
+
href='https://0xfe.blogspot.com/feeds/6538091093974991528/comments/default'
|
1565
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/03/prettier-tablature.html#comment-form'
|
1566
|
+
title='1 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6538091093974991528'/><link
|
1567
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6538091093974991528'/><link
|
1568
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/03/prettier-tablature.html'
|
1569
|
+
title='Prettier Tablature'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1570
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1571
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhvDxctz41t8lkakd87vv6buAIPudM5HtxgjG12vGWh6yZ3Qe3keTOSaVtvJHYwxOHmqpm911ybd8t4jW-A4SFn6MzBOdCOaK1ZsOA29iPPif-UudHxuIpN5WJFLUfb_b9WIILBtA/s72-c/Screen+shot+2011-03-27+at+2.34.14+PM.png\"
|
1572
|
+
height=\"72\" width=\"72\"/><thr:total>1</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-1929109118059060998</id><published>2011-03-24T16:44:00.000-04:00</published><updated>2011-03-24T16:44:47.737-04:00</updated><category
|
1573
|
+
scheme=\"http://www.blogger.com/atom/ns#\" term=\"vexflow\"/><title type='text'>The
|
1574
|
+
VexFlow Tutorial (...and other goodies)</title><content type='html'>Finally...
|
1575
|
+
finally... finally... we have the humble beginnings of what could be considered
|
1576
|
+
"documentation".<br />\n<br />\nI present to you <a
|
1577
|
+
href="http://vexflow.com/docs/tutorial.html">The VexFlow Tutorial</a>.<br
|
1578
|
+
/>\n<br />\nAlthough still in its infancy, the tutorial covers everything
|
1579
|
+
you need to <i>start</i> using VexFlow in your own code. My plan
|
1580
|
+
for the next few weeks is to make this document as comprehensive as possible,
|
1581
|
+
and write up a separate API reference.<br />\n<br />\nI hope that
|
1582
|
+
this tutorial will help developers understand VexFlow better, and enable them
|
1583
|
+
to build new and interesting libraries, parsers, and applications.<br />\n<br
|
1584
|
+
/>\nThe entire tutorial is stored in the <a href="https://github.com/0xfe/vexflow/blob/master/docs/tutorial.html">Git
|
1585
|
+
repo</a>; feel free to send me your corrections or other updates.<br
|
1586
|
+
/>\n<br />\nAbout time, I know.<br />\n<br />\nIn other
|
1587
|
+
news, we have had a few contributions to both VexFlow and VexTab. A big thanks
|
1588
|
+
to <a href="https://github.com/airfrog">airfrog</a>,
|
1589
|
+
<a href="https://github.com/wiseleyb">wiseleyb</a>,
|
1590
|
+
and <a href="https://github.com/adamf">adamf</a> for
|
1591
|
+
getting these done.<br />\n<br />\nFirst, we have the ability
|
1592
|
+
to render dotted notes.<br />\n<br />\n<table align="center"
|
1593
|
+
cellpadding="0" cellspacing="0" class="tr-caption-container"
|
1594
|
+
style="margin-left: auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1595
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjTJvi0zGz6LnMaUlGrYTQQCtHK5qDwop451cOBsWGXLwSaSUGrHs6UpuBK95zj-fRdph9w6ez4pzLuQIMze2owizc3Ly9Ja8Br3Va3KV-8J9Do0w6e0gb5W3La87_5sBro9mmEmA/s1600/Screen+shot+2011-03-24+at+4.29.01+PM.png"
|
1596
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1597
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjTJvi0zGz6LnMaUlGrYTQQCtHK5qDwop451cOBsWGXLwSaSUGrHs6UpuBK95zj-fRdph9w6ez4pzLuQIMze2owizc3Ly9Ja8Br3Va3KV-8J9Do0w6e0gb5W3La87_5sBro9mmEmA/s1600/Screen+shot+2011-03-24+at+4.29.01+PM.png"
|
1598
|
+
/></a></td></tr>\n<tr><td class="tr-caption"
|
1599
|
+
style="text-align: center;">Dotted Notes</td></tr>\n</tbody></table><br
|
1600
|
+
/>\nThen we have key signatures.<br />\n<br />\n<table align="center"
|
1601
|
+
cellpadding="0" cellspacing="0" class="tr-caption-container"
|
1602
|
+
style="margin-left: auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1603
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEheUE2y0VoCdcCgZCE32Gwj4JCEFDx8hh7b2TjjwV42XamUDvNNhQX5Xq6RypmUawFxFo0zf9zBoQATGJXO_g3JPbYpzG5dDEzhm7zibHJ9eZnlWc0hKzmAFFi164GazKoULiI8Eg/s1600/Screen+shot+2011-03-24+at+4.30.42+PM.png"
|
1604
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1605
|
+
border="0" height="131" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEheUE2y0VoCdcCgZCE32Gwj4JCEFDx8hh7b2TjjwV42XamUDvNNhQX5Xq6RypmUawFxFo0zf9zBoQATGJXO_g3JPbYpzG5dDEzhm7zibHJ9eZnlWc0hKzmAFFi164GazKoULiI8Eg/s320/Screen+shot+2011-03-24+at+4.30.42+PM.png"
|
1606
|
+
width="320" /></a></td></tr>\n<tr><td
|
1607
|
+
class="tr-caption" style="text-align: center;">Key
|
1608
|
+
Signatures</td></tr>\n</tbody></table><br />\nWe
|
1609
|
+
also have time signatures.<br />\n<br />\n<table align="center"
|
1610
|
+
cellpadding="0" cellspacing="0" class="tr-caption-container"
|
1611
|
+
style="margin-left: auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1612
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg27y5Z87WaSWtfaCNXHl9HVU0W9gH9Aso7krjOZS4QF0qaNfqJSiftWZcPNvnGmTe19-LS4rbPPyBaOnJCKE7s5vQEnA85ZnGC8RDbP8oqxOIraHn835bZ9lUe-0CmyElU8tAAXQ/s1600/Screen+shot+2011-03-24+at+4.30.53+PM.png"
|
1613
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1614
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg27y5Z87WaSWtfaCNXHl9HVU0W9gH9Aso7krjOZS4QF0qaNfqJSiftWZcPNvnGmTe19-LS4rbPPyBaOnJCKE7s5vQEnA85ZnGC8RDbP8oqxOIraHn835bZ9lUe-0CmyElU8tAAXQ/s1600/Screen+shot+2011-03-24+at+4.30.53+PM.png"
|
1615
|
+
/></a></td></tr>\n<tr><td class="tr-caption"
|
1616
|
+
style="text-align: center;">Time Signatures</td></tr>\n</tbody></table><br
|
1617
|
+
/>\nThis includes the really crazy time signatures too.<br />\n<br
|
1618
|
+
/>\n<table align="center" cellpadding="0" cellspacing="0"
|
1619
|
+
class="tr-caption-container" style="margin-left: auto; margin-right:
|
1620
|
+
auto; text-align: center;"><tbody>\n<tr><td style="text-align:
|
1621
|
+
center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj6onDHjQYycBooCK4HNjeECNfDU6s5UcPQai7Ab8ybP-vmVd2iZijJkoOb0H3-RZdS8-IdHCNQbPlyhsa6_s61YWGa0M6UkvTuy1zGGdaNkf_xYRJEiFKK92fb1v1-SQKXjBK0_w/s1600/Screen+shot+2011-03-24+at+4.31.03+PM.png"
|
1622
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1623
|
+
border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj6onDHjQYycBooCK4HNjeECNfDU6s5UcPQai7Ab8ybP-vmVd2iZijJkoOb0H3-RZdS8-IdHCNQbPlyhsa6_s61YWGa0M6UkvTuy1zGGdaNkf_xYRJEiFKK92fb1v1-SQKXjBK0_w/s1600/Screen+shot+2011-03-24+at+4.31.03+PM.png"
|
1624
|
+
/></a></td></tr>\n<tr><td class="tr-caption"
|
1625
|
+
style="text-align: center;">Whacky Time Signatures</td></tr>\n</tbody></table><br
|
1626
|
+
/>\nFinally, we have support for different types of clefs.<br />\n<br
|
1627
|
+
/>\n<table align="center" cellpadding="0" cellspacing="0"
|
1628
|
+
class="tr-caption-container" style="margin-left: auto; margin-right:
|
1629
|
+
auto; text-align: center;"><tbody>\n<tr><td style="text-align:
|
1630
|
+
center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjUXGZKV-uwG5IG2PIILUssE5giARboG92nDQDdlC3CJt-zu3QicRDirgKZxx_1geAMV_YHG-8QdEFP1hE6ek9_jTXdPUVNlQn0QOUsTh_81yimLMQ0mazZCGL0_p-YYRkfG3vWPg/s1600/Screen+shot+2011-03-24+at+4.31.40+PM.png"
|
1631
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1632
|
+
border="0" height="92" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjUXGZKV-uwG5IG2PIILUssE5giARboG92nDQDdlC3CJt-zu3QicRDirgKZxx_1geAMV_YHG-8QdEFP1hE6ek9_jTXdPUVNlQn0QOUsTh_81yimLMQ0mazZCGL0_p-YYRkfG3vWPg/s320/Screen+shot+2011-03-24+at+4.31.40+PM.png"
|
1633
|
+
width="320" /></a></td></tr>\n<tr><td
|
1634
|
+
class="tr-caption" style="text-align: center;">Alternate
|
1635
|
+
Clefs</td></tr>\n</tbody></table><br />\nBut
|
1636
|
+
wait... there's more.&nbsp;All of this is supported in <a href="http://vexflow.com/vextab/tutorial.html">VexTab</a>
|
1637
|
+
by way of new <span class="Apple-style-span" style="font-family:
|
1638
|
+
'Courier New', Courier, monospace;">tabstave</span>
|
1639
|
+
parameters. Take a look at the updated <a href="http://vexflow.com/vextab/tutorial.html">VexTab
|
1640
|
+
Tutorial</a> for the details.</content><link rel='replies' type='application/atom+xml'
|
1641
|
+
href='https://0xfe.blogspot.com/feeds/1929109118059060998/comments/default'
|
1642
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/03/vexflow-tutorial-and-other-goodies.html#comment-form'
|
1643
|
+
title='5 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/1929109118059060998'/><link
|
1644
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/1929109118059060998'/><link
|
1645
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/03/vexflow-tutorial-and-other-goodies.html'
|
1646
|
+
title='The VexFlow Tutorial (...and other goodies)'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1647
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1648
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjTJvi0zGz6LnMaUlGrYTQQCtHK5qDwop451cOBsWGXLwSaSUGrHs6UpuBK95zj-fRdph9w6ez4pzLuQIMze2owizc3Ly9Ja8Br3Va3KV-8J9Do0w6e0gb5W3La87_5sBro9mmEmA/s72-c/Screen+shot+2011-03-24+at+4.29.01+PM.png\"
|
1649
|
+
height=\"72\" width=\"72\"/><thr:total>5</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-6639885962680967449</id><published>2011-03-23T13:26:00.000-04:00</published><updated>2011-09-10T11:05:46.322-04:00</updated><category
|
1650
|
+
scheme=\"http://www.blogger.com/atom/ns#\" term=\"vim\"/><title type='text'>Editing
|
1651
|
+
XML and HTML in Vim</title><content type='html'>I just discovered the Vim
|
1652
|
+
<a href="https://github.com/sukima/xmledit/">xmledit</a>
|
1653
|
+
plugin.<br />\n<br />\nWith features like tag-completion, auto-wrapping
|
1654
|
+
and unwrapping, quick navigation, etc., it has, in a matter of minutes, measurably
|
1655
|
+
decreased my level of frustration while editing markup in Vim.<br />\n<br
|
1656
|
+
/>\n<h3>Installation</h3>\n<p/>\n<div>The quickest
|
1657
|
+
way to install the plugin is by downloading the latest .<code>vba</code>
|
1658
|
+
from the <a href="http://vim.sourceforge.net/scripts/script.php?script_id=301">plugin
|
1659
|
+
site</a>, and run the following commands:</div><br />\n<pre
|
1660
|
+
class="prettyprint">$ vim xmledit.vba\n:so %\n</pre><br
|
1661
|
+
/>\nYou also need to edit your&nbsp;<code>.vimrc</code>
|
1662
|
+
and add the following:<br />\n<br />\n<pre class="prettyprint">filetype
|
1663
|
+
plugin on\n</pre><br />\n<div>This installs the plugin into
|
1664
|
+
your <code>.vim/ftplugin</code> directory, and enables it for
|
1665
|
+
<code>.xml</code> files. To enable it for other file types, create
|
1666
|
+
a link to the file with the new extension name in the same directory. (Copying
|
1667
|
+
the file also works.)</div><br />\n<pre class="prettyprint">$
|
1668
|
+
cd ~/.vim/ftplugin\n$ ln -s xml.vim html.vim\n$ ln -s xml.vim xhtml.vim\n</pre><br
|
1669
|
+
/>\n<h3>Usage</h3><br />\nThe plugin supports the various
|
1670
|
+
Vim modes in interesting ways.<br />\n<br />\nIn insert mode,
|
1671
|
+
when you finish a tag (with the <code><span class="Apple-style-span"
|
1672
|
+
style="font-family: 'Courier New', Courier, monospace;">&gt;</span></code>&nbsp;character),
|
1673
|
+
it will be autocompleted and the cursor placed in-between the tags.<br
|
1674
|
+
/>\n<br />\nIf you immediately type another <code><span
|
1675
|
+
class="Apple-style-span" style="font-family: 'Courier New',
|
1676
|
+
Courier, monospace;">&gt;</span></code>, it will close
|
1677
|
+
the tag on its own line, and place the cursor on a new line right in between.<br
|
1678
|
+
/>\n<br />\nThe <code>%</code> key jumps between the
|
1679
|
+
start and end of a tag.<br />\n<br />\nThe <code>\\%</code>
|
1680
|
+
combination jumps between opening and closing tags. (Note that backslash is
|
1681
|
+
the default key-prefix for scripts and plugins to use. You can change this
|
1682
|
+
prefix with the <code>mapleader</code> setting.)<br />\n<br
|
1683
|
+
/>\nIf you select text (for example with <code>v</code>), and
|
1684
|
+
type <code>\\x</code>, it will prompt you to wrap the text with
|
1685
|
+
a custom tag.<br />\n<br />\nTyping in <code>\\d</code>
|
1686
|
+
unwraps surrounding tags from the cursor.<br />\n<br />\n<h3>Learning
|
1687
|
+
More</h3><br />\nType <code>:help xml-plugin</code>
|
1688
|
+
for help and more information.<br />\n<br />\nYay for another
|
1689
|
+
awesome Vim plugin. You can see my entire Vim profile in my <a href="https://github.com/0xfe/evil/tree/master/dotfiles/vim_local">Evil
|
1690
|
+
Tomato GitHub Repository</a>.</content><link rel='replies' type='application/atom+xml'
|
1691
|
+
href='https://0xfe.blogspot.com/feeds/6639885962680967449/comments/default'
|
1692
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/03/editing-xml-and-html-in-vim.html#comment-form'
|
1693
|
+
title='3 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6639885962680967449'/><link
|
1694
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6639885962680967449'/><link
|
1695
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/03/editing-xml-and-html-in-vim.html'
|
1696
|
+
title='Editing XML and HTML in Vim'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1697
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-4947829838430309691</id><published>2011-03-20T08:43:00.000-04:00</published><updated>2011-03-20T08:43:13.268-04:00</updated><title
|
1698
|
+
type='text'>On Twitter</title><content type='html'>It turns out I'm on
|
1699
|
+
twitter. I have absolutely no idea what I'm going to do with it.<br
|
1700
|
+
/>\n<br />\nMaybe I'll tweet every time time the compiler yells
|
1701
|
+
at me... or when my kernel panics... or when my browser <a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiiAOC6OcErm77z3wo9yVSCI1strQm99Ap5KQOXC7JYi1P1YdC0CMaaUWfpUh-0pZzGyodL1WA7AcdRp0WLDJ-kCTFmHDtGtAkKc5HmY6ohmqYVV0ZCFZ1-NYxKBB5FG1iuj1mZ/s400/aw,+snap.png">frowns</a>.<br
|
1702
|
+
/>\n<br />\nWhatever it is, I'm on twitter: <a href="http://twitter.com/11111110b">twitter.com/11111110b</a><br
|
1703
|
+
/>\n<br />\nThat's seven ones and a zero, followed by a bee.</content><link
|
1704
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/4947829838430309691/comments/default'
|
1705
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2011/03/on-twitter.html#comment-form'
|
1706
|
+
title='0 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/4947829838430309691'/><link
|
1707
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/4947829838430309691'/><link
|
1708
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2011/03/on-twitter.html'
|
1709
|
+
title='On Twitter'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1710
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-5987592118110172726</id><published>2010-09-13T11:03:00.000-04:00</published><updated>2010-09-13T11:03:44.098-04:00</updated><category
|
1711
|
+
scheme=\"http://www.blogger.com/atom/ns#\" term=\"haskell\"/><title type='text'>Regex
|
1712
|
+
Substitution in Haskell</title><content type='html'>I'm shocked and appalled
|
1713
|
+
at the fact that there is no generic regex substitution function in the GHC
|
1714
|
+
libraries. All I'm looking for is a simple function equivalent to perl's
|
1715
|
+
<span class="Apple-style-span" style="font-family: 'Courier
|
1716
|
+
New', Courier, monospace;">s/.../.../</span> expression.<br
|
1717
|
+
/>\n<br />\nAfter digging around a bit, I found <span class="Apple-style-span"
|
1718
|
+
style="font-family: 'Courier New', Courier, monospace;">subRegex</span>
|
1719
|
+
in <span class="Apple-style-span" style="font-family: 'Courier
|
1720
|
+
New', Courier, monospace;">regex-compat</span>. While this
|
1721
|
+
works well, it does not use PCRE, and as far as I can tell, there's no
|
1722
|
+
support for <span class="Apple-style-span" style="font-family:
|
1723
|
+
'Courier New', Courier, monospace;">ByteString</span>s.<br
|
1724
|
+
/>\n<br />\nGrrr.<br />\n<br />\nAnyhow, I took the <span
|
1725
|
+
class="Apple-style-span" style="font-family: 'Courier New',
|
1726
|
+
Courier, monospace;">subRegex</span> implementation from <span
|
1727
|
+
class="Apple-style-span" style="font-family: 'Courier New',
|
1728
|
+
Courier, monospace;">regex-compat</span> and mangled it slightly
|
1729
|
+
to work with <span class="Apple-style-span" style="font-family:
|
1730
|
+
'Courier New', Courier, monospace;">Text.Regex.PCRE</span>.
|
1731
|
+
I also added the <span class="Apple-style-span" style="font-family:
|
1732
|
+
'Courier New', Courier, monospace;">(=~$)</span> function
|
1733
|
+
which feels a bit more familiar to perl users. For example:<br />\n<pre
|
1734
|
+
class="prettyprint">Prelude PCRESub&gt; "me boo"
|
1735
|
+
=~$ ("(me) boo", "he \\\\1")\n"he me"</pre>The
|
1736
|
+
above is equivalent to perl's:<br />\n<pre class="prettyprint">$text
|
1737
|
+
= "me boo";\n$text =~ s/(me) boo/he $1/;</pre><span class="Apple-style-span"
|
1738
|
+
style="font-family: 'Courier New', Courier, monospace;">(~=$)</span>
|
1739
|
+
is implemented with <span class="Apple-style-span" style="font-family:
|
1740
|
+
'Courier New', Courier, monospace;">reSub</span> (which
|
1741
|
+
is also exported by <span class="Apple-style-span" style="font-family:
|
1742
|
+
'Courier New', Courier, monospace;">PCRESub</span>).
|
1743
|
+
<span class="Apple-style-span" style="font-family: 'Courier
|
1744
|
+
New', Courier, monospace;">reSub</span> allows you to provide
|
1745
|
+
your own <span class="Apple-style-span" style="font-family:
|
1746
|
+
'Courier New', Courier, monospace;">CompOption</span>
|
1747
|
+
and <span class="Apple-style-span" style="font-family: 'Courier
|
1748
|
+
New', Courier, monospace;">ExecOption</span> options.<br
|
1749
|
+
/>\n<br />\nHere's the <span class="Apple-style-span"
|
1750
|
+
style="font-family: 'Courier New', Courier, monospace;">PCRESub</span>
|
1751
|
+
module:<br />\n<pre class="prettyprint">-- PCRE-based
|
1752
|
+
Regex Substitution\n-- Mohit Muthanna Cheppudira\n--\n-- Based off code by
|
1753
|
+
Chris Kuklewicz from regex-compat library.\n--\n-- Requires Text.Regex.PCRE
|
1754
|
+
from regex-pcre.\n\nmodule PCRESub(\n (=~$),\n reSub\n) where\n\nimport
|
1755
|
+
Data.Array((!))\nimport Text.Regex.PCRE\n\nsubRegex :: Regex --
|
1756
|
+
^ Search pattern\n -> String -- ^ Input
|
1757
|
+
string\n -> String -- ^ Replacement text\n
|
1758
|
+
\ -> String -- ^ Output string\nsubRegex
|
1759
|
+
_ "" _ = ""\nsubRegex regexp inp repl =\n let compile
|
1760
|
+
_i str [] = \\ _m -> (str++)\n compile i str (("\\\\",(off,len)):rest)
|
1761
|
+
=\n let i' = off+len\n pre = take (off-i) str\n str'
|
1762
|
+
= drop (i'-i) str\n in if null str' then \\ _m -> (pre ++)
|
1763
|
+
. ('\\\\':)\n else \\ m -> (pre ++) . ('\\\\'
|
1764
|
+
:) . compile i' str' rest m\n compile i str ((xstr,(off,len)):rest)
|
1765
|
+
=\n let i' = off+len\n pre = take (off-i) str\n str'
|
1766
|
+
= drop (i'-i) str\n x = read xstr\n in if null str'
|
1767
|
+
then \\ m -> (pre++) . ((fst (m!x))++)\n else \\ m -> (pre++)
|
1768
|
+
. ((fst (m!x))++) . compile i' str' rest m\n compiled :: MatchText
|
1769
|
+
String -> String -> String\n compiled = compile 0 repl findrefs
|
1770
|
+
where\n bre = makeRegexOpts defaultCompOpt execBlank "\\\\\\\\(\\\\\\\\|[0-9]+)"\n
|
1771
|
+
\ findrefs = map (\\m -> (fst (m!1),snd (m!0))) (matchAllText bre
|
1772
|
+
repl)\n go _i str [] = str\n go i str (m:ms) =\n let (_,(off,len))
|
1773
|
+
= m!0\n i' = off+len\n pre = take (off-i) str\n
|
1774
|
+
\ str' = drop (i'-i) str\n in if null str' then
|
1775
|
+
pre ++ (compiled m "")\n else pre ++ (compiled m (go
|
1776
|
+
i' str' ms))\n in go 0 inp (matchAllText regexp inp)\n\n-- Substitue
|
1777
|
+
re with sub in str using options copts and eopts.\nreSub :: String -> String
|
1778
|
+
-> String -> CompOption -> ExecOption -> String\nreSub str re
|
1779
|
+
sub copts eopts = subRegex (makeRegexOpts copts eopts re) str sub\n\n-- Substitute
|
1780
|
+
re with sub in str, e.g.,\n--\n-- The perl expression:\n--\n-- $text = "me
|
1781
|
+
boo";\n-- $text =~ s/(me) boo/he $1/;\n--\n-- can be written as:\n--\n--
|
1782
|
+
\ text = "me boo" =~$ ("(me) boo", "he \\\\1")\n--\n(=~$)
|
1783
|
+
:: String -> (String, String) -> String\n(=~$) str (re, sub) = reSub
|
1784
|
+
str re sub defaultCompOpt defaultExecOpt</pre>Example usage:<br />\n<br
|
1785
|
+
/>\n<pre class="prettyprint">import PCRESub\n\nmain = do\n
|
1786
|
+
\ let text = "me boo" =~$ ("(me) boo", "he \\\\1")\n
|
1787
|
+
\ print text</pre>Paste this code in, or browse the source at my GitHub
|
1788
|
+
repo: <a href="http://github.com/0xfe/experiments/blob/master/haskell/PCRESub.hs">PCRESub.hs</a><br
|
1789
|
+
/>\n<br />\nSomeone please make this work across all the regex backends
|
1790
|
+
(and add support for <span class="Apple-style-span" style="font-family:
|
1791
|
+
'Courier New', Courier, monospace;">ByteString</span>s)!</content><link
|
1792
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/5987592118110172726/comments/default'
|
1793
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/09/regex-substitution-in-haskell.html#comment-form'
|
1794
|
+
title='3 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/5987592118110172726'/><link
|
1795
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/5987592118110172726'/><link
|
1796
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/09/regex-substitution-in-haskell.html'
|
1797
|
+
title='Regex Substitution in Haskell'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1798
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-5809791720055478982</id><published>2010-09-12T12:24:00.000-04:00</published><updated>2010-09-12T12:24:22.308-04:00</updated><category
|
1799
|
+
scheme=\"http://www.blogger.com/atom/ns#\" term=\"vexflow\"/><title type='text'>VexFlow
|
1800
|
+
Google Group</title><content type='html'>I've been out of touch for a
|
1801
|
+
while, and it took me way too long to set this up; but hey - better late than
|
1802
|
+
never. :-)<br />\n<br />\nAfter looking into various options for
|
1803
|
+
the VexFlow mailing list, I eventually decided to use Google Groups. It's
|
1804
|
+
super easy to setup and manage, and has all the features I'll ever need.<br
|
1805
|
+
/>\n<br />\nIf you're interested in hacking, discussing, or simply
|
1806
|
+
keeping up with VexFlow, sign up here:<br />\n<br />\n<div
|
1807
|
+
style="text-align: left;"><a href="http://groups.google.com/group/vexflow">http://groups.google.com/group/vexflow</a></div><br
|
1808
|
+
/>\nUnfortunately, I haven't had the time lately to hack on VexFlow,
|
1809
|
+
but I assure you that it's only temporary. I have a bunch of partial changes
|
1810
|
+
in the works, along with some interesting ideas floating around.&nbsp;More
|
1811
|
+
later.</content><link rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/5809791720055478982/comments/default'
|
1812
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/09/vexflow-google-group.html#comment-form'
|
1813
|
+
title='1 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/5809791720055478982'/><link
|
1814
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/5809791720055478982'/><link
|
1815
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/09/vexflow-google-group.html'
|
1816
|
+
title='VexFlow Google Group'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1817
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>1</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-3059590296733853422</id><published>2010-08-03T18:04:00.000-04:00</published><updated>2010-08-03T18:04:26.454-04:00</updated><title
|
1818
|
+
type='text'>VexFlow is Open Source</title><content type='html'><div style="margin-bottom:
|
1819
|
+
0px; margin-left: 0px; margin-right: 0px; margin-top: 0px;">That's
|
1820
|
+
right folks! All the <a href="http://www.vexflow.com/">VexFlow</a>
|
1821
|
+
code is now available in the <a href="http://github.com/0xfe/vexflow">VexFlow
|
1822
|
+
GitHub Repository</a>.</div><div style="margin-bottom:
|
1823
|
+
0px; margin-left: 0px; margin-right: 0px; margin-top: 0px;"><br
|
1824
|
+
/>\n</div><div style="margin-bottom: 0px; margin-left: 0px;
|
1825
|
+
margin-right: 0px; margin-top: 0px;">It's distributed under the
|
1826
|
+
OSI approved MIT License, so feel free to tinker, tweak, hack, fix, fork,
|
1827
|
+
and redistribute it.</div><div style="margin-bottom: 0px; margin-left:
|
1828
|
+
0px; margin-right: 0px; margin-top: 0px;"><br />\n</div><div
|
1829
|
+
class="separator" style="clear: both; text-align: center;"><a
|
1830
|
+
href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiCCX_uGvdpHI6QDxV6euNvnnKUFw7lBIkbPcBqlyD75_J7RsVW5qbWjSK9qCbeQNe9i_UwEPvWSCOSNnpEunNPcA4TyAO2-FPX77mdDwUhFYe6JSYmFEVHAedxT0oOF5_iVGqooQ/s1600/Screen+shot+2010-08-03+at+5.36.09+PM.png"
|
1831
|
+
imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img
|
1832
|
+
border="0" height="187" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiCCX_uGvdpHI6QDxV6euNvnnKUFw7lBIkbPcBqlyD75_J7RsVW5qbWjSK9qCbeQNe9i_UwEPvWSCOSNnpEunNPcA4TyAO2-FPX77mdDwUhFYe6JSYmFEVHAedxT0oOF5_iVGqooQ/s320/Screen+shot+2010-08-03+at+5.36.09+PM.png"
|
1833
|
+
width="320" /></a></div><div style="margin-bottom:
|
1834
|
+
0px; margin-left: 0px; margin-right: 0px; margin-top: 0px;"><br
|
1835
|
+
/>\n</div><div style="margin-bottom: 0px; margin-left: 0px;
|
1836
|
+
margin-right: 0px; margin-top: 0px;"><br />\n</div><div
|
1837
|
+
style="margin-bottom: 0px; margin-left: 0px; margin-right: 0px; margin-top:
|
1838
|
+
0px;">A lot of the core infrastructure (e.g., contexts, formatting,
|
1839
|
+
etc.) is ready and stable, and most of the work that needs to be done is adding
|
1840
|
+
support for various types of modifiers, effects, and annotations. I've
|
1841
|
+
worked on some of the trickier ones, like accidentals and beams, and have
|
1842
|
+
left the easier ones out so interested coders can learn by contributing.</div><div
|
1843
|
+
style="margin-bottom: 0px; margin-left: 0px; margin-right: 0px; margin-top:
|
1844
|
+
0px;"><br />\n</div><div style="margin-bottom:
|
1845
|
+
0px; margin-left: 0px; margin-right: 0px; margin-top: 0px;">This said,
|
1846
|
+
algorithms-enthusiasts need not feel left out - there are some hard problems
|
1847
|
+
to solve as well :-)</div><div style="margin-bottom: 0px; margin-left:
|
1848
|
+
0px; margin-right: 0px; margin-top: 0px;"><br />\n</div><div
|
1849
|
+
style="margin-bottom: 0px; margin-left: 0px; margin-right: 0px; margin-top:
|
1850
|
+
0px;">Here's where I would like help from the community:</div><div
|
1851
|
+
style="margin-bottom: 0px; margin-left: 0px; margin-right: 0px; margin-top:
|
1852
|
+
0px;"></div><ul><li>Dots (Easy)</li>\n<li>Trills
|
1853
|
+
(Easy)</li>\n<li>Grace Notes (Moderate)</li>\n<li>Slurs
|
1854
|
+
(Easy)</li>\n<li>Glyphs for time signatures (Easy)</li>\n<li>Key
|
1855
|
+
signature (Easy if you reuse the accidental placement code from accidentals.js)</li>\n<li>Guitar
|
1856
|
+
effects: Palm Muting, Scratches, Whammy, Harmonics, etc. (Easy)</li>\n<li>Chord
|
1857
|
+
Stave with Rhythm Slashes (Moderate to Hard)</li>\n<li>Lyrics
|
1858
|
+
(Easy)</li>\n</ul><br />\n<div style="margin-bottom:
|
1859
|
+
0px; margin-left: 0px; margin-right: 0px; margin-top: 0px;">Here's
|
1860
|
+
what I'm working on right now (and also wouldn't mind some help with):</div><div
|
1861
|
+
style="margin-bottom: 0px; margin-left: 0px; margin-right: 0px; margin-top:
|
1862
|
+
0px;"></div><ul><li>Tuplets / Triplets</li>\n<li>VexTab
|
1863
|
+
parser support for rests, alternate keys, and multiple voices.</li>\n<li>Alternate
|
1864
|
+
tunings and support for arbitrary-string instruments.</li>\n</ul><div>There
|
1865
|
+
isn't much developer documentation right now, but a good place to start
|
1866
|
+
is by going through the <a href="http://github.com/0xfe/vexflow/tree/master/tests/">code</a>
|
1867
|
+
for <a href="http://vexflow.com/tests/">the tests</a>.
|
1868
|
+
You may notice that some files are commented better than others - a great
|
1869
|
+
way to help is by adding better comments along with more thorough tests.</div><div><br
|
1870
|
+
/>\n</div><div>If you're not a coder and would like to
|
1871
|
+
help, you can do so by testing and <a href="http://github.com/0xfe/vexflow/issues">reporting
|
1872
|
+
bugs</a>, helping with documentation, spending $7 on a <a href="http://vexflow.com/tabdiv/">TabDiv
|
1873
|
+
license</a>, or simply spreading the word.</div><div><br
|
1874
|
+
/>\n</div><div>Thanks for all the support and help over the
|
1875
|
+
past few months. <a href="http://github.com/0xfe/vexflow">Dive
|
1876
|
+
in</a> and enjoy!</div></content><link rel='replies' type='application/atom+xml'
|
1877
|
+
href='https://0xfe.blogspot.com/feeds/3059590296733853422/comments/default'
|
1878
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/08/vexflow-is-open-source.html#comment-form'
|
1879
|
+
title='15 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/3059590296733853422'/><link
|
1880
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/3059590296733853422'/><link
|
1881
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/08/vexflow-is-open-source.html'
|
1882
|
+
title='VexFlow is Open Source'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1883
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1884
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiCCX_uGvdpHI6QDxV6euNvnnKUFw7lBIkbPcBqlyD75_J7RsVW5qbWjSK9qCbeQNe9i_UwEPvWSCOSNnpEunNPcA4TyAO2-FPX77mdDwUhFYe6JSYmFEVHAedxT0oOF5_iVGqooQ/s72-c/Screen+shot+2010-08-03+at+5.36.09+PM.png\"
|
1885
|
+
height=\"72\" width=\"72\"/><thr:total>15</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-7961142667231282175</id><published>2010-07-20T21:54:00.000-04:00</published><updated>2010-07-20T21:54:23.905-04:00</updated><title
|
1886
|
+
type='text'>More Durations and Better Beaming</title><content type='html'>I
|
1887
|
+
finally got most of the duration and beaming support worked out last weekend,
|
1888
|
+
and I gotta say that the generated scores by <a href="http://vexflow.com/">VexFlow</a>
|
1889
|
+
(and <a href="http://vexflow.com/vextab">VexTab</a>)
|
1890
|
+
are starting to look pretty good.<br />\n<br />\nIn VexTab notation,
|
1891
|
+
you can now set the duration of the subsequent notes using the colon (<span
|
1892
|
+
class="Apple-style-span" style="font-family: 'Courier New',
|
1893
|
+
Courier, monospace;">:</span>) character. By default, note durations
|
1894
|
+
are set to eighth notes.<br />\n<br />\nHere's an example
|
1895
|
+
of a line that generates a half-note followed by two quarter-notes.<br
|
1896
|
+
/>\n<br />\n<table align="center" cellpadding="0"
|
1897
|
+
cellspacing="0" class="tr-caption-container" style="margin-left:
|
1898
|
+
auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1899
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh4SokXKqQO6IN0lEH3D2PlyQERmbOVipuHNHDG4r-kmDdSPdJDz8hH38O8SKUyg1Xafi9Sp0jljjnPayVNJ1ta8Ez8H_c1PJwrLdqq5WtE7UXf7RgWWQXDmXmMlpSbf0llA1537Q/s1600/Screen+shot+2010-07-20+at+9.17.44+PM.png"
|
1900
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1901
|
+
border="0" height="328" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh4SokXKqQO6IN0lEH3D2PlyQERmbOVipuHNHDG4r-kmDdSPdJDz8hH38O8SKUyg1Xafi9Sp0jljjnPayVNJ1ta8Ez8H_c1PJwrLdqq5WtE7UXf7RgWWQXDmXmMlpSbf0llA1537Q/s400/Screen+shot+2010-07-20+at+9.17.44+PM.png"
|
1902
|
+
width="400" /></a></td></tr>\n<tr><td
|
1903
|
+
class="tr-caption" style="text-align: center;">Basic
|
1904
|
+
Duration Support<br />\n</td></tr>\n</tbody></table><br
|
1905
|
+
/>\nValid duration values (currently) are: <span class="Apple-style-span"
|
1906
|
+
style="font-family: 'Courier New', Courier, monospace;">w</span>,
|
1907
|
+
<span class="Apple-style-span" style="font-family: 'Courier
|
1908
|
+
New', Courier, monospace;">h</span>, <span class="Apple-style-span"
|
1909
|
+
style="font-family: 'Courier New', Courier, monospace;">q</span>,
|
1910
|
+
<span class="Apple-style-span" style="font-family: 'Courier
|
1911
|
+
New', Courier, monospace;">8</span>, <span class="Apple-style-span"
|
1912
|
+
style="font-family: 'Courier New', Courier, monospace;">16</span>,
|
1913
|
+
and <span class="Apple-style-span" style="font-family: 'Courier
|
1914
|
+
New', Courier, monospace;">32</span>. Support for dots and
|
1915
|
+
tuplets/triplets is not yet implemented.<br />\n<br />\nDurations
|
1916
|
+
can be specified inside slides, bends, and other types of ties by prefixing
|
1917
|
+
the fret with the duration value enclosed within colon characters.<br />\n<br
|
1918
|
+
/>\n<table align="center" cellpadding="0" cellspacing="0"
|
1919
|
+
class="tr-caption-container" style="margin-left: auto; margin-right:
|
1920
|
+
auto; text-align: center;"><tbody>\n<tr><td style="text-align:
|
1921
|
+
center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhKIJDWT5dvihx_d6pAC88xRKSznZvATNRSTXth6Kc3-UZgerT7Mg5t3F8z__X8uZGQ8dfHAAbkGSf1vXEvnEdG9J3CKzDuZvo5FLcCoiJyWDEasHpooW296n-xj1X8YNoCwTS1Vg/s1600/Screen+shot+2010-07-20+at+9.18.50+PM.png"
|
1922
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1923
|
+
border="0" height="330" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhKIJDWT5dvihx_d6pAC88xRKSznZvATNRSTXth6Kc3-UZgerT7Mg5t3F8z__X8uZGQ8dfHAAbkGSf1vXEvnEdG9J3CKzDuZvo5FLcCoiJyWDEasHpooW296n-xj1X8YNoCwTS1Vg/s400/Screen+shot+2010-07-20+at+9.18.50+PM.png"
|
1924
|
+
width="400" /></a></td></tr>\n<tr><td
|
1925
|
+
class="tr-caption" style="text-align: center;">Durations
|
1926
|
+
within Ties</td></tr>\n</tbody></table><br />\nAlso,
|
1927
|
+
I spent time working on some of the tricker beam configurations, where notes
|
1928
|
+
with varying durations are beamed.<br />\n<br />\n<table align="center"
|
1929
|
+
cellpadding="0" cellspacing="0" class="tr-caption-container"
|
1930
|
+
style="margin-left: auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1931
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjCpz_M1ppQZsOebHxPliRfXzxugqHBkSBmNjXQlEZg4HUFaAL3JI6G2WsM7aUnqn-lp0mi7r0Z14frYzVcyn5SB2YuJ08zOVyuSVA-d5JbFqx3qV_7v493PTGFmgeflHrKHLgAkA/s1600/Screen+shot+2010-07-20+at+9.20.49+PM.png"
|
1932
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1933
|
+
border="0" height="127" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjCpz_M1ppQZsOebHxPliRfXzxugqHBkSBmNjXQlEZg4HUFaAL3JI6G2WsM7aUnqn-lp0mi7r0Z14frYzVcyn5SB2YuJ08zOVyuSVA-d5JbFqx3qV_7v493PTGFmgeflHrKHLgAkA/s200/Screen+shot+2010-07-20+at+9.20.49+PM.png"
|
1934
|
+
width="200" /></a></td></tr>\n<tr><td
|
1935
|
+
class="tr-caption" style="text-align: center;">Crazy
|
1936
|
+
Beaming</td></tr>\n</tbody></table><br />\nIn
|
1937
|
+
VexTab, you can create beams by enclosing your notes within brackets (separated
|
1938
|
+
by spaces).<br />\n<br />\n<table align="center"
|
1939
|
+
cellpadding="0" cellspacing="0" class="tr-caption-container"
|
1940
|
+
style="margin-left: auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1941
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiY52epYKi4fdr_f1eQQRr2uzjG0H6qW1oKc-ND8x9U9129pIzxGd0bmW5RonRIfGnZ279nUOcOJ37X_iZiO7HaZSL3ESQKwnqHPpM1GroFHSzO7ByHIzfdkEdTBun3rBEBGxnbHw/s1600/Screen+shot+2010-07-20+at+9.25.47+PM.png"
|
1942
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1943
|
+
border="0" height="332" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiY52epYKi4fdr_f1eQQRr2uzjG0H6qW1oKc-ND8x9U9129pIzxGd0bmW5RonRIfGnZ279nUOcOJ37X_iZiO7HaZSL3ESQKwnqHPpM1GroFHSzO7ByHIzfdkEdTBun3rBEBGxnbHw/s400/Screen+shot+2010-07-20+at+9.25.47+PM.png"
|
1944
|
+
width="400" /></a></td></tr>\n<tr><td
|
1945
|
+
class="tr-caption" style="text-align: center;">Beaming
|
1946
|
+
in VexTab</td></tr>\n</tbody></table><br />\nAnyhow,
|
1947
|
+
I've pushed out the latest revision, with support for standard notation,
|
1948
|
+
durations, and beaming to the&nbsp;<a href="http://vexflow.com/tabdiv">TabDiv
|
1949
|
+
website</a>. Feel free to <a href="http://vexflow.com/vextab/tutorial.html">toy
|
1950
|
+
with it</a> and report any issues you come across.<br />\n<br
|
1951
|
+
/>\nHere's a screenshot of a bluesy guitar lick written in VexTab.<br
|
1952
|
+
/>\n<br />\n<table align="center" cellpadding="0"
|
1953
|
+
cellspacing="0" class="tr-caption-container" style="margin-left:
|
1954
|
+
auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
1955
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiR7GuMPHx-SaGfRCOiB-3FL9pkFaDKuocLuIiJigPH6MP91HqMsTHMpFXhg5jx5XhNa2yo7QuBvLQ3nbI3z2-mYHwtqSEfJQp5TvfqwtxqmRV0Hmv-OUtIM0-rkqYkYlf4omfg0A/s1600/Screen+shot+2010-07-20+at+9.50.07+PM.png"
|
1956
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
1957
|
+
border="0" height="640" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiR7GuMPHx-SaGfRCOiB-3FL9pkFaDKuocLuIiJigPH6MP91HqMsTHMpFXhg5jx5XhNa2yo7QuBvLQ3nbI3z2-mYHwtqSEfJQp5TvfqwtxqmRV0Hmv-OUtIM0-rkqYkYlf4omfg0A/s640/Screen+shot+2010-07-20+at+9.50.07+PM.png"
|
1958
|
+
width="408" /></a></td></tr>\n<tr><td
|
1959
|
+
class="tr-caption" style="text-align: center;">A Blues
|
1960
|
+
Lick in VexTab<br />\n</td></tr>\n</tbody></table><br
|
1961
|
+
/>\nThat's all for this week, folks! There are a lot more interesting
|
1962
|
+
things coming up. Check out the <a href="http://vexflow.com/vextab/tutorial.html">VexTab
|
1963
|
+
tutorial</a> to play around in the sandboxes, and stay in touch.</content><link
|
1964
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/7961142667231282175/comments/default'
|
1965
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/07/more-durations-and-better-beaming.html#comment-form'
|
1966
|
+
title='10 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7961142667231282175'/><link
|
1967
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7961142667231282175'/><link
|
1968
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/07/more-durations-and-better-beaming.html'
|
1969
|
+
title='More Durations and Better Beaming'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
1970
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
1971
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh4SokXKqQO6IN0lEH3D2PlyQERmbOVipuHNHDG4r-kmDdSPdJDz8hH38O8SKUyg1Xafi9Sp0jljjnPayVNJ1ta8Ez8H_c1PJwrLdqq5WtE7UXf7RgWWQXDmXmMlpSbf0llA1537Q/s72-c/Screen+shot+2010-07-20+at+9.17.44+PM.png\"
|
1972
|
+
height=\"72\" width=\"72\"/><thr:total>10</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-7789963467582961010</id><published>2010-07-14T11:51:00.000-04:00</published><updated>2010-07-14T11:51:00.426-04:00</updated><title
|
1973
|
+
type='text'>Migrating VexFlow to SCons</title><content type='html'>I love
|
1974
|
+
tools. Tools keep my projects predictable and smooth. Tools help me code fast
|
1975
|
+
and release fast.<br />\n<br />\n<a href="http://www.vexflow.com/">VexFlow</a>
|
1976
|
+
has gone through many iterations of design, implementation, and deployment,
|
1977
|
+
and I couldn't have brought it this far so quickly without my tools. (Well,
|
1978
|
+
automated testing had a lot to do with it too, but that's for another
|
1979
|
+
post.)<br />\n<br />\nYesterday, I added a new tool to my toolbox
|
1980
|
+
- <a href="http://www.scons.org/">SCons</a>. I migrated
|
1981
|
+
all my building, packaging, test driving, and deployment code to SCons. Compared
|
1982
|
+
to the ugly shell scripts I previously used, SCons is a lot cleaner, a lot
|
1983
|
+
faster, and significantly easier to manage.<br />\n<br />\nI chose
|
1984
|
+
SCons for two reasons:<br />\n<ol><li>Simplicity. It's
|
1985
|
+
Python-based and super-easy to work with.</li>\n<li>Familiarity.
|
1986
|
+
I've used it before, so already know my way around it.</li>\n</ol><div>Since
|
1987
|
+
I use the <a href="http://code.google.com/closure/compiler/">Google
|
1988
|
+
Closure Compiler</a> to build and minimize my JavaScript code, I had
|
1989
|
+
to write a new builder for SCons. That turned out to be pretty straightforward
|
1990
|
+
to implement.</div><br />\n<pre class="prettyprint">def
|
1991
|
+
js_builder(target, source, env):\n """ A JavaScript builder
|
1992
|
+
using Google Closure Compiler. """\n\n cmd = env.subst(\n
|
1993
|
+
\ "$JAVA -jar $JS_COMPILER --compilation_level $JS_COMPILATION_LEVEL");\n\n
|
1994
|
+
\ # Add defines to the command\n for define in env['JS_DEFINES'].keys():\n
|
1995
|
+
\ cmd += " --define=\\"%s=%s\\"" % (define, env['JS_DEFINES'][define])\n\n
|
1996
|
+
\ # Add the source files\n for file in source:\n cmd += " --js "
|
1997
|
+
+ str(file)\n\n # Add the output file\n cmd += " --js_output_file "
|
1998
|
+
+ str(target[0])\n\n # Log the command and run\n print env.subst(cmd)\n
|
1999
|
+
\ os.system(env.subst(cmd))\n</pre><br />\nI also needed a new
|
2000
|
+
builder to stamp my output with the relevant build information. So, I created
|
2001
|
+
a <i>Stamper, </i>which is just a builder that runs some string
|
2002
|
+
substitution on files with <span class="Apple-style-span" style="font-family:
|
2003
|
+
'Courier New', Courier, monospace;">sed</span>. The
|
2004
|
+
stamper looks like this:<br />\n<br />\n<pre class="prettyprint">def
|
2005
|
+
vexflow_stamper(target, source, env):\n """ A Build Stamper
|
2006
|
+
for VexFlow """\n\n cmd = "sed "\n cmd += "
|
2007
|
+
-e s/__VEX_BUILD_PREFIX__/$VEX_BUILD_PREFIX/"\n cmd += " -e s/__VEX_VERSION__/$VEX_VERSION/"\n
|
2008
|
+
\ cmd += ' -e "s/__VEX_BUILD_DATE__/${VEX_BUILD_DATE}/"'\n
|
2009
|
+
\ cmd += " -e s/__VEX_GIT_SHA1__/`git rev-list --max-count=1 HEAD`/ "\n
|
2010
|
+
\ cmd += ("%s &gt; %s" % (source[0], target[0]))\n\n print
|
2011
|
+
env.subst(cmd)\n os.system(env.subst(cmd))\n</pre><br />\nBefore
|
2012
|
+
you can use these builders, you need to add them to your environment:<br
|
2013
|
+
/>\n<br />\n<pre class="prettyprint">env.Append(BUILDERS
|
2014
|
+
= {'JavaScript': Builder(action = js_builder),\n 'VexFlowStamp':
|
2015
|
+
Builder(action = vexflow_stamper)})\n</pre><br />\nOnce this is
|
2016
|
+
done, you can add build JavaScript targets with the <i>JavaScript</i>
|
2017
|
+
command.<br />\n<br />\n<pre class="prettyprint">env['JAVA']
|
2018
|
+
= "/usr/bin/java"\nenv['JS_COMPILER'] = "support/compiler.jar"\nenv['JS_DEFINES'
|
2019
|
+
] = {\n "Vex.Debug": "true",\n "Vex.LogLevel":
|
2020
|
+
"4"\n}\n\nsources = ["src1.js", "src2.js", "src3.js"]\n\nenv.JavaScript("src.min.js",
|
2021
|
+
sources)\n</pre><br />\nThis really is just scratching the surface.
|
2022
|
+
There's a lot more you can do with SCons to automate and streamline your
|
2023
|
+
builds. To learn more, take a look at the <a href="http://www.scons.org/doc/2.0.0.final.0/HTML/scons-user/index.html">user
|
2024
|
+
guide</a>.<br />\n<br />\nI added support for testing, packaging,
|
2025
|
+
and deployment (of the web pages and demos) to my SCons scripts in a matter
|
2026
|
+
of hours, and finally purged all my nasty shell scripts from the VexFlow codebase.<br
|
2027
|
+
/>\n<br />\nGive it a try. I guarantee you'll be happier.</content><link
|
2028
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/7789963467582961010/comments/default'
|
2029
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/07/migrating-vexflow-to-scons.html#comment-form'
|
2030
|
+
title='3 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7789963467582961010'/><link
|
2031
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7789963467582961010'/><link
|
2032
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/07/migrating-vexflow-to-scons.html'
|
2033
|
+
title='Migrating VexFlow to SCons'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
2034
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>3</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-6514910752490146447</id><published>2010-07-12T14:50:00.000-04:00</published><updated>2010-07-12T14:50:31.885-04:00</updated><title
|
2035
|
+
type='text'>Durations, Code, and Posters</title><content type='html'>The last
|
2036
|
+
few weeks have been relatively quiet on the VexFlow side. I've been vacationing
|
2037
|
+
in Cape Cod with my wife and 3-month-old.<br />\n<br />\nObviously,
|
2038
|
+
vacation is never fun without a few good coding sprints. I started work on
|
2039
|
+
incorporating standard notation into <a href="http://www.vexflow.com/vextab">VexTab</a>.<br
|
2040
|
+
/>\n<br />\nThe first thing I needed to do was create a class to
|
2041
|
+
convert fret-string pairs to notes. In order to support alternate tunings,
|
2042
|
+
I created a <i>Tuning</i>&nbsp;class, whose sole responsibility
|
2043
|
+
is to return the correct note for a given fret-string pair, based on the instrument
|
2044
|
+
type and tuning.<br />\n<br />\nSo, to convert the fret-string
|
2045
|
+
pair "5/2" on a 5-string bass to standard notation, all I need to
|
2046
|
+
do is:<br />\n<br />\n<pre class="prettyprint">var
|
2047
|
+
tuning = new Vex.Flow.Tuning("G/4,D/4,A/3,E/3,B/2");\nvar note =
|
2048
|
+
tuning.getNoteForFret(5, 2);\n</pre><br />\nThe next part was
|
2049
|
+
augmenting the language to render standard notation when requested. I modified
|
2050
|
+
<span class="Apple-style-span" style="font-family: 'Courier
|
2051
|
+
New', Courier, monospace;">tabstave</span> to accept <span
|
2052
|
+
class="Apple-style-span" style="font-family: inherit;"><i>key=value</i></span>
|
2053
|
+
parameters, and added a parameter called <span class="Apple-style-span"
|
2054
|
+
style="font-family: 'Courier New', Courier, monospace;">notation</span>.
|
2055
|
+
When set to <span class="Apple-style-span" style="font-family:
|
2056
|
+
'Courier New', Courier, monospace;">true</span>, it
|
2057
|
+
renders standard notation above the guitar tab.<br />\n<br />\n<table
|
2058
|
+
align="center" cellpadding="0" cellspacing="0"
|
2059
|
+
class="tr-caption-container" style="margin-left: auto; margin-right:
|
2060
|
+
auto; text-align: center;"><tbody>\n<tr><td style="text-align:
|
2061
|
+
center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjOfzb0VTihrFnDXvA642NRGLuWU45tUoukDnqlFYI5bF-J_CBAWHriN3qroiWKRqWYDoOPFNKfgDoZp5jmRwRqYjla4YEQ2Chd0yL_O6bJAFow9mG6-kUlCNHTbrFvHpupX02O3g/s1600/Screen+shot+2010-07-12+at+2.20.47+PM.png"
|
2062
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
2063
|
+
border="0" height="276" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjOfzb0VTihrFnDXvA642NRGLuWU45tUoukDnqlFYI5bF-J_CBAWHriN3qroiWKRqWYDoOPFNKfgDoZp5jmRwRqYjla4YEQ2Chd0yL_O6bJAFow9mG6-kUlCNHTbrFvHpupX02O3g/s320/Screen+shot+2010-07-12+at+2.20.47+PM.png"
|
2064
|
+
width="320" /></a></td></tr>\n<tr><td
|
2065
|
+
class="tr-caption" style="text-align: center;">VexTab
|
2066
|
+
with Standard Notation</td></tr>\n</tbody></table><br
|
2067
|
+
/>\nI also started work on basic duration support and auto-beaming. I don't
|
2068
|
+
have much to show for this yet, because they're currently a bit intertwined,
|
2069
|
+
and automatic beaming is harder than I anticipated (yet again!)<br />\n<br
|
2070
|
+
/>\nIn other news, I <a href="http://github.com/0xfe/vex/blob/master/vextab/vextab.js">open
|
2071
|
+
sourced</a> the VexTab parser, so you can learn more about the language
|
2072
|
+
or use it in your own rendering engines. It is currently slightly coupled
|
2073
|
+
to VexFlow, but pretty trivial to decouple. (I'm going to fully decouple
|
2074
|
+
it as this project progresses.)<br />\n<br />\nThe parser is licensed
|
2075
|
+
under the <a href="http://www.opensource.org/licenses/mit-license.php">MIT
|
2076
|
+
license</a>, and is available on GitHub at: <a href="http://github.com/0xfe/vex/tree/master/vextab/">http://github.com/0xfe/vex/tree/master/vextab/</a>.<br
|
2077
|
+
/>\n<br />\nFinally, some readers who liked my previous <a href="http://0xfe.blogspot.com/2009/12/google-chrome-poster-from-source-code.html">Chrome
|
2078
|
+
Poster from Source Code</a> post requested posters for other open-source
|
2079
|
+
projects. I generated posters for Firefox, Linux, and FreeBSD, and made them
|
2080
|
+
available on my other side project: <a href="http://wickedmeanposters.com/">Wicked
|
2081
|
+
Mean Posters</a>.<br />\n<br />\n<table align="center"
|
2082
|
+
cellpadding="0" cellspacing="0" class="tr-caption-container"
|
2083
|
+
style="margin-left: auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
2084
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiAD0NHLeE4x2HoXGDzW6pydG0jqBIb_bI1AzrlDohHGM14iu928BTzOftpGh0cHv4uKD47sphwVDgO1sbkX61XXvgeIU1F0He8VGp0Na_QoGFUtXfOXd-ItH6nITciSpGfCm-fHA/s1600/Screen+shot+2010-07-12+at+2.43.14+PM.png"
|
2085
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
2086
|
+
border="0" height="198" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiAD0NHLeE4x2HoXGDzW6pydG0jqBIb_bI1AzrlDohHGM14iu928BTzOftpGh0cHv4uKD47sphwVDgO1sbkX61XXvgeIU1F0He8VGp0Na_QoGFUtXfOXd-ItH6nITciSpGfCm-fHA/s320/Screen+shot+2010-07-12+at+2.43.14+PM.png"
|
2087
|
+
width="320" /></a></td></tr>\n<tr><td
|
2088
|
+
class="tr-caption" style="text-align: center;">Firefox
|
2089
|
+
Poster from Source Code</td></tr>\n</tbody></table><br
|
2090
|
+
/>\nMore next time!</content><link rel='replies' type='application/atom+xml'
|
2091
|
+
href='https://0xfe.blogspot.com/feeds/6514910752490146447/comments/default'
|
2092
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/07/durations-code-and-posters.html#comment-form'
|
2093
|
+
title='5 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6514910752490146447'/><link
|
2094
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/6514910752490146447'/><link
|
2095
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/07/durations-code-and-posters.html'
|
2096
|
+
title='Durations, Code, and Posters'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
2097
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
2098
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjOfzb0VTihrFnDXvA642NRGLuWU45tUoukDnqlFYI5bF-J_CBAWHriN3qroiWKRqWYDoOPFNKfgDoZp5jmRwRqYjla4YEQ2Chd0yL_O6bJAFow9mG6-kUlCNHTbrFvHpupX02O3g/s72-c/Screen+shot+2010-07-12+at+2.20.47+PM.png\"
|
2099
|
+
height=\"72\" width=\"72\"/><thr:total>5</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-8369373862453509821</id><published>2010-06-25T17:25:00.000-04:00</published><updated>2011-09-10T11:20:07.098-04:00</updated><title
|
2100
|
+
type='text'>Encrypted Incremental Backups to S3</title><content type='html'>I
|
2101
|
+
spent some time this week trying to get secure online backups working for
|
2102
|
+
all my machines.<br />\n<br />\nSo far, I've been managing
|
2103
|
+
most of my data and workspaces with replicated Git repositories. I have scripts
|
2104
|
+
that allow me to maintain roaming profiles across my machines (almost) seamlessly,
|
2105
|
+
and these scripts try to ensure that these profiles are consistently replicated.
|
2106
|
+
My profiles include things like dot files (<code>.vimrc</code>,
|
2107
|
+
<code>.screenrc</code>, etc.), startup scripts, tools, workspaces,
|
2108
|
+
repositories, and other odds and ends.<br />\n<br />\nBecause
|
2109
|
+
I tend to be ultra-paranoid about security and reliability, the replicas are
|
2110
|
+
encrypted and distributed across different machines in different locations.
|
2111
|
+
For encryption, I use <a href="https://launchpad.net/ecryptfs">ecryptfs</a>
|
2112
|
+
on Linux machines, and FileVault on the Mac.<br />\n<br />\nAnyhow,
|
2113
|
+
this week I lost my Mac to a hardware failure, and my co-located Linux machine
|
2114
|
+
to a service-provider <i>dismantling</i>. That left me with one
|
2115
|
+
replica... just waiting to fail.<br />\n<br />\nI decided that
|
2116
|
+
I needed another replica, but didn't want to pay for, or have to setup
|
2117
|
+
another co-located server. After spending some time researching various online-backup
|
2118
|
+
providers, I decided to go with <a href="http://aws.amazon.com/s3/">Amazon's
|
2119
|
+
S3 service</a>.<br />\n<br />\nI chose S3 because - it's
|
2120
|
+
cheap, it's tried and tested, it's built on an internal distributed
|
2121
|
+
and replicated database, and there are some great tools that work with it.<br
|
2122
|
+
/>\n<br />\n<h3>Brackup</h3><br />\n<a href="http://code.google.com/p/brackup/">Brackup</a>,
|
2123
|
+
by Brad Fitzpatrick, is one of those tools. It allows you to make encrypted
|
2124
|
+
incremental backups to S3, without a lot of hair-pulling or teeth-gnashing.<br
|
2125
|
+
/>\n<br />\nTo get Brackup running on your machine, you need to have
|
2126
|
+
GPG, Perl 5, and the Net::Amazon::S3 Perl module installed. On the Mac, you
|
2127
|
+
also need to get MacPorts.<br />\n<br />\n<h3>Installation</h3><br
|
2128
|
+
/>\nMost modern distributions come with Perl 5 pre-installed, but not with
|
2129
|
+
GPG. The package name you want on both MacPorts and Ubuntu, is <code>gnupg</code>.<br
|
2130
|
+
/>\n<br />\nThe first thing you need to do, if you don't already
|
2131
|
+
have a GPG key, is to generate one.<br />\n<br />\n<pre class="prettyprint">$
|
2132
|
+
gpg --gen-keys</pre><br />\nIf you need to backup multiple machines,
|
2133
|
+
export your public key to a text file, and import it on the other machines.<br
|
2134
|
+
/>\n<br />\n<pre class="prettyprint">hostA$ gpg --export
|
2135
|
+
-a "User Name" &gt; public.key\nhostA$ scp public.key hostB:/tmp\nhostB$
|
2136
|
+
gpg --import /tmp/public.key\n</pre><br />\nRemember that all
|
2137
|
+
your backups will be encrypted with your public key, so if you lose your private
|
2138
|
+
key, the only thing you can do with your backups is generate white noise.
|
2139
|
+
Export your private key and save it in a safe place. (I suggest <a href="http://vexcrypto.appspot.com/">VexCrypto</a>.)<br
|
2140
|
+
/>\n<br />\n<pre class="prettyprint">$ gpg --export-secret-key
|
2141
|
+
-a "User Name" &gt; private.key\n</pre><br />\nNow
|
2142
|
+
that you have your keys setup, download and install Brackup. The easiest way
|
2143
|
+
to do this is by using the <code>cpan</code> tool.<br />\n<br
|
2144
|
+
/>\n<pre class="prettyprint">$ sudo cpan Net::Amazon::S3\n$
|
2145
|
+
sudo cpan Brackup\n</pre><br />\nNote that it's better (and
|
2146
|
+
way faster) to use your distribution's package for Net::Amazon::S3. On
|
2147
|
+
Ubuntu the package is <code>libnet-amazon-s3-perl</code>, and
|
2148
|
+
on MacPorts, the package is <code>p5-amazon-s3</code>.<br />\n<br
|
2149
|
+
/>\n<h3>Configuration</h3>\n<br />\nOnce this is done,
|
2150
|
+
you can generate a template configuration file by typing in <code>brackup</code>
|
2151
|
+
on the command line. This file is stored in <code>$HOME/.brackup</code>.<br
|
2152
|
+
/>\n<br />\n<pre class="prettyprint">$ <b>brackup</b>\nError:
|
2153
|
+
Your config file needs tweaking. I put a commented-out template at: /home/muthanna/.brackup.conf\n\nbrackup
|
2154
|
+
--from=[source_name] --to=[target_name] [--output=<backup_metafile.brackup>]\nbrackup
|
2155
|
+
--help\n</backup_metafile.brackup></pre><br />\nEdit the
|
2156
|
+
configuration file and create your sources and targets. You will likely have
|
2157
|
+
multiple sources, and one target. Here's a snip of my configuration:<br
|
2158
|
+
/>\n<br />\n<pre class="prettyprint">[TARGET:amazon]\ntype
|
2159
|
+
= Amazon\naws_access_key_id = XXXXXXXXXXX\naws_secret_access_key = XXXXXXXXXXXXXX\nkeep_backups
|
2160
|
+
= 10\n\n[SOURCE:mac_repos]\npath = /Users/0xfe/Local\nchunk_size = 5m\ngpg_recipient
|
2161
|
+
= 79E44165\nignore = ^.*\\.(swp|swo|hi|o|a|pyc|svn|class|DS_Store|Trash|Trashes)$/\n\n[SOURCE:mac_desktop_books]\npath
|
2162
|
+
= /Users/0xfe/Desktop/Books\ngpg_recipient = 79E44165\nignore = ^.*\\.(swp|swo|hi|o|a|pyc|svn|class|DS_Store|Trash|Trashes)$/\n\n[SOURCE:mac_desktop_workspace]\npath
|
2163
|
+
= /Users/0xfe/Desktop/Workspace\ngpg_recipient = 79E44165\nignore = ^.*\\.(swp|swo|hi|o|a|pyc|svn|class|DS_Store|Trash|Trashes)$/\n</pre><br
|
2164
|
+
/>\nThe configuration keys are pretty self explanatory. I should point
|
2165
|
+
out that <code>gpg_recipient</code> is your public key ID, as
|
2166
|
+
shown by <code>gpg --list-keys</code>.<br />\n<br />\n<pre
|
2167
|
+
class="prettyprint">$ gpg --list-keys\n/Users/0xfe/.gnupg/pubring.gpg\n-----------------------------------\npub
|
2168
|
+
\ 2048R/79E44165 2010-06-24\nuid My Username &lt;snip@snip.com&gt;\nsub
|
2169
|
+
\ 2048R/43AD4B72 2010-06-24\n</pre><br />\nFor more details on
|
2170
|
+
the various parameters, see <a href="http://search.cpan.org/~bradfitz/Brackup/lib/Brackup/Manual/Overview.pod">The
|
2171
|
+
Brackup Manual</a>.<br />\n<br />\n<h3>Start a Backup</h3>\n<br
|
2172
|
+
/>\nTo backup one of your sources, use the <code>brackup</code>
|
2173
|
+
command, as so:<br />\n<br />\n<pre class="prettyprint">$
|
2174
|
+
brackup -v --from=mac_repos --to=amazon\n</pre><br />\nIf you
|
2175
|
+
now take a look at your <a href="http://aws.amazon.com/">AWS
|
2176
|
+
Dashboard</a>, you should see the buckets and chunks created for your
|
2177
|
+
backup data.<br />\n<br />\nNotice that <code>brackup</code>
|
2178
|
+
creates an output file (with the extension <code>.brackup</code>)
|
2179
|
+
in the current directory. This file serves as an index, and maintains pointers
|
2180
|
+
to the S3 chunks for each backed-up file. You will need this file to locate
|
2181
|
+
and restore your data, and a copy of it is maintained on S3.<br />\n<br
|
2182
|
+
/>\n<h3>Restoring</h3>\n<br />\n"<i>Test restores
|
2183
|
+
regularly.</i>" -- a wise man.<br />\n<br />\nTo restore
|
2184
|
+
your Brackup backups, you will need to have your private key handy on the
|
2185
|
+
machine that you're restoring to. Brackup accesses your private key via
|
2186
|
+
<code>gpg-agent</code>.<br />\n<br />\n<pre class="prettyprint">$
|
2187
|
+
sudo port install gpg-agent\n$ eval $(gpg-agent --daemon)\n</pre><br
|
2188
|
+
/>\nThe <code>brackup-restore</code> command restores a source
|
2189
|
+
tree to a path specified on the command line. It makes use of the output file
|
2190
|
+
that brackup generated during the initial backup to locate and restore your
|
2191
|
+
data. If you don't have a local copy of the output file, you can use <code>brackup-target</code>
|
2192
|
+
to retrieve a copy from S3.<br />\n<br />\n<pre class="prettyprint">$
|
2193
|
+
brackup-restore -v --from=mac_repos-20100624.brackup \\\n --to=/Users/0xfe/temp/mac_repos
|
2194
|
+
--all\n</pre><br />\nYou will be prompted for your AWS key, your
|
2195
|
+
AWS secret key, and your GPG private key passphrase. Make sure that the
|
2196
|
+
restore completed successfully and correctly. Comparing the SHA1 hashes of
|
2197
|
+
the restored data with those of the original data is a good way to validate
|
2198
|
+
correctness.<br />\n<br />\n<h3>Garbage Collection</h3>\n<br
|
2199
|
+
/>\nYou will need to prune and garbage collect your data regularly to keep
|
2200
|
+
backups from piling up and using up space in S3. The following commands delete
|
2201
|
+
old backed up chunks based on the <i>keep_files </i>configuration
|
2202
|
+
value of the target.<br />\n<br />\n<pre class="prettyprint">$
|
2203
|
+
brackup-target amazon prune\n$ brackup-target amazon gc\n</pre><br
|
2204
|
+
/>\nThat's all folks! Secure, on-line, off-site, incremental, buzz-word-ridden
|
2205
|
+
backups. Code safely!</content><link rel='replies' type='application/atom+xml'
|
2206
|
+
href='https://0xfe.blogspot.com/feeds/8369373862453509821/comments/default'
|
2207
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/06/encrypted-incremental-backups-to-s3.html#comment-form'
|
2208
|
+
title='4 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/8369373862453509821'/><link
|
2209
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/8369373862453509821'/><link
|
2210
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/06/encrypted-incremental-backups-to-s3.html'
|
2211
|
+
title='Encrypted Incremental Backups to S3'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
2212
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>4</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-3313904850292755497</id><published>2010-06-21T14:12:00.000-04:00</published><updated>2010-06-21T14:12:11.700-04:00</updated><title
|
2213
|
+
type='text'>On Parsing and Licenses</title><content type='html'>So I spent
|
2214
|
+
this weekend rewriting the <a href="http://vexflow.com/tabdiv/tutorial.html">VexTab</a>
|
2215
|
+
parser. The original version, though it served its purpose as a quick prototype
|
2216
|
+
for the language, was severely limited due to it being built primarily out
|
2217
|
+
of regular expressions.<br />\n<br />\nThe new parser uses a recursive-descent
|
2218
|
+
algorithm, and fully supports the original grammar. Adding new syntactic elements
|
2219
|
+
to the language is now simple, as is adding support for more complex grammars.<br
|
2220
|
+
/>\n<br />\nSome new features I added to the language are support
|
2221
|
+
for slides, hammer-ons, pull-offs, and tapping. Here's a blues lick written
|
2222
|
+
in VexTab:<br />\n<br />\n<table align="center" cellpadding="0"
|
2223
|
+
cellspacing="0" class="tr-caption-container" style="margin-left:
|
2224
|
+
auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
2225
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjOTpU652I2h8heK-MA-5nu0w1I70rvYWZDuPrcr2ufE663YB3DK-yexfnHSJz3BnbRSGvk8N3OpPwmu_ZIgDX0zHQkialvKR014n87LzrgUHY0vDCVutWBuAABvtU8yR3BFYY60g/s1600/Picture+11.png"
|
2226
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
2227
|
+
border="0" height="262" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjOTpU652I2h8heK-MA-5nu0w1I70rvYWZDuPrcr2ufE663YB3DK-yexfnHSJz3BnbRSGvk8N3OpPwmu_ZIgDX0zHQkialvKR014n87LzrgUHY0vDCVutWBuAABvtU8yR3BFYY60g/s320/Picture+11.png"
|
2228
|
+
width="320" /></a></td></tr>\n<tr><td
|
2229
|
+
class="tr-caption" style="text-align: center;">Blues
|
2230
|
+
Lick in VexTab</td></tr>\n</tbody></table><br />\nSome
|
2231
|
+
readers have asked me about durations and how to specify rhythms in VexTab.
|
2232
|
+
Although the VexFlow core has full support for durations and timing, I still
|
2233
|
+
need a good way to represent them in the language. I'm open to ideas here
|
2234
|
+
if you have any.<br />\n<br />\n<b>Enter TabDiv</b><br
|
2235
|
+
/>\n<br />\nI also spent this weekend working on the release of the
|
2236
|
+
first VexFlow-based product:&nbsp;<a href="http://vexflow.com/tabdiv/index.html">TabDiv</a>.
|
2237
|
+
TabDiv lets you easily embed guitar tablature into your website or blog.<br
|
2238
|
+
/>\n<br />\nAfter you've included the TabDiv <code>.js</code>
|
2239
|
+
and <code>.css</code> files in your HTML document (or blog template),
|
2240
|
+
you can add tabs by simply creating DIV elements and setting the class to
|
2241
|
+
vex-tabdiv.<br />\n<br />\nYou can get TabDiv here:&nbsp;<a
|
2242
|
+
href="http://vexflow.com/tabdiv/index.html">http://vexflow.com/tabdiv/index.html</a>.<br
|
2243
|
+
/>\n<br />\n<b>Why not Open-Source?</b><br />\n<br
|
2244
|
+
/>\nI'm no stranger to open-source. I've been writing, maintaining,
|
2245
|
+
and contributing to open-source software for over a decade.<br />\n<br
|
2246
|
+
/>\nAlthough I hope to eventually open-source all the VexFlow source code,
|
2247
|
+
I'm going to hold off on it until I figure out where I want to take this
|
2248
|
+
product. I've invested a lot of time and effort into making VexFlow a
|
2249
|
+
fast high-quality renderer, and I'd like to find a way to cater to both
|
2250
|
+
a commercial-audience, and the open-source community.<br />\n<br
|
2251
|
+
/>\nSo, how does one find and maintain this delicate balance? Do I completely
|
2252
|
+
open-source it? Should I keep it closed and charge for it? Dual-license maybe?</content><link
|
2253
|
+
rel='replies' type='application/atom+xml' href='https://0xfe.blogspot.com/feeds/3313904850292755497/comments/default'
|
2254
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/06/on-parsing-and-licenses.html#comment-form'
|
2255
|
+
title='16 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/3313904850292755497'/><link
|
2256
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/3313904850292755497'/><link
|
2257
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/06/on-parsing-and-licenses.html'
|
2258
|
+
title='On Parsing and Licenses'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
2259
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
2260
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjOTpU652I2h8heK-MA-5nu0w1I70rvYWZDuPrcr2ufE663YB3DK-yexfnHSJz3BnbRSGvk8N3OpPwmu_ZIgDX0zHQkialvKR014n87LzrgUHY0vDCVutWBuAABvtU8yR3BFYY60g/s72-c/Picture+11.png\"
|
2261
|
+
height=\"72\" width=\"72\"/><thr:total>16</thr:total></entry><entry><id>tag:blogger.com,1999:blog-19544619.post-7599469828136185502</id><published>2010-06-17T15:01:00.000-04:00</published><updated>2010-06-17T15:01:57.156-04:00</updated><title
|
2262
|
+
type='text'>Benchmarking VexFlow</title><content type='html'>I have about
|
2263
|
+
340 tests now for VexFlow, and one of the things I find really impressive
|
2264
|
+
is the speed at which browsers currently load, execute, and render web-pages.<br
|
2265
|
+
/>\n<br />\nSince the code exercises the browser on a few different
|
2266
|
+
dimensions (heavy JavaScript, lots of DOM manipulation, a few new HTML5 features),
|
2267
|
+
I decided to pit the major browsers against each other and run a few benchmarks.<br
|
2268
|
+
/>\n<br />\n<table align="center" cellpadding="0"
|
2269
|
+
cellspacing="0" class="tr-caption-container" style="margin-left:
|
2270
|
+
auto; margin-right: auto; text-align: center;"><tbody>\n<tr><td
|
2271
|
+
style="text-align: center;"><a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiYbJtGgfxWbAZNnhARDNSNFhsLELRdCUQC9AJgQVTSxMrw0AcWkLlOZYuhzbxW5wUGHjptKYm1Ul758647rbb2p2hdn8ItTxlQN4qJe8CzrLH01dzwV248vhoP27vsIFWikAo0nA/s1600/Picture+9.png"
|
2272
|
+
imageanchor="1" style="margin-left: auto; margin-right: auto;"><img
|
2273
|
+
border="0" height="127" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiYbJtGgfxWbAZNnhARDNSNFhsLELRdCUQC9AJgQVTSxMrw0AcWkLlOZYuhzbxW5wUGHjptKYm1Ul758647rbb2p2hdn8ItTxlQN4qJe8CzrLH01dzwV248vhoP27vsIFWikAo0nA/s320/Picture+9.png"
|
2274
|
+
width="320" /></a></td></tr>\n<tr><td
|
2275
|
+
class="tr-caption" style="text-align: center;">Test
|
2276
|
+
Suite on Chrome 5.0.375</td></tr>\n</tbody></table>I
|
2277
|
+
ran the 340 tests in a loop a 1000 times on each browser, and calculated the
|
2278
|
+
mean runtime in milliseconds. Here are the results:<br />\n<br />\n<ul><li>Chrome
|
2279
|
+
5.0.375: <b>754ms</b></li>\n<li>Safari 4.0.4: <b>1118ms</b></li>\n<li>Opera
|
2280
|
+
10.53: <b>1511ms</b></li>\n<li>Firefox 3.6.3: <b>3209ms</b></li>\n</ul><div><br
|
2281
|
+
/>\nThe difference between the Chrome and Firefox numbers is quite surprising.</div><br
|
2282
|
+
/>\nI also ran some SVG vs. Canvas benchmarks and found that SVG was about
|
2283
|
+
3 times slower than Canvas. This factor increased significantly as the number
|
2284
|
+
of elements in the SVG image grew. That said, SVG rendered much more consistently
|
2285
|
+
across the different browsers.<br />\n<br />\nThe test machine
|
2286
|
+
used was a dual-core MacBook Pro with a 2.53 GHz Intel Core 2 Duo processor
|
2287
|
+
and 4GB of DDR3 RAM.</content><link rel='replies' type='application/atom+xml'
|
2288
|
+
href='https://0xfe.blogspot.com/feeds/7599469828136185502/comments/default'
|
2289
|
+
title='Post Comments'/><link rel='replies' type='text/html' href='https://0xfe.blogspot.com/2010/06/benchmarking-vexflow.html#comment-form'
|
2290
|
+
title='3 Comments'/><link rel='edit' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7599469828136185502'/><link
|
2291
|
+
rel='self' type='application/atom+xml' href='https://www.blogger.com/feeds/19544619/posts/default/7599469828136185502'/><link
|
2292
|
+
rel='alternate' type='text/html' href='https://0xfe.blogspot.com/2010/06/benchmarking-vexflow.html'
|
2293
|
+
title='Benchmarking VexFlow'/><author><name>0xfe</name><uri>http://www.blogger.com/profile/11179501091623983192</uri><email>noreply@blogger.com</email><gd:image
|
2294
|
+
rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail
|
2295
|
+
xmlns:media=\"http://search.yahoo.com/mrss/\" url=\"https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiYbJtGgfxWbAZNnhARDNSNFhsLELRdCUQC9AJgQVTSxMrw0AcWkLlOZYuhzbxW5wUGHjptKYm1Ul758647rbb2p2hdn8ItTxlQN4qJe8CzrLH01dzwV248vhoP27vsIFWikAo0nA/s72-c/Picture+9.png\"
|
2296
|
+
height=\"72\" width=\"72\"/><thr:total>3</thr:total></entry></feed>"
|
2297
|
+
recorded_at: Tue, 29 Jul 2025 09:55:38 GMT
|
2298
|
+
recorded_with: VCR 6.3.1
|