piccle 0.1.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. checksums.yaml +7 -0
  2. data/.gitignore +17 -0
  3. data/.rspec +2 -0
  4. data/.travis.yml +5 -0
  5. data/Gemfile +4 -0
  6. data/NOTES.md +69 -0
  7. data/README.md +175 -0
  8. data/Rakefile +8 -0
  9. data/agpl-3.0.md +660 -0
  10. data/assets/css/default.css +397 -0
  11. data/assets/css/normalize.css +427 -0
  12. data/assets/icons/android-chrome-192x192.png +0 -0
  13. data/assets/icons/android-chrome-512x512.png +0 -0
  14. data/assets/icons/apple-touch-icon.png +0 -0
  15. data/assets/icons/favicon-16x16.png +0 -0
  16. data/assets/icons/favicon-32x32.png +0 -0
  17. data/assets/icons/favicon.ico +0 -0
  18. data/bin/console +14 -0
  19. data/bin/piccle +355 -0
  20. data/bin/setup +8 -0
  21. data/db/migrations/001_create_photos.rb +15 -0
  22. data/db/migrations/002_update_photos.rb +14 -0
  23. data/db/migrations/003_create_keywords_and_join_table.rb +14 -0
  24. data/db/migrations/004_add_focal_length.rb +7 -0
  25. data/db/migrations/005_create_locations.rb +20 -0
  26. data/js-renderer/handlebars.min-v4.7.6.js +29 -0
  27. data/js-renderer/renderer.js +93 -0
  28. data/lib/piccle.rb +52 -0
  29. data/lib/piccle/config.rb +136 -0
  30. data/lib/piccle/database.rb +33 -0
  31. data/lib/piccle/dstk_service.rb +64 -0
  32. data/lib/piccle/extractor.rb +128 -0
  33. data/lib/piccle/js_renderer.rb +37 -0
  34. data/lib/piccle/models/keyword.rb +6 -0
  35. data/lib/piccle/models/location.rb +11 -0
  36. data/lib/piccle/models/photo.rb +211 -0
  37. data/lib/piccle/parser.rb +230 -0
  38. data/lib/piccle/quilt_generator.rb +30 -0
  39. data/lib/piccle/renderer.rb +175 -0
  40. data/lib/piccle/streams.rb +2 -0
  41. data/lib/piccle/streams/base_stream.rb +56 -0
  42. data/lib/piccle/streams/camera_stream.rb +35 -0
  43. data/lib/piccle/streams/date_stream.rb +95 -0
  44. data/lib/piccle/streams/event_stream.rb +73 -0
  45. data/lib/piccle/streams/keyword_stream.rb +24 -0
  46. data/lib/piccle/streams/location_stream.rb +57 -0
  47. data/lib/piccle/template_helpers.rb +79 -0
  48. data/lib/piccle/version.rb +3 -0
  49. data/lib/tasks/development.rake +38 -0
  50. data/piccle.gemspec +43 -0
  51. data/templates/_breadcrumbs.handlebars.slim +16 -0
  52. data/templates/_footer.handlebars.slim +2 -0
  53. data/templates/_header.handlebars.slim +36 -0
  54. data/templates/_navigation.handlebars.slim +16 -0
  55. data/templates/_substream.handlebars.slim +17 -0
  56. data/templates/feed.atom.slim +29 -0
  57. data/templates/index.html.handlebars.slim +36 -0
  58. data/templates/show.html.handlebars.slim +64 -0
  59. metadata +340 -0
@@ -0,0 +1,37 @@
1
# Render using a NodeJS helper program. The Handlebars.rb bindings are tied to an old version of libv8; they render
# REALLY slowly as a result.
# This renderer calls out to a NodeJS helper program instead - so all the templating is handled in JavaScript.
module Piccle
  class JsRenderer < Renderer
    # Spawns the long-lived Node helper with a bidirectional pipe ("r+"), passing the output directory as its
    # argument, then defers the rest of the setup to Renderer#initialize.
    def initialize(*args)
      @renderer = IO.popen(["node", "js-renderer/renderer.js", Piccle.config.output_dir], "r+")
      super(*args)
    end

    # Renders the main (front page) index. Returns the generated HTML as a string.
    def render_main_index
      call_nodejs("index", render_main_index_template_vars)
    end

    # Renders an index page for the given selector (a facet such as a camera or date).
    # Returns the generated HTML as a string.
    def render_index(selector)
      call_nodejs("index", render_index_template_vars(selector))
    end

    # Renders a single photo page, optionally scoped to a substream selector.
    # Returns the generated HTML as a string.
    def render_photo(hash, selector = [])
      call_nodejs("show", render_photo_template_vars(hash, selector))
    end

    protected

    # Sends a "render_<template>" command line followed by a JSON payload line to the Node helper,
    # then reads its response until the helper prints a line containing only the File Separator
    # control character (0x1C), which marks end-of-output.
    def call_nodejs(template, template_vars)
      @renderer.write("render_#{template}\n")
      @renderer.write("#{JSON.dump(template_vars)}\n")
      buffer = ""
      loop do
        line = @renderer.readline
        break if line.strip == "\x1C"
        buffer << line # Mutating append: += would reallocate the whole buffer on every line (O(n^2)).
      end
      buffer
    end
  end
end
@@ -0,0 +1,6 @@
1
require 'sequel'

# Represents a keyword/label/tag in the system. This is pulled out of the image XMP data
# (the dc:subject field - see Photo#generate_keywords).
class Piccle::Keyword < Sequel::Model
  # Keywords and photos are linked through a join table (migration 003): a photo can carry
  # several keywords and a keyword can apply to many photos. Sequel adds #photos,
  # #add_photo, #remove_photo, etc.
  many_to_many :photos
end
@@ -0,0 +1,11 @@
1
require 'sequel'

# Represents a location in the system - either just a lat/long point (to be geocoded later) or a lat/long named
# with "city", "state", "country". Countries are normally countries, but overall it's more like "small area", "wider
# geographic area", "big geographic area".
class Piccle::Location < Sequel::Model
  # Sequel model hook, run before the row is first inserted: stamp created_at unless the
  # caller set it explicitly.
  def before_create
    self.created_at ||= Time.now
    super # Keep Sequel's hook chain intact; in Sequel, skipping super here cancels the save.
  end
end
@@ -0,0 +1,211 @@
1
require 'exifr/jpeg'
require 'xmp'
require 'digest'
require 'sequel'
require 'rmagick'
require 'json'

# Represents an image in the system. Reading info from an image? Inferring something based on the data? Put it here.
class Piccle::Photo < Sequel::Model
  many_to_many :keywords
  attr_accessor :changed_hash # Has this file been modified since we last saw it? (MD5 mismatch with the DB row.)
  attr_accessor :freshly_created # Have we just created this DB record?

  # Sequel model hook: stamp created_at on first save unless set explicitly.
  def before_create
    self.created_at ||= Time.now
    super
  end

  # Finds or creates a DB record for the photo file at the given path, keyed on (file_name, path).
  # Sets the changed_hash/freshly_created flags on the returned object, and (re)extracts keywords
  # when the file is new or its content hash changed.
  def self.from_file(path_to_file)
    freshly_created = false
    md5 = Digest::MD5.file(path_to_file).to_s

    photo = self.find_or_create(file_name: File.basename(path_to_file), path: File.dirname(path_to_file)) do |p|
      # Block executes only when creating a new record.
      freshly_created = true
      p.set(data_hash(path_to_file))
    end
    photo.changed_hash = md5 != photo.md5
    photo.freshly_created = freshly_created

    # Pull out keywords for this file, if it's new or changed.
    photo.generate_keywords if freshly_created || photo.changed_hash?

    photo
  end

  # Gets a dataset of properties to save about this file. We reuse this between from_file (above) and update_from_file
  # (below). Reads EXIF (dimensions, camera, exposure, GPS) and XMP (title, city/state/country) metadata.
  def self.data_hash(path_to_file)
    exif_info = EXIFR::JPEG.new(path_to_file)
    xmp = XMP.parse(exif_info)
    p = {}

    p[:md5] = Digest::MD5.file(path_to_file).to_s
    p[:width] = exif_info.width
    p[:height] = exif_info.height
    p[:camera_name] = exif_info.model || "Unknown camera"
    p[:description] = exif_info.image_description
    p[:aperture] = exif_info.aperture_value
    p[:iso] = exif_info.iso_speed_ratings
    p[:iso] = p[:iso].first if p[:iso].is_a? Array # Some cameras report ISO as a one-element array.
    p[:shutter_speed_numerator] = exif_info.exposure_time&.numerator
    p[:shutter_speed_denominator] = exif_info.exposure_time&.denominator
    p[:focal_length] = exif_info.focal_length.to_f # nil.to_f is 0.0, so a missing value stores 0.0 here.
    p[:taken_at] = exif_info.date_time_original&.to_datetime

    # GPS: south latitudes and west longitudes are stored as negative decimal degrees.
    p[:latitude] = if exif_info.gps_latitude && exif_info.gps_latitude_ref
      exif_info.gps_latitude_ref == "S" ? (exif_info.gps_latitude.to_f * -1) : exif_info.gps_latitude.to_f
    end

    p[:longitude] = if exif_info.gps_longitude && exif_info.gps_longitude_ref
      exif_info.gps_longitude_ref == "W" ? (exif_info.gps_longitude.to_f * -1) : exif_info.gps_longitude.to_f
    end

    p[:title] = if xmp && xmp.namespaces && xmp.namespaces.include?("dc") && xmp.dc.attributes.include?("title")
      xmp.dc.title
    end
    # Location names come from the Photoshop XMP namespace, when present.
    %w[City State Country].each do |location|
      p[location.downcase.to_sym] = if xmp && xmp.namespaces && xmp.namespaces.include?("photoshop") &&
          xmp.photoshop.attributes.include?(location)
        xmp.photoshop.send(location)
      end
    end

    # Tweak encoding of potential non-UTF-8 strings
    %i[description title city state country].each do |attr|
      p[attr].force_encoding("UTF-8") if p[attr].respond_to?(:force_encoding)
    end

    p
  end

  # The year our earliest photo was taken. Used by our copyright footer.
  # NOTE(review): assumes at least one photo with a taken_at value; raises otherwise - confirm callers guard this.
  def self.earliest_photo_year
    Date.parse(self.min(:taken_at)).year
  end

  # The year the last photo was taken. Used by the copyright footer.
  def self.latest_photo_year
    Date.parse(self.max(:taken_at)).year
  end

  # ---- Image attributes (inferred from data) ----

  # Is this image portrait (taller than wide)?
  def portrait?
    height > width
  end

  # Is this image landscape?
  def landscape?
    width > height
  end

  # Is this image square?
  def square?
    width == height
  end

  # Have we already generated a thumbnail for this image?
  def thumbnail_exists?
    File.exist?(thumbnail_path)
  end

  # Gets the full path to the thumbnail for this photo.
  def thumbnail_path
    File.join(Piccle.config.output_dir, template_thumbnail_path)
  end

  # Gets the path to use in our generated HTML.
  def template_thumbnail_path
    File.join("images", "thumbnails", "#{md5}.#{file_name}")
  end

  # Does a "full-size" image exist?
  def full_image_exists?
    File.exist?(full_image_path)
  end

  # Gets the full path to the "full" image for this photo.
  def full_image_path
    File.join(Piccle.config.output_dir, template_full_image_path)
  end

  # Gets the path to use in our generated HTML.
  def template_full_image_path
    File.join("images", "photos", "#{md5}.#{file_name}")
  end

  # Gets the path to the photo page.
  def photo_show_path
    "#{md5}.html"
  end

  # Path to the original file on disk that this record was imported from.
  def original_photo_path
    File.join(path, file_name)
  end

  # Munge the shutter speed data into a human-readable string (eg. "1/250s"), or nil if unknown.
  def friendly_shutter_speed
    if shutter_speed_numerator && shutter_speed_denominator
      if shutter_speed_denominator > 1
        "#{shutter_speed_numerator}/#{shutter_speed_denominator}s"
      else
        "#{shutter_speed_numerator}s"
      end
    end
  end

  # Human-readable focal length (eg. "35.0 mm"), or nil when unknown.
  # Safe navigation guards rows created before the focal_length migration, where the column is NULL;
  # calling .positive? on nil would raise NoMethodError.
  def friendly_focal_length
    "#{focal_length.round(1)} mm" if focal_length&.positive?
  end

  # Does this image have both a lat-long pair, AND at least one of (city, state, country)?
  def geocoded?
    (latitude && longitude) && (city || state || country)
  end

  # ---- Piccle internals ----

  # Has this file changed hash? You probably want to call update if so.
  def changed_hash?
    changed_hash
  end

  # Have we just created this file?
  def freshly_created?
    freshly_created
  end

  # Re-read the photo data, and save it to the DB.
  def update_from_file
    update(Piccle::Photo.data_hash(original_photo_path))
  end

  # Read the keywords from the photo file, and ensure they're included in the DB.
  # TODO: remove any keywords that aren't currently in the file.
  def generate_keywords
    exif_info = EXIFR::JPEG.new(original_photo_path)
    xmp = XMP.parse(exif_info)

    if xmp && xmp.namespaces && xmp.namespaces.include?("dc") && xmp.dc.attributes.include?("subject")
      xmp.dc.subject.each do |keyword_name|
        # Distinct names: the original shadowed the block variable with the found/created record.
        keyword = Piccle::Keyword.find_or_create(name: keyword_name)
        add_keyword(keyword) unless keywords.include?(keyword)
      end
    end
  end

  # Generate a thumbnail for this image (cropped to fill THUMBNAIL_SIZE square).
  def generate_thumbnail!
    img = Magick::Image.read(original_photo_path).first
    img.resize_to_fill!(Piccle::THUMBNAIL_SIZE)
    img.write(thumbnail_path)
  end

  # Generate the "full-size" web image, scaled to fit within FULL_SIZE in both dimensions.
  def generate_full_image!
    img = Magick::Image.read(original_photo_path).first
    img.resize_to_fit!(Piccle::FULL_SIZE, Piccle::FULL_SIZE)
    img.write(full_image_path)
  end
end
@@ -0,0 +1,230 @@
1
# The "base parser" for Piccle. Repeatedly call parser.parse(Photo), and it pulls out the metadata necessary to generate pages.
# It'll figure out which details to pull out, links between individual photos, details like ordering, etc.
#
# Essentially, we end up building a big @data array that's got all the photo metadata, and the streams populate the various
# facets of the data. And then another module can render our site from this big specially-structured hash.
#
# Our hash looks like this:
# {
#   title: "Foo", # The title of this section
#   photos: { md5_string => Hash[photo_data] }, # Data needed to display
#   events: [ Hash[event_data] ] # Details about named events. These get special tiles on
#                                # the front page, but are implemented via a stream.
#
module Piccle
  class Parser
    attr_accessor :data # The big nested metadata hash described above.
    attr_accessor :streams # Registered stream instances (facet processors).

    def initialize
      @data = { friendly_name: "All Photos" } # The extracted metadata that we'll use to generate our photo gallery.
      @photos = {} # A hash of MD5 -> Photo object, in case we want to get back to them easily at some point.
      @streams = [] # Any extra processors that we want to use.
    end

    # Register a "stream", a thing that can extract extra data from a photo and add it to our data array, for later generation.
    # Takes the stream CLASS; it's instantiated here.
    def add_stream(stream)
      @streams << stream.new
    end

    # Do we have any photos in this parsed data yet?
    def empty?
      @photos.empty?
    end

    # Parse a photo. Also passes it to any registered streams, which can subcategorise each photo into sections under its own namespace.
    # Streams can also return a metadata element, that we can display specially in the photo page.
    def parse(photo)
      @photos[photo.md5] = photo
      @data[:photos] ||= {}

      # Snapshot the photo's displayable attributes into plain data, keyed by MD5.
      @data[:photos][photo.md5] = { hash: photo.md5,
                                    file_name: photo.file_name,
                                    title: photo.title,
                                    photo_show_path: photo.photo_show_path,
                                    description: photo.description,
                                    width: photo.width,
                                    height: photo.height,
                                    taken_at: photo.taken_at,
                                    created_at: photo.created_at,
                                    aperture: photo.aperture,
                                    shutter_speed: photo.friendly_shutter_speed,
                                    focal_length: photo.friendly_focal_length,
                                    iso: photo.iso,
                                    city: photo.city,
                                    state: photo.state,
                                    country: photo.country,
                                    has_location: photo.geocoded?,
                                    metadata: []
      }

      # Let each stream contribute facet data (merged into @data) and/or per-photo metadata entries.
      @streams.each do |stream|
        @data = merge_into(@data, stream.data_for(photo)) if stream.respond_to?(:data_for)
        @data[:photos][photo.md5][:metadata] += stream.metadata_for(photo) if stream.respond_to?(:metadata_for)
      end
    end

    # You can iterate over this list to display things.
    # Sorts the photos newest-first (photos without taken_at sort as 1970, ie. last), then lets each stream order its own facets.
    def order
      @data[:photos] = @data[:photos].sort_by { |k, v| v[:taken_at] || Time.new(1970, 1, 1) }.reverse.to_h

      @streams.each do |stream|
        @data = stream.order(@data) if stream.respond_to?(:order)
      end
    end

    # Loads the event data from the EventStream. It also finds "sentinels", which are photos where we should display a special
    # tile beforehand to indicate the start/end of the event.
    def load_events
      event_stream = Piccle::Streams::EventStream.new
      order # Sentinel detection relies on the photos being in display order.
      @data[:events] = event_stream.events
      @data[:event_starts], @data[:event_ends] = event_stream.sentinels_for(@data)
    end

    # Gets the metadata for a given photo hash.
    def metadata_for(photo_hash)
      @data.dig(:photos, photo_hash, :metadata)
    end

    # Get the friendly name for the given selector.
    def friendly_name_for(selector)
      @data.dig(*selector, :friendly_name)
    end

    # Gets a list of all subsections (ie. all the subindexes that we should render).
    # It's an array of hash keys, suitable for passing via @data.dig(*keys).
    # TODO: This could probably be sped up.
    def subsections
      previous_size = 0
      subsection_list = faceted_data.keys.map { |el| [el] }
      size = subsection_list.count

      # Find all the string keys in our data.
      # Fixed-point iteration: keep descending into string-keyed sub-hashes until no new paths appear.
      loop do
        subsection_list.each do |key_path|
          new_keys = string_keys_only(@data.dig(*key_path)).keys
          new_keys.each { |k| subsection_list << key_path + [k] }
        end

        # Clean up our state - remove dupes, update counts.
        subsection_list.uniq!
        previous_size = size
        size = subsection_list.count
        break if previous_size == size
      end

      subsection_list
    end

    # Get photo hashes in a given subsection, given a diggable path.
    def subsection_photo_hashes(subsection_path)
      @data.dig(*subsection_path).fetch(:photos, [])
    end

    # Gets the actual photo objects for a given subsection.
    def subsection_photos(subsection_path)
      if subsection_path.any?
        subsection_photo_hashes(subsection_path).map { |hash| [hash, @data[:photos][hash]] }.to_h
      else
        @data[:photos]
      end
    end

    # Given an MD5 hash, returns an array of arrays. Each array is a set of strings that, combined with the MD5, gives a link to the photo.
    # So for instance, with a date stream parser, if a photo was taken on 2016-04-19 you'll get back:
    # [["by-date", "2016"], ["by-date", "2016", "4"], ["by-date", "2016", "4", "19"]]
    # And you could use that to generate a links akin to /by-date/2016/4/19/abcdef1234567890.html.
    def links_for(md5)
      # Return each key that includes the photos.
      subsections.select { |path| @data.dig(*path).fetch(:photos, []).include?(md5) }
    end

    # Given a photo hash, and a substream selector (which may be omitted, for the main list of photos),
    # returns an array with *up to* 5 previous/next photos, as well as this image. It's ideal for rendering small
    # strips of neighbouring images.
    def substream_hashes_for(hash, selector = [])
      relevant_hashes = (@data.dig(*selector, :photos) || {})
      # The main photo list is a hash keyed by MD5; stream facets store plain arrays of MD5s.
      relevant_hashes = relevant_hashes.keys if relevant_hashes.respond_to?(:keys)
      if photo_index = relevant_hashes.find_index(hash)
        before_index = [0, photo_index-5].max
        after_index = [photo_index + 5, relevant_hashes.length - 1].min
        relevant_hashes[before_index..after_index]
      else
        []
      end
    end

    # Same as the above, but only returns hashes for "interesting" substreams.
    def interesting_substream_hashes_for(hash, selector = [])
      if @data.dig(*selector, :interesting)
        substream_hashes_for(hash, selector)
      else
        []
      end
    end

    # Returns a substream hash. This is a bundle of data suitable for rendering a navigation strip within this stream.
    # It includes a title for the substream, previous/next photos where applicable (ie. for nav arrows), and a set of
    # photos including the current photo. Returns nil when the photo isn't part of the selected stream.
    def substream_for(hash, selector = [])
      photo_hashes = substream_hashes_for(hash, selector)
      if photo_hashes.any?
        substream = {}
        photo_i = photo_hashes.find_index(hash)
        substream[:title] = friendly_name_for(selector)
        substream[:photos] = photo_hashes.map { |h| @data[:photos][h] }
        substream[:selector_path] = "#{selector.join('/')}/" if selector.any?
        substream[:previous] = @data[:photos][photo_hashes[photo_i - 1]] if photo_i > 0
        substream[:next] = @data[:photos][photo_hashes[photo_i + 1]] if photo_i < photo_hashes.length - 1
        substream
      else
        nil
      end
    end

    # Same as the above, but only if this stream is flagged as interesting.
    def interesting_substream_for(hash, selector = [])
      substream_for(hash, selector) if @data.dig(*selector, :interesting)
    end

    # Accessor for the photo hashes.
    def photo_hashes
      @photos.keys
    end

    # Gets the data that we faceted - the things broken down by stream.
    # Memoised; call before mutating @data further, or the cache will be stale.
    def faceted_data
      @faceted_data ||= string_keys_only(@data)
    end

    protected

    # Streams namespace their facets under String keys; Piccle's own bookkeeping uses Symbol keys.
    def string_keys_only(data)
      data.select { |k, _| k.is_a? String }
    end

    # Recursively merges a stream's contribution into the destination hash, mutating and returning destination.
    def merge_into(destination, source)
      # If the source has a photos key, make sure one exists in the destination, and then append the source's contents.
      if source.key?(:photos)
        destination[:photos] ||= []
        destination[:photos] += source[:photos]
      end

      # For all the other keys, see if the destination has them. If it does, combine them using OURSELF. Otherwise, just set it to our version.
      source.keys.each do |key|
        next if :photos == key
        if destination.key?(key) && destination[key].is_a?(Hash)
          destination[key] = merge_into(destination[key], source[key])
        else
          destination[key] = source[key]
        end
      end

      destination
    end
  end
end