net-sftp-backports 4.0.0.backports

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,93 @@
+ require 'net/ssh/loggable'
+
+ module Net; module SFTP; module Operations
+
+   # A convenience class for working with remote directories. It provides methods
+   # for searching and enumerating directory entries, similarly to the standard
+   # ::Dir class.
+   #
+   #   sftp.dir.foreach("/remote/path") do |entry|
+   #     puts entry.name
+   #   end
+   #
+   #   p sftp.dir.entries("/remote/path").map { |e| e.name }
+   #
+   #   sftp.dir.glob("/remote/path", "**/*.rb") do |entry|
+   #     puts entry.name
+   #   end
+   class Dir
+     # The SFTP session object that drives this directory factory.
+     attr_reader :sftp
+
+     # Create a new instance on top of the given SFTP session instance.
+     def initialize(sftp)
+       @sftp = sftp
+     end
+
+     # Calls the block once for each entry in the named directory on the
+     # remote server. Yields a Name object to the block, rather than merely
+     # the name of the entry.
+     def foreach(path)
+       handle = sftp.opendir!(path)
+       while entries = sftp.readdir!(handle)
+         entries.each { |entry| yield entry }
+       end
+       return nil
+     ensure
+       sftp.close!(handle) if handle
+     end
+
+     # Returns an array of Name objects representing the items in the given
+     # remote directory, +path+.
+     def entries(path)
+       results = []
+       foreach(path) { |entry| results << entry }
+       return results
+     end
+
+     # Works as ::Dir.glob, matching (possibly recursively) all directory
+     # entries under +path+ against +pattern+. If a block is given, matches
+     # will be yielded to the block as they are found; otherwise, they will
+     # be returned in an array when the method finishes.
+     #
+     # Because working over an SFTP connection is always going to be slower than
+     # working purely locally, don't expect this method to perform with the
+     # same level of alacrity that ::Dir.glob does; it will work best for
+     # shallow directory hierarchies with relatively few directories, though
+     # it should be able to handle modest numbers of files in each directory.
+     def glob(path, pattern, flags=0)
+       flags |= ::File::FNM_PATHNAME
+       path = path.chop if path.end_with?('/') && path != '/'
+
+       results = [] unless block_given?
+       queue = entries(path).reject { |e| %w(. ..).include?(e.name) }
+       while queue.any?
+         entry = queue.shift
+
+         if entry.directory? && !%w(. ..).include?(::File.basename(entry.name))
+           queue += entries("#{path}/#{entry.name}").map do |e|
+             e.name.replace("#{entry.name}/#{e.name}")
+             e
+           end
+         end
+
+         if ::File.fnmatch(pattern, entry.name, flags)
+           if block_given?
+             yield entry
+           else
+             results << entry
+           end
+         end
+       end
+
+       return results unless block_given?
+     end
+
+     # Identical to calling #glob with a +flags+ parameter of 0 and no block.
+     # Simply returns the matched entries as an array.
+     def [](path, pattern)
+       glob(path, pattern, 0)
+     end
+   end
+
+ end; end; end
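
As a point of reference, here is a minimal usage sketch for the Operations::Dir API above. It is not taken from the package: the host, credentials, and paths are placeholders, and it assumes a session obtained via Net::SFTP.start.

    require 'net/sftp'

    Net::SFTP.start("example.com", "user") do |sftp|
      # Enumerate entries one at a time; each yielded object is a Name.
      sftp.dir.foreach("/var/log") { |entry| puts entry.name }

      # Without a block, #glob collects matches into an array;
      # #[] is shorthand for #glob with flags 0.
      ruby_files = sftp.dir.glob("/srv/app", "**/*.rb")
      configs    = sftp.dir["/etc", "*.conf"]
      puts ruby_files.map { |e| e.name }
      puts configs.map  { |e| e.name }
    end
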
@@ -0,0 +1,365 @@
+ require 'net/ssh/loggable'
+
+ module Net; module SFTP; module Operations
+
+   # A general purpose downloader module for Net::SFTP. It can download files
+   # into IO objects, or directly to files on the local file system. It can
+   # even download entire directory trees via SFTP, and provides a flexible
+   # progress reporting mechanism.
+   #
+   # To download a single file from the remote server, simply specify both the
+   # remote and local paths:
+   #
+   #   downloader = sftp.download("/path/to/remote.txt", "/path/to/local.txt")
+   #
+   # By default, this operates asynchronously, so if you want to block until
+   # the download finishes, you can use the 'bang' variant:
+   #
+   #   sftp.download!("/path/to/remote.txt", "/path/to/local.txt")
+   #
+   # Or, if you have multiple downloads that you want to run in parallel, you can
+   # employ the #wait method of the returned object:
+   #
+   #   dls = %w(file1 file2 file3).map { |f| sftp.download("remote/#{f}", f) }
+   #   dls.each { |d| d.wait }
+   #
+   # To download an entire directory tree, recursively, simply specify :recursive => true:
+   #
+   #   sftp.download!("/path/to/remotedir", "/path/to/local", :recursive => true)
+   #
+   # This will download "/path/to/remotedir", its contents, its subdirectories,
+   # and their contents, recursively, to "/path/to/local" on the local host.
+   # (If you specify :recursive => true and the source is not a directory,
+   # you'll get an error!)
+   #
+   # If you want to pull the contents of a file on the remote server, and store
+   # the data in memory rather than immediately to disk, you can pass an IO
+   # object as the destination:
+   #
+   #   require 'stringio'
+   #   io = StringIO.new
+   #   sftp.download!("/path/to/remote", io)
+   #
+   # This will only work for single-file downloads. Trying to do so with
+   # :recursive => true will cause an error.
+   #
+   # The following options are supported:
+   #
+   # * <tt>:progress</tt> - either a block or an object to act as a progress
+   #   callback. See the discussion of "progress monitoring" below.
+   # * <tt>:requests</tt> - the number of pending SFTP requests to allow at
+   #   any given time. When downloading an entire directory tree recursively,
+   #   this will default to 16. Setting this higher might improve throughput.
+   #   Reducing it will reduce throughput.
+   # * <tt>:read_size</tt> - the maximum number of bytes to read at a time
+   #   from the source. Increasing this value might improve throughput. It
+   #   defaults to 32,000 bytes.
+   #
+   # == Progress Monitoring
+   #
+   # Sometimes it is desirable to track the progress of a download. There are
+   # two ways to do this: either using a callback block, or a special custom
+   # object.
+   #
+   # Using a block it's pretty straightforward:
+   #
+   #   sftp.download!("remote", "local") do |event, downloader, *args|
+   #     case event
+   #     when :open then
+   #       # args[0] : file metadata
+   #       puts "starting download: #{args[0].remote} -> #{args[0].local} (#{args[0].size} bytes)"
+   #     when :get then
+   #       # args[0] : file metadata
+   #       # args[1] : byte offset in remote file
+   #       # args[2] : data that was received
+   #       puts "writing #{args[2].length} bytes to #{args[0].local} starting at #{args[1]}"
+   #     when :close then
+   #       # args[0] : file metadata
+   #       puts "finished with #{args[0].remote}"
+   #     when :mkdir then
+   #       # args[0] : local path name
+   #       puts "creating directory #{args[0]}"
+   #     when :finish then
+   #       puts "all done!"
+   #     end
+   #   end
+   #
+   # However, for more complex implementations (e.g., GUI interfaces and such)
+   # a block can become cumbersome. In those cases, you can create custom
+   # handler objects that respond to certain methods, and then pass your handler
+   # to the downloader:
+   #
+   #   class CustomHandler
+   #     def on_open(downloader, file)
+   #       puts "starting download: #{file.remote} -> #{file.local} (#{file.size} bytes)"
+   #     end
+   #
+   #     def on_get(downloader, file, offset, data)
+   #       puts "writing #{data.length} bytes to #{file.local} starting at #{offset}"
+   #     end
+   #
+   #     def on_close(downloader, file)
+   #       puts "finished with #{file.remote}"
+   #     end
+   #
+   #     def on_mkdir(downloader, path)
+   #       puts "creating directory #{path}"
+   #     end
+   #
+   #     def on_finish(downloader)
+   #       puts "all done!"
+   #     end
+   #   end
+   #
+   #   sftp.download!("remote", "local", :progress => CustomHandler.new)
+   #
+   # If you omit any of those methods, the progress updates for those missing
+   # events will be ignored. You can create a catchall method named "call" for
+   # those, instead.
+   class Download
+     include Net::SSH::Loggable
+
+     # The destination of the download (the name of a file or directory on
+     # the local host, or an IO object)
+     attr_reader :local
+
+     # The source of the download (the name of a file or directory on the
+     # remote server)
+     attr_reader :remote
+
+     # The hash of options that was given to this Download instance.
+     attr_reader :options
+
+     # The SFTP session instance that drives this download.
+     attr_reader :sftp
+
+     # The properties hash for this object
+     attr_reader :properties
+
+     # Instantiates a new downloader process on top of the given SFTP session.
+     # +local+ is either an IO object that should receive the data, or a string
+     # identifying the target file or directory on the local host. +remote+ is
+     # a string identifying the location on the remote host that the download
+     # should source.
+     #
+     # This will return immediately, and requires that the SSH event loop be
+     # run in order to effect the download. (See #wait.)
+     def initialize(sftp, local, remote, options={}, &progress)
+       @sftp = sftp
+       @local = local
+       @remote = remote
+       @progress = progress || options[:progress]
+       @options = options
+       @active = 0
+       @properties = options[:properties] || {}
+
+       self.logger = sftp.logger
+
+       if recursive? && local.respond_to?(:write)
+         raise ArgumentError, "cannot download a directory tree in-memory"
+       end
+
+       @stack = [Entry.new(remote, local, recursive?)]
+       process_next_entry
+     end
+
+     # Returns the value of the :recursive key in the options hash that was
+     # given when the object was instantiated.
+     def recursive?
+       options[:recursive]
+     end
+
+     # Returns true if there are any active requests or pending files or
+     # directories.
+     def active?
+       @active > 0 || stack.any?
+     end
+
+     # Forces the transfer to stop.
+     def abort!
+       @active = 0
+       @stack.clear
+     end
+
+     # Runs the SSH event loop for as long as the downloader is active (see
+     # #active?). This can be used to block until the download completes.
+     def wait
+       sftp.loop { active? }
+       self
+     end
+
+     # Returns the property with the given name. This allows Download instances
+     # to store their own state when used as part of a state machine.
+     def [](name)
+       @properties[name.to_sym]
+     end
+
+     # Sets the given property to the given value. This allows Download instances
+     # to store their own state when used as part of a state machine.
+     def []=(name, value)
+       @properties[name.to_sym] = value
+     end
+
+     private
+
+     # A simple struct for encapsulating information about a single remote
+     # file or directory that needs to be downloaded.
+     Entry = Struct.new(:remote, :local, :directory, :size, :handle, :offset, :sink)
+
+     #--
+     # "ruby -w" hates private attributes, so we have to do these longhand
+     #++
+
+     # The stack of Entry instances, indicating which files and directories
+     # on the remote host remain to be downloaded.
+     def stack; @stack; end
+
+     # The progress handler for this instance. Possibly nil.
+     def progress; @progress; end
+
+     # The default read size.
+     DEFAULT_READ_SIZE = 32_000
+
+     # The number of bytes to read at a time from remote files.
+     def read_size
+       options[:read_size] || DEFAULT_READ_SIZE
+     end
+
+     # The number of simultaneous SFTP requests to use to effect the download.
+     # Defaults to 16 for recursive downloads.
+     def requests
+       options[:requests] || (recursive? ? 16 : 2)
+     end
+
+     # Enqueues as many files and directories from the stack as possible
+     # (see #requests).
+     def process_next_entry
+       while stack.any? && requests > @active
+         entry = stack.shift
+         @active += 1
+
+         if entry.directory
+           update_progress(:mkdir, entry.local)
+           ::Dir.mkdir(entry.local) unless ::File.directory?(entry.local)
+           request = sftp.opendir(entry.remote, &method(:on_opendir))
+           request[:entry] = entry
+         else
+           open_file(entry)
+         end
+       end
+
+       update_progress(:finish) if !active?
+     end
+
+     # Called when a remote directory is "opened" for reading, e.g. to
+     # enumerate its contents. Starts a readdir operation if the opendir
+     # operation was successful.
+     def on_opendir(response)
+       entry = response.request[:entry]
+       raise StatusException.new(response, "opendir #{entry.remote}") unless response.ok?
+       entry.handle = response[:handle]
+       request = sftp.readdir(response[:handle], &method(:on_readdir))
+       request[:parent] = entry
+     end
+
+     # Called when the next batch of items is read from a directory on the
+     # remote server. If any items were read, they are added to the queue
+     # and #process_next_entry is called.
+     def on_readdir(response)
+       entry = response.request[:parent]
+       if response.eof?
+         request = sftp.close(entry.handle, &method(:on_closedir))
+         request[:parent] = entry
+       elsif !response.ok?
+         raise StatusException.new(response, "readdir #{entry.remote}")
+       else
+         response[:names].each do |item|
+           next if item.name == "." || item.name == ".."
+           stack << Entry.new(::File.join(entry.remote, item.name), ::File.join(entry.local, item.name), item.directory?, item.attributes.size)
+         end
+
+         # take this opportunity to enqueue more requests
+         process_next_entry
+
+         request = sftp.readdir(entry.handle, &method(:on_readdir))
+         request[:parent] = entry
+       end
+     end
+
+     # Called when a file is to be opened for reading from the remote server.
+     def open_file(entry)
+       update_progress(:open, entry)
+       request = sftp.open(entry.remote, &method(:on_open))
+       request[:entry] = entry
+     end
+
+     # Called when a directory handle is closed.
+     def on_closedir(response)
+       @active -= 1
+       entry = response.request[:parent]
+       raise StatusException.new(response, "close #{entry.remote}") unless response.ok?
+       process_next_entry
+     end
+
+     # Called when a file has been opened. This will call #download_next_chunk
+     # to initiate the data transfer.
+     def on_open(response)
+       entry = response.request[:entry]
+       raise StatusException.new(response, "open #{entry.remote}") unless response.ok?
+
+       entry.handle = response[:handle]
+       entry.sink = entry.local.respond_to?(:write) ? entry.local : ::File.open(entry.local, "wb")
+       entry.offset = 0
+
+       download_next_chunk(entry)
+     end
+
+     # Initiates a read of the next #read_size bytes from the file.
+     def download_next_chunk(entry)
+       request = sftp.read(entry.handle, entry.offset, read_size, &method(:on_read))
+       request[:entry] = entry
+       request[:offset] = entry.offset
+     end
+
+     # Called when a read from a file finishes. If the read was successful
+     # and returned data, this will call #download_next_chunk to read the
+     # next bit from the file. Otherwise the file will be closed.
+     def on_read(response)
+       entry = response.request[:entry]
+
+       if response.eof?
+         update_progress(:close, entry)
+         entry.sink.close
+         request = sftp.close(entry.handle, &method(:on_close))
+         request[:entry] = entry
+       elsif !response.ok?
+         raise StatusException.new(response, "read #{entry.remote}")
+       else
+         entry.offset += response[:data].bytesize
+         update_progress(:get, entry, response.request[:offset], response[:data])
+         entry.sink.write(response[:data])
+         download_next_chunk(entry)
+       end
+     end
+
+     # Called when a file handle is closed.
+     def on_close(response)
+       @active -= 1
+       entry = response.request[:entry]
+       raise StatusException.new(response, "close #{entry.remote}") unless response.ok?
+       process_next_entry
+     end
+
+     # If a progress callback or object has been set, this will report
+     # the progress to that callback or object.
+     def update_progress(hook, *args)
+       on = "on_#{hook}"
+       if progress.respond_to?(on)
+         progress.send(on, self, *args)
+       elsif progress.respond_to?(:call)
+         progress.call(hook, self, *args)
+       end
+     end
+   end
+
+ end; end; end
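
The options and progress hooks documented above can be combined roughly as follows. This is an illustrative sketch, not taken from the package: the host, paths, and tuning values (:requests, :read_size) are assumptions chosen only to show the knobs.

    require 'net/sftp'

    Net::SFTP.start("example.com", "user") do |sftp|
      # Recursive download with explicit tuning; the block receives the
      # progress events (:open, :get, :close, :mkdir, :finish) described above.
      download = sftp.download("/remote/tree", "/tmp/tree",
                               :recursive => true,
                               :requests  => 32,       # more requests in flight
                               :read_size => 64_000    # larger chunk per read
                              ) do |event, downloader, *args|
        puts "#{event}: #{args.first.inspect}"
      end
      download.wait   # run the SSH event loop until #active? becomes false

      # A single file can also be downloaded straight into memory.
      require 'stringio'
      io = StringIO.new
      sftp.download!("/remote/tree/README", io)
    end
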
@@ -0,0 +1,198 @@
+ require 'net/ssh/loggable'
+
+ module Net; module SFTP; module Operations
+
+   # A wrapper around an SFTP file handle, that exposes an IO-like interface
+   # for interacting with the remote file. All operations are synchronous
+   # (blocking), making this a very convenient way to deal with remote files.
+   #
+   # A wrapper is usually created via the Net::SFTP::Session#file factory:
+   #
+   #   file = sftp.file.open("/path/to/remote")
+   #   puts file.gets
+   #   file.close
+   class File
+     # A reference to the Net::SFTP::Session instance that drives this wrapper
+     attr_reader :sftp
+
+     # The SFTP file handle object that this object wraps
+     attr_reader :handle
+
+     # The current position within the remote file
+     attr_reader :pos
+
+     # Creates a new wrapper that encapsulates the given +handle+ (such as
+     # would be returned by Net::SFTP::Session#open!). The +sftp+ parameter
+     # must be the same Net::SFTP::Session instance that opened the file.
+     def initialize(sftp, handle)
+       @sftp = sftp
+       @handle = handle
+       @pos = 0
+       @real_pos = 0
+       @real_eof = false
+       @buffer = ""
+     end
+
+     # Repositions the file pointer to the given offset (relative to the
+     # start of the file). This will also reset the EOF flag.
+     def pos=(offset)
+       @real_pos = @pos = offset
+       @buffer = ""
+       @real_eof = false
+     end
+
+     # Closes the underlying file and sets the handle to +nil+. Subsequent
+     # operations on this object will fail.
+     def close
+       sftp.close!(handle)
+       @handle = nil
+     end
+
+     # Returns true if the end of the file has been encountered by a previous
+     # read. Setting the current file position via #pos= will reset this
+     # flag (useful if the file's contents have changed since the EOF was
+     # encountered).
+     def eof?
+       @real_eof && @buffer.empty?
+     end
+
+     # Reads up to +n+ bytes of data from the stream. Fewer bytes will be
+     # returned if EOF is encountered before the requested number of bytes
+     # could be read. Without an argument (or with a nil argument) all data
+     # to the end of the file will be read and returned.
+     #
+     # This will advance the file pointer (#pos).
+     def read(n=nil)
+       loop do
+         break if n && @buffer.length >= n
+         break unless fill
+       end
+
+       if n
+         result, @buffer = @buffer[0,n], (@buffer[n..-1] || "")
+       else
+         result, @buffer = @buffer, ""
+       end
+
+       @pos += result.length
+       return result
+     end
+
+     # Reads up to the next instance of +sep_string+ in the stream, and
+     # returns the bytes read (including +sep_string+). If +sep_string+ is
+     # omitted, it defaults to +$/+. If EOF is encountered before any data
+     # could be read, #gets will return +nil+. If the first argument is an
+     # integer, or the optional second argument is given, the returned string
+     # will be no longer than the given number of bytes.
+     def gets(sep_or_limit=$/, limit=Float::INFINITY)
+       if sep_or_limit.is_a? Integer
+         sep_string = $/
+         lim = sep_or_limit
+       else
+         sep_string = sep_or_limit
+         lim = limit
+       end
+
+       delim = if sep_string && sep_string.length == 0
+         "#{$/}#{$/}"
+       else
+         sep_string
+       end
+
+       loop do
+         at = @buffer.index(delim) if delim
+         if at
+           offset = [at + delim.length, lim].min
+           @pos += offset
+           line, @buffer = @buffer[0,offset], @buffer[offset..-1]
+           return line
+         elsif lim < @buffer.length
+           @pos += lim
+           line, @buffer = @buffer[0,lim], @buffer[lim..-1]
+           return line
+         elsif !fill
+           return nil if @buffer.empty?
+           @pos += @buffer.length
+           line, @buffer = @buffer, ""
+           return line
+         end
+       end
+     end
+
+     # Same as #gets, but raises EOFError if EOF is encountered before any
+     # data could be read.
+     def readline(sep_or_limit=$/, limit=Float::INFINITY)
+       line = gets(sep_or_limit, limit)
+       raise EOFError if line.nil?
+       return line
+     end
+
+     # Writes the given data to the stream, incrementing the file position and
+     # returning the number of bytes written.
+     def write(data)
+       data = data.to_s
+       sftp.write!(handle, @real_pos, data)
+       @real_pos += data.bytes.length
+       @pos = @real_pos
+       data.bytes.length
+     end
+
+     # Writes each argument to the stream. If +$\+ is set, it will be written
+     # after all arguments have been written.
+     def print(*items)
+       items.each { |item| write(item) }
+       write($\) if $\
+       nil
+     end
+
+     # Returns the size of the remote file, as reported by an fstat of the
+     # handle (see #stat).
+     def size
+       stat.size
+     end
+
+     # Resets the position to the beginning of the file.
+     def rewind
+       self.pos = 0
+     end
+
+     # Writes each argument to the stream, appending a newline to any item
+     # that does not already end in a newline. Array arguments are flattened.
+     def puts(*items)
+       items.each do |item|
+         if Array === item
+           puts(*item)
+         else
+           write(item)
+           write("\n") unless item[-1] == ?\n
+         end
+       end
+       nil
+     end
+
+     # Performs an fstat operation on the handle and returns the attribute
+     # object (Net::SFTP::Protocol::V01::Attributes, Net::SFTP::Protocol::V04::Attributes,
+     # or Net::SFTP::Protocol::V06::Attributes, depending on the SFTP protocol
+     # version in use).
+     def stat
+       sftp.fstat!(handle)
+     end
+
+     private
+
+     # Fills the buffer. Returns +true+ if it succeeded, and +false+ if
+     # EOF was encountered before any data was read.
+     def fill
+       data = sftp.read!(handle, @real_pos, 8192)
+
+       if data.nil?
+         @real_eof = true
+         return false
+       else
+         @real_pos += data.length
+         @buffer << data
+       end
+
+       !@real_eof
+     end
+   end
+
+ end; end; end
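
Finally, a short sketch of the IO-like Operations::File wrapper above. Again, the host and paths are placeholders, and the "w" open flag assumes the session's file factory accepts the usual IO open modes, which is not shown in this file.

    require 'net/sftp'

    Net::SFTP.start("example.com", "user") do |sftp|
      # Read a remote file line by line, then report its size via fstat.
      file = sftp.file.open("/etc/hostname")
      while line = file.gets
        puts line
      end
      puts "#{file.size} bytes total"
      file.close

      # Write a new remote file; #puts appends newlines, #write does not.
      out = sftp.file.open("/tmp/greeting.txt", "w")
      out.puts "hello from net-sftp"
      out.write "no trailing newline"
      out.close
    end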