s3_cmd_bin 0.0.1

Files changed (74)
  1. data/.gitignore +17 -0
  2. data/Gemfile +3 -0
  3. data/LICENSE.txt +22 -0
  4. data/README.md +28 -0
  5. data/Rakefile +1 -0
  6. data/lib/s3_cmd_bin/version.rb +3 -0
  7. data/lib/s3_cmd_bin.rb +15 -0
  8. data/resources/ChangeLog +1462 -0
  9. data/resources/INSTALL +97 -0
  10. data/resources/LICENSE +339 -0
  11. data/resources/MANIFEST.in +2 -0
  12. data/resources/Makefile +4 -0
  13. data/resources/NEWS +234 -0
  14. data/resources/README +342 -0
  15. data/resources/S3/ACL.py +224 -0
  16. data/resources/S3/ACL.pyc +0 -0
  17. data/resources/S3/AccessLog.py +92 -0
  18. data/resources/S3/AccessLog.pyc +0 -0
  19. data/resources/S3/BidirMap.py +42 -0
  20. data/resources/S3/BidirMap.pyc +0 -0
  21. data/resources/S3/CloudFront.py +773 -0
  22. data/resources/S3/CloudFront.pyc +0 -0
  23. data/resources/S3/Config.py +294 -0
  24. data/resources/S3/Config.pyc +0 -0
  25. data/resources/S3/ConnMan.py +71 -0
  26. data/resources/S3/ConnMan.pyc +0 -0
  27. data/resources/S3/Exceptions.py +88 -0
  28. data/resources/S3/Exceptions.pyc +0 -0
  29. data/resources/S3/FileDict.py +53 -0
  30. data/resources/S3/FileDict.pyc +0 -0
  31. data/resources/S3/FileLists.py +517 -0
  32. data/resources/S3/FileLists.pyc +0 -0
  33. data/resources/S3/HashCache.py +53 -0
  34. data/resources/S3/HashCache.pyc +0 -0
  35. data/resources/S3/MultiPart.py +137 -0
  36. data/resources/S3/MultiPart.pyc +0 -0
  37. data/resources/S3/PkgInfo.py +14 -0
  38. data/resources/S3/PkgInfo.pyc +0 -0
  39. data/resources/S3/Progress.py +173 -0
  40. data/resources/S3/Progress.pyc +0 -0
  41. data/resources/S3/S3.py +979 -0
  42. data/resources/S3/S3.pyc +0 -0
  43. data/resources/S3/S3Uri.py +223 -0
  44. data/resources/S3/S3Uri.pyc +0 -0
  45. data/resources/S3/SimpleDB.py +178 -0
  46. data/resources/S3/SortedDict.py +66 -0
  47. data/resources/S3/SortedDict.pyc +0 -0
  48. data/resources/S3/Utils.py +462 -0
  49. data/resources/S3/Utils.pyc +0 -0
  50. data/resources/S3/__init__.py +0 -0
  51. data/resources/S3/__init__.pyc +0 -0
  52. data/resources/TODO +52 -0
  53. data/resources/artwork/AtomicClockRadio.ttf +0 -0
  54. data/resources/artwork/TypeRa.ttf +0 -0
  55. data/resources/artwork/site-top-full-size.xcf +0 -0
  56. data/resources/artwork/site-top-label-download.png +0 -0
  57. data/resources/artwork/site-top-label-s3cmd.png +0 -0
  58. data/resources/artwork/site-top-label-s3sync.png +0 -0
  59. data/resources/artwork/site-top-s3tools-logo.png +0 -0
  60. data/resources/artwork/site-top.jpg +0 -0
  61. data/resources/artwork/site-top.png +0 -0
  62. data/resources/artwork/site-top.xcf +0 -0
  63. data/resources/format-manpage.pl +196 -0
  64. data/resources/magic +63 -0
  65. data/resources/run-tests.py +537 -0
  66. data/resources/s3cmd +2116 -0
  67. data/resources/s3cmd.1 +435 -0
  68. data/resources/s3db +55 -0
  69. data/resources/setup.cfg +2 -0
  70. data/resources/setup.py +80 -0
  71. data/resources/testsuite.tar.gz +0 -0
  72. data/resources/upload-to-sf.sh +7 -0
  73. data/s3_cmd_bin.gemspec +23 -0
  74. metadata +152 -0
data/resources/README ADDED
@@ -0,0 +1,342 @@
+ S3cmd tool for Amazon Simple Storage Service (S3)
+ =================================================
+
+ Author:
+     Michal Ludvig <michal@logix.cz>
+
+ S3tools / S3cmd project homepage:
+     http://s3tools.org
+
+ S3tools / S3cmd mailing lists:
+ * Announcements of new releases:
+     s3tools-announce@lists.sourceforge.net
+
+ * General questions and discussion about usage:
+     s3tools-general@lists.sourceforge.net
+
+ * Bug reports:
+     s3tools-bugs@lists.sourceforge.net
+
+ Amazon S3 homepage:
+     http://aws.amazon.com/s3
+
+ !!!
+ !!! Please consult the INSTALL file for installation instructions!
+ !!!
+
+ What is Amazon S3
+ -----------------
+ Amazon S3 provides a managed, internet-accessible storage
+ service where anyone can store any amount of data and
+ retrieve it later. The maximum size of one "object" is 5GB;
+ the number of objects is not limited.
+
+ S3 is a paid service operated by the well-known Amazon.com
+ internet book shop. Before storing anything in S3 you
+ must sign up for an "AWS" account (where AWS = Amazon Web
+ Services) to obtain a pair of identifiers: Access Key and
+ Secret Key. You will need to give these keys to S3cmd.
+ Think of them as a username and password for your
+ S3 account.
+
+ Pricing explained
+ -----------------
+ At the time of this writing the costs of using S3 are (in USD):
+
+     $0.15 per GB per month of storage space used
+
+ plus
+
+     $0.10 per GB - all data uploaded
+
+ plus
+
+     $0.18 per GB - first 10 TB / month data downloaded
+     $0.16 per GB - next 40 TB / month data downloaded
+     $0.13 per GB - data downloaded / month over 50 TB
+
+ plus
+
+     $0.01 per 1,000 PUT or LIST requests
+     $0.01 per 10,000 GET and all other requests
+
+ If, for instance, on the 1st of January you upload 2GB of
+ JPEG photos from your holiday in New Zealand, at the
+ end of January you will be charged $0.30 for using 2GB of
+ storage space for a month, $0.20 for uploading 2GB
+ of data, and a few cents for requests.
+ That comes to slightly over $0.50 for a complete backup
+ of your precious holiday pictures.
+
+ In February you don't touch it. Your data are still on S3
+ servers so you pay $0.30 for those two gigabytes, but not
+ a single cent will be charged for any transfer. That comes
+ to $0.30 as the ongoing cost of your backup. Not too bad.
+
+ In March you allow anonymous read access to some of your
+ pictures and your friends download, say, 500MB of them.
+ As the files are owned by you, you are responsible for the
+ costs incurred. That means at the end of March you'll be
+ charged $0.30 for storage plus $0.09 for the download traffic
+ generated by your friends.
+
+ There is no minimum monthly contract or a setup fee. What
+ you use is what you pay for. At the beginning my bill used
+ to be like US$0.03 or even nil.
+
+ That's the pricing model of Amazon S3 in a nutshell. Check
+ the Amazon S3 homepage at http://aws.amazon.com/s3 for more
+ details.
+
+ Needless to say, all this money is charged by Amazon
+ itself; there is obviously no charge for using S3cmd :-)
+
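+ As a quick sanity check of the example bills above, here is a
+ short Python sketch (not part of s3cmd, and using only the
+ 2009-era prices quoted in this README):
+
+     # Back-of-the-envelope check of the example bills above.
+     STORAGE_PER_GB  = 0.15   # per GB-month stored
+     UPLOAD_PER_GB   = 0.10   # per GB uploaded
+     DOWNLOAD_PER_GB = 0.18   # per GB downloaded (first tier)
+
+     january  = 2 * STORAGE_PER_GB + 2 * UPLOAD_PER_GB      # $0.50 plus requests
+     february = 2 * STORAGE_PER_GB                          # $0.30, no transfer
+     march    = 2 * STORAGE_PER_GB + 0.5 * DOWNLOAD_PER_GB  # $0.30 + $0.09
+
+     print("Jan: $%.2f  Feb: $%.2f  Mar: $%.2f" % (january, february, march))
+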
+ Amazon S3 basics
+ ----------------
+ Files stored in S3 are called "objects" and their names are
+ officially called "keys". Since this is sometimes confusing
+ for users, we often refer to the objects as "files" or
+ "remote files". Each object belongs to exactly one "bucket".
+
+ To describe objects in S3 storage we invented a URI-like
+ schema in the following form:
+
+     s3://BUCKET
+ or
+     s3://BUCKET/OBJECT
+
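+ As a minimal sketch, the bundled S3Uri class can parse this
+ schema. The import path and the bucket()/object() accessors are
+ assumptions inferred from their usage in S3/AccessLog.py later
+ in this diff:
+
+     # Assumed API, inferred from S3/AccessLog.py in this same diff.
+     from S3.S3Uri import S3Uri
+
+     uri = S3Uri("s3://my-bucket/my/funny/picture.jpg")
+     print(uri.bucket())   # 'my-bucket'
+     print(uri.object())   # 'my/funny/picture.jpg'
+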
+ Buckets
+ -------
+ Buckets are sort of like directories or folders, with some
+ restrictions:
+ 1) each user can have 100 buckets at most,
+ 2) bucket names must be unique amongst all users of S3,
+ 3) buckets cannot be nested into a deeper hierarchy and
+ 4) a bucket name can only consist of basic alphanumeric
+    characters plus dot (.) and dash (-). No spaces, no accented
+    or UTF-8 letters, etc.
+
+ It is a good idea to use DNS-compatible bucket names. That
+ for instance means you should not use upper-case characters.
+ While DNS compliance is not strictly required, some features
+ described below are not available for buckets with
+ DNS-incompatible names. A further step is using a fully
+ qualified domain name (FQDN) for a bucket - that has even
+ more benefits. (A minimal name checker is sketched after the
+ examples below.)
+
+ * For example "s3://--My-Bucket--" is not DNS compatible.
+ * On the other hand "s3://my-bucket" is DNS compatible but
+   is not an FQDN.
+ * Finally "s3://my-bucket.s3tools.org" is DNS compatible
+   and an FQDN, provided you own the s3tools.org domain and can
+   create the domain record for "my-bucket.s3tools.org".
+
+ Look for "Virtual Hosts" later in this text for more details
+ regarding FQDN-named buckets.
+
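+ A minimal sketch of such a DNS-compatibility check. This is a
+ hypothetical helper, not part of s3cmd; the 3-63 character bound
+ follows the usual DNS label rules:
+
+     import re
+
+     def is_dns_compatible(bucket):
+         # lower-case alphanumerics, dots and dashes, 3-63 characters,
+         # starting and ending with a letter or digit
+         return bool(re.match(r"^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$", bucket))
+
+     print(is_dns_compatible("my-bucket"))      # True
+     print(is_dns_compatible("--My-Bucket--"))  # False
+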
+ Objects (files stored in Amazon S3)
+ -----------------------------------
+ Unlike buckets, objects have almost no restrictions on their
+ names. A name can be any UTF-8 string up to 1024 bytes long.
+ Interestingly enough, the object name can contain the forward
+ slash character (/), so "my/funny/picture.jpg" is a valid
+ object name. Note that there are no directories or
+ buckets called "my" and "funny" - it is really a single object
+ named "my/funny/picture.jpg" and S3 does not care at
+ all that it _looks_ like a directory structure.
+
+ The full URI of such an image could be, for example:
+
+     s3://my-bucket/my/funny/picture.jpg
+
+ Public vs Private files
+ -----------------------
+ The files stored in S3 can be either Private or Public. The
+ Private ones are readable only by the user who uploaded them
+ while the Public ones can be read by anyone. Additionally, the
+ Public files can be accessed over plain HTTP, not only
+ using s3cmd or a similar tool.
+
+ The ACL (Access Control List) of a file can be set at
+ upload time using the --acl-public or --acl-private options
+ with the 's3cmd put' or 's3cmd sync' commands (see below).
+
+ Alternatively, the ACL can be altered for existing remote files
+ with the 's3cmd setacl --acl-public' (or --acl-private) command.
+
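+ Under the hood these ACLs are XML policy documents, built by the
+ bundled S3/ACL.py (shown later in this diff). A minimal usage
+ sketch, mirroring that file's own __main__ demo:
+
+     from S3.ACL import ACL
+
+     acl = ACL()              # starts from an empty policy
+     acl.grantAnonRead()      # add the AllUsers READ grant
+     print(acl.isAnonRead())  # True
+     print(str(acl))          # XML to send back to S3 via the ?acl subresource
+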
+ Simple s3cmd HowTo
+ ------------------
+ 1) Register for Amazon AWS / S3
+    Go to http://aws.amazon.com/s3, click the "Sign up
+    for web service" button in the right column and work
+    through the registration. You will have to supply
+    your Credit Card details in order to allow Amazon
+    to charge you for S3 usage.
+    At the end you should have your Access and Secret Keys.
+
+ 2) Run "s3cmd --configure"
+    You will be asked for the two keys - copy and paste
+    them from your confirmation email or from your Amazon
+    account page. Be careful when copying them! They are
+    case sensitive and must be entered accurately or you'll
+    keep getting errors about invalid signatures or similar.
+
+ 3) Run "s3cmd ls" to list all your buckets.
+    As you have just started using S3 there are no buckets
+    owned by you yet, so the output will be empty.
+
+ 4) Make a bucket with "s3cmd mb s3://my-new-bucket-name"
+    As mentioned above, bucket names must be unique amongst
+    _all_ users of S3. That means simple names like "test"
+    or "asdf" are already taken and you must make up something
+    more original. To demonstrate as many features as possible
+    let's create an FQDN-named bucket s3://public.s3tools.org:
+
+    ~$ s3cmd mb s3://public.s3tools.org
+    Bucket 's3://public.s3tools.org' created
+
+ 5) List your buckets again with "s3cmd ls"
+    Now you should see your freshly created bucket:
+
+    ~$ s3cmd ls
+    2009-01-28 12:34  s3://public.s3tools.org
+
+ 6) List the contents of the bucket:
+
+    ~$ s3cmd ls s3://public.s3tools.org
+    ~$
+
+    It's empty, indeed.
+
+ 7) Upload a single file into the bucket:
+
+    ~$ s3cmd put some-file.xml s3://public.s3tools.org/somefile.xml
+    some-file.xml -> s3://public.s3tools.org/somefile.xml  [1 of 1]
+     123456 of 123456   100% in   2s   51.75 kB/s  done
+
+    Upload two directory trees into the bucket's virtual 'directory':
+
+    ~$ s3cmd put --recursive dir1 dir2 s3://public.s3tools.org/somewhere/
+    File 'dir1/file1-1.txt' stored as 's3://public.s3tools.org/somewhere/dir1/file1-1.txt' [1 of 5]
+    File 'dir1/file1-2.txt' stored as 's3://public.s3tools.org/somewhere/dir1/file1-2.txt' [2 of 5]
+    File 'dir1/file1-3.log' stored as 's3://public.s3tools.org/somewhere/dir1/file1-3.log' [3 of 5]
+    File 'dir2/file2-1.bin' stored as 's3://public.s3tools.org/somewhere/dir2/file2-1.bin' [4 of 5]
+    File 'dir2/file2-2.txt' stored as 's3://public.s3tools.org/somewhere/dir2/file2-2.txt' [5 of 5]
+
+    As you can see we didn't have to create the /somewhere
+    'directory'. In fact it's only a filename prefix, not
+    a real directory, and it doesn't have to be created in
+    any way beforehand.
+
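+    A minimal illustration of that idea in plain Python (not part
+    of s3cmd): keys are flat strings, and a "directory" is just a
+    shared prefix that you can filter on.
+
+        keys = [
+            "somewhere/dir1/file1-1.txt",
+            "somewhere/dir2/file2-1.bin",
+            "somefile.xml",
+        ]
+        # "listing a directory" is nothing more than a prefix match
+        print([k for k in keys if k.startswith("somewhere/")])
+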
+ 8) Now list the bucket contents again:
+
+    ~$ s3cmd ls s3://public.s3tools.org
+                           DIR  s3://public.s3tools.org/somewhere/
+    2009-02-10 05:10    123456  s3://public.s3tools.org/somefile.xml
+
+    Use --recursive (or -r) to list all the remote files:
+
+    ~$ s3cmd ls --recursive s3://public.s3tools.org
+    2009-02-10 05:10    123456  s3://public.s3tools.org/somefile.xml
+    2009-02-10 05:13        18  s3://public.s3tools.org/somewhere/dir1/file1-1.txt
+    2009-02-10 05:13         8  s3://public.s3tools.org/somewhere/dir1/file1-2.txt
+    2009-02-10 05:13        16  s3://public.s3tools.org/somewhere/dir1/file1-3.log
+    2009-02-10 05:13        11  s3://public.s3tools.org/somewhere/dir2/file2-1.bin
+    2009-02-10 05:13         8  s3://public.s3tools.org/somewhere/dir2/file2-2.txt
+
+ 9) Retrieve one of the files back and verify that it hasn't been
+    corrupted:
+
+    ~$ s3cmd get s3://public.s3tools.org/somefile.xml some-file-2.xml
+    s3://public.s3tools.org/somefile.xml -> some-file-2.xml  [1 of 1]
+     123456 of 123456   100% in   3s   35.75 kB/s  done
+
+    ~$ md5sum some-file.xml some-file-2.xml
+    39bcb6992e461b269b95b3bda303addf  some-file.xml
+    39bcb6992e461b269b95b3bda303addf  some-file-2.xml
+
+    The checksum of the original file matches that of the
+    retrieved one. Looks like it worked :-)
+
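+    The same check in a few lines of Python, equivalent to the
+    md5sum call above and using only the standard library:
+
+        import hashlib
+
+        def md5sum(path):
+            # hash the file in chunks so large files don't fill memory
+            h = hashlib.md5()
+            with open(path, "rb") as f:
+                for chunk in iter(lambda: f.read(65536), b""):
+                    h.update(chunk)
+            return h.hexdigest()
+
+        print(md5sum("some-file.xml") == md5sum("some-file-2.xml"))
+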
+    To retrieve a whole 'directory tree' from S3 use recursive get:
+
+    ~$ s3cmd get --recursive s3://public.s3tools.org/somewhere
+    File s3://public.s3tools.org/somewhere/dir1/file1-1.txt saved as './somewhere/dir1/file1-1.txt'
+    File s3://public.s3tools.org/somewhere/dir1/file1-2.txt saved as './somewhere/dir1/file1-2.txt'
+    File s3://public.s3tools.org/somewhere/dir1/file1-3.log saved as './somewhere/dir1/file1-3.log'
+    File s3://public.s3tools.org/somewhere/dir2/file2-1.bin saved as './somewhere/dir2/file2-1.bin'
+    File s3://public.s3tools.org/somewhere/dir2/file2-2.txt saved as './somewhere/dir2/file2-2.txt'
+
+    Since the destination directory wasn't specified, s3cmd
+    saved the directory structure in the current working
+    directory ('.').
+
+    There is an important difference between:
+       get s3://public.s3tools.org/somewhere
+    and
+       get s3://public.s3tools.org/somewhere/
+    (note the trailing slash).
+    S3cmd always uses the last path part, i.e. the word
+    after the last slash, for naming local files.
+
+    In the case of s3://.../somewhere the last path part
+    is 'somewhere' and therefore the recursive get names
+    the local files as somewhere/dir1, somewhere/dir2, etc.
+
+    On the other hand, in s3://.../somewhere/ the last path
+    part is empty and s3cmd will only create 'dir1' and 'dir2'
+    without the 'somewhere/' prefix:
+
+    ~$ s3cmd get --recursive s3://public.s3tools.org/somewhere/ /tmp
+    File s3://public.s3tools.org/somewhere/dir1/file1-1.txt saved as '/tmp/dir1/file1-1.txt'
+    File s3://public.s3tools.org/somewhere/dir1/file1-2.txt saved as '/tmp/dir1/file1-2.txt'
+    File s3://public.s3tools.org/somewhere/dir1/file1-3.log saved as '/tmp/dir1/file1-3.log'
+    File s3://public.s3tools.org/somewhere/dir2/file2-1.bin saved as '/tmp/dir2/file2-1.bin'
+
+    See? It's /tmp/dir1 and not /tmp/somewhere/dir1 as it
+    was in the previous example.
+
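+    The naming rule fits in one line of Python (an illustrative
+    sketch only, not how s3cmd implements it): the "last path part"
+    is whatever follows the final slash.
+
+        def last_path_part(uri):
+            # everything after the final '/', empty if the URI ends with one
+            return uri.rsplit("/", 1)[-1]
+
+        print(last_path_part("s3://public.s3tools.org/somewhere"))   # 'somewhere'
+        print(last_path_part("s3://public.s3tools.org/somewhere/"))  # '' (empty)
+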
+ 10) Clean up - delete the remote files and remove the bucket:
+
+    Remove everything under s3://public.s3tools.org/somewhere/
+
+    ~$ s3cmd del --recursive s3://public.s3tools.org/somewhere/
+    File s3://public.s3tools.org/somewhere/dir1/file1-1.txt deleted
+    File s3://public.s3tools.org/somewhere/dir1/file1-2.txt deleted
+    ...
+
+    Now try to remove the bucket:
+
+    ~$ s3cmd rb s3://public.s3tools.org
+    ERROR: S3 error: 409 (BucketNotEmpty): The bucket you tried to delete is not empty
+
+    Ouch, we forgot about s3://public.s3tools.org/somefile.xml.
+    We can force the bucket removal anyway:
+
+    ~$ s3cmd rb --force s3://public.s3tools.org/
+    WARNING: Bucket is not empty. Removing all the objects from it first. This may take some time...
+    File s3://public.s3tools.org/somefile.xml deleted
+    Bucket 's3://public.s3tools.org/' removed
+
+ Hints
+ -----
+ The basic usage is as simple as described in the previous
+ section.
+
+ You can increase the level of verbosity with the -v option, and
+ if you're really keen to know what the program does under
+ its bonnet, run it with -d to see all the 'debugging' output.
+
+ After configuring it with --configure, all available options
+ are written to your ~/.s3cfg file. It's a text file ready
+ to be modified in your favourite text editor.
+
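+ A minimal sketch of reading that file by hand (s3cmd itself does
+ this via S3/Config.py; the 'default' section and option names are
+ assumptions based on a standard s3cmd configuration):
+
+     import os
+     try:
+         from configparser import ConfigParser   # Python 3
+     except ImportError:
+         from ConfigParser import ConfigParser   # Python 2
+
+     cfg = ConfigParser()
+     cfg.read(os.path.expanduser("~/.s3cfg"))
+     print(cfg.get("default", "access_key"))  # assumed option name
+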
+ For more information refer to:
+ * S3cmd / S3tools homepage at http://s3tools.org
+ * Amazon S3 homepage at http://aws.amazon.com/s3
+
+ Enjoy!
+
+ Michal Ludvig
+ * michal@logix.cz
+ * http://www.logix.cz/michal
+
data/resources/S3/ACL.py ADDED
@@ -0,0 +1,224 @@
+ ## Amazon S3 - Access Control List representation
+ ## Author: Michal Ludvig <michal@logix.cz>
+ ## http://www.logix.cz/michal
+ ## License: GPL Version 2
+
+ from Utils import getTreeFromXml
+
+ try:
+     import xml.etree.ElementTree as ET
+ except ImportError:
+     import elementtree.ElementTree as ET
+
+ class Grantee(object):
+     ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
+     LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"
+
+     def __init__(self):
+         self.xsi_type = None
+         self.tag = None
+         self.name = None
+         self.display_name = None
+         self.permission = None
+
+     def __repr__(self):
+         return 'Grantee("%(tag)s", "%(name)s", "%(permission)s")' % {
+             "tag" : self.tag,
+             "name" : self.name,
+             "permission" : self.permission
+         }
+
+     def isAllUsers(self):
+         return self.tag == "URI" and self.name == Grantee.ALL_USERS_URI
+
+     def isAnonRead(self):
+         return self.isAllUsers() and (self.permission == "READ" or self.permission == "FULL_CONTROL")
+
+     def getElement(self):
+         el = ET.Element("Grant")
+         grantee = ET.SubElement(el, "Grantee", {
+             'xmlns:xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
+             'xsi:type' : self.xsi_type
+         })
+         name = ET.SubElement(grantee, self.tag)
+         name.text = self.name
+         permission = ET.SubElement(el, "Permission")
+         permission.text = self.permission
+         return el
+
+ class GranteeAnonRead(Grantee):
+     def __init__(self):
+         Grantee.__init__(self)
+         self.xsi_type = "Group"
+         self.tag = "URI"
+         self.name = Grantee.ALL_USERS_URI
+         self.permission = "READ"
+
+ class GranteeLogDelivery(Grantee):
+     def __init__(self, permission):
+         """
+         permission must be either READ_ACP or WRITE
+         """
+         Grantee.__init__(self)
+         self.xsi_type = "Group"
+         self.tag = "URI"
+         self.name = Grantee.LOG_DELIVERY_URI
+         self.permission = permission
+
+ class ACL(object):
+     EMPTY_ACL = "<AccessControlPolicy><Owner><ID></ID></Owner><AccessControlList></AccessControlList></AccessControlPolicy>"
+
+     def __init__(self, xml = None):
+         if not xml:
+             xml = ACL.EMPTY_ACL
+
+         self.grantees = []
+         self.owner_id = ""
+         self.owner_nick = ""
+
+         tree = getTreeFromXml(xml)
+         self.parseOwner(tree)
+         self.parseGrants(tree)
+
+     def parseOwner(self, tree):
+         self.owner_id = tree.findtext(".//Owner//ID")
+         self.owner_nick = tree.findtext(".//Owner//DisplayName")
+
+     def parseGrants(self, tree):
+         for grant in tree.findall(".//Grant"):
+             grantee = Grantee()
+             g = grant.find(".//Grantee")
+             grantee.xsi_type = g.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
+             grantee.permission = grant.find('Permission').text
+             for el in g:
+                 if el.tag == "DisplayName":
+                     grantee.display_name = el.text
+                 else:
+                     grantee.tag = el.tag
+                     grantee.name = el.text
+             self.grantees.append(grantee)
+
+     def getGrantList(self):
+         acl = []
+         for grantee in self.grantees:
+             if grantee.display_name:
+                 user = grantee.display_name
+             elif grantee.isAllUsers():
+                 user = "*anon*"
+             else:
+                 user = grantee.name
+             acl.append({'grantee': user, 'permission': grantee.permission})
+         return acl
+
+     def getOwner(self):
+         return { 'id' : self.owner_id, 'nick' : self.owner_nick }
+
+     def isAnonRead(self):
+         for grantee in self.grantees:
+             if grantee.isAnonRead():
+                 return True
+         return False
+
+     def grantAnonRead(self):
+         if not self.isAnonRead():
+             self.appendGrantee(GranteeAnonRead())
+
+     def revokeAnonRead(self):
+         self.grantees = [g for g in self.grantees if not g.isAnonRead()]
+
+     def appendGrantee(self, grantee):
+         self.grantees.append(grantee)
+
+     def hasGrant(self, name, permission):
+         name = name.lower()
+         permission = permission.upper()
+
+         for grantee in self.grantees:
+             if grantee.name.lower() == name:
+                 if grantee.permission == "FULL_CONTROL":
+                     return True
+                 elif grantee.permission.upper() == permission:
+                     return True
+
+         return False
+
+     def grant(self, name, permission):
+         if self.hasGrant(name, permission):
+             return
+
+         name = name.lower()
+         permission = permission.upper()
+
+         if "ALL" == permission:
+             permission = "FULL_CONTROL"
+
+         if "FULL_CONTROL" == permission:
+             self.revoke(name, "ALL")
+
+         grantee = Grantee()
+         grantee.name = name
+         grantee.permission = permission
+
+         if name.find('@') <= -1: # ultra lame attempt to differentiate email ids from canonical ids
+             grantee.xsi_type = "CanonicalUser"
+             grantee.tag = "ID"
+         else:
+             grantee.xsi_type = "AmazonCustomerByEmail"
+             grantee.tag = "EmailAddress"
+
+         self.appendGrantee(grantee)
+
+
+     def revoke(self, name, permission):
+         name = name.lower()
+         permission = permission.upper()
+
+         if "ALL" == permission:
+             self.grantees = [g for g in self.grantees if not g.name.lower() == name]
+         else:
+             self.grantees = [g for g in self.grantees if not (g.name.lower() == name and g.permission.upper() == permission)]
+
+
+     def __str__(self):
+         tree = getTreeFromXml(ACL.EMPTY_ACL)
+         tree.attrib['xmlns'] = "http://s3.amazonaws.com/doc/2006-03-01/"
+         owner = tree.find(".//Owner//ID")
+         owner.text = self.owner_id
+         acl = tree.find(".//AccessControlList")
+         for grantee in self.grantees:
+             acl.append(grantee.getElement())
+         return ET.tostring(tree)
+
+ if __name__ == "__main__":
+     xml = """<?xml version="1.0" encoding="UTF-8"?>
+ <AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Owner>
+     <ID>12345678901234567890</ID>
+     <DisplayName>owner-nickname</DisplayName>
+ </Owner>
+ <AccessControlList>
+     <Grant>
+         <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
+             <ID>12345678901234567890</ID>
+             <DisplayName>owner-nickname</DisplayName>
+         </Grantee>
+         <Permission>FULL_CONTROL</Permission>
+     </Grant>
+     <Grant>
+         <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group">
+             <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>
+         </Grantee>
+         <Permission>READ</Permission>
+     </Grant>
+ </AccessControlList>
+ </AccessControlPolicy>
+ """
+     acl = ACL(xml)
+     print "Grants:", acl.getGrantList()
+     acl.revokeAnonRead()
+     print "Grants:", acl.getGrantList()
+     acl.grantAnonRead()
+     print "Grants:", acl.getGrantList()
+     print acl
+
+ # vim:et:ts=4:sts=4:ai
data/resources/S3/ACL.pyc ADDED
Binary file (no diff shown)
data/resources/S3/AccessLog.py ADDED
@@ -0,0 +1,92 @@
+ ## Amazon S3 - Access Log representation
+ ## Author: Michal Ludvig <michal@logix.cz>
+ ## http://www.logix.cz/michal
+ ## License: GPL Version 2
+
+ import S3Uri
+ from Exceptions import ParameterError
+ from Utils import getTreeFromXml
+ from ACL import GranteeAnonRead
+
+ try:
+     import xml.etree.ElementTree as ET
+ except ImportError:
+     import elementtree.ElementTree as ET
+
+ __all__ = []
+ class AccessLog(object):
+     LOG_DISABLED = "<BucketLoggingStatus></BucketLoggingStatus>"
+     LOG_TEMPLATE = "<LoggingEnabled><TargetBucket></TargetBucket><TargetPrefix></TargetPrefix></LoggingEnabled>"
+
+     def __init__(self, xml = None):
+         if not xml:
+             xml = self.LOG_DISABLED
+         self.tree = getTreeFromXml(xml)
+         self.tree.attrib['xmlns'] = "http://doc.s3.amazonaws.com/2006-03-01"
+
+     def isLoggingEnabled(self):
+         return bool(self.tree.find(".//LoggingEnabled"))
+
+     def disableLogging(self):
+         el = self.tree.find(".//LoggingEnabled")
+         if el:
+             self.tree.remove(el)
+
+     def enableLogging(self, target_prefix_uri):
+         el = self.tree.find(".//LoggingEnabled")
+         if not el:
+             el = getTreeFromXml(self.LOG_TEMPLATE)
+             self.tree.append(el)
+         el.find(".//TargetBucket").text = target_prefix_uri.bucket()
+         el.find(".//TargetPrefix").text = target_prefix_uri.object()
+
+     def targetPrefix(self):
+         if self.isLoggingEnabled():
+             el = self.tree.find(".//LoggingEnabled")
+             target_prefix = "s3://%s/%s" % (
+                 self.tree.find(".//LoggingEnabled//TargetBucket").text,
+                 self.tree.find(".//LoggingEnabled//TargetPrefix").text)
+             return S3Uri.S3Uri(target_prefix)
+         else:
+             return ""
+
+     def setAclPublic(self, acl_public):
+         le = self.tree.find(".//LoggingEnabled")
+         if not le:
+             raise ParameterError("Logging not enabled, can't set default ACL for logs")
+         tg = le.find(".//TargetGrants")
+         if not acl_public:
+             if not tg:
+                 ## All good, it's not been there
+                 return
+             else:
+                 le.remove(tg)
+         else: # acl_public == True
+             anon_read = GranteeAnonRead().getElement()
+             if not tg:
+                 tg = ET.SubElement(le, "TargetGrants")
+             ## What if TargetGrants already exists? We should check if
+             ## AnonRead is there before appending a new one. Later...
+             tg.append(anon_read)
+
+     def isAclPublic(self):
+         raise NotImplementedError()
+
+     def __str__(self):
+         return ET.tostring(self.tree)
+ __all__.append("AccessLog")
+
+ if __name__ == "__main__":
+     from S3Uri import S3Uri
+     log = AccessLog()
+     print log
+     log.enableLogging(S3Uri("s3://targetbucket/prefix/log-"))
+     print log
+     log.setAclPublic(True)
+     print log
+     log.setAclPublic(False)
+     print log
+     log.disableLogging()
+     print log
+
+ # vim:et:ts=4:sts=4:ai
data/resources/S3/AccessLog.pyc ADDED
Binary file (no diff shown)