capun 0.0.30 → 0.0.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 4baa7ac853298bbe25092a0711f86178c2be77d6
- data.tar.gz: 2a323905899516638368782a88698c7fed381eaf
+ metadata.gz: c9cad418ca5b3b3cbbcab859022a574e1f549892
+ data.tar.gz: 92424bad80df1c7957fc4085fd196db137bd5bae
  SHA512:
- metadata.gz: bac8bab9b399d3b8f4cf1a8eb68c44c6f0e51637f77ec986791b48cc9c8c649d61fcf86134c265e2283011529428eba982e8b5ec067c19c943daa17f39d92b9b
- data.tar.gz: 76fb48ee778f41e39115800837a073518006e46069d6efcb0fb198f2408f9645bce6f6515fde083ec5b7decf9cfbf75b018ef5f4f76c9eac4e504f06ff119b1a
+ metadata.gz: ce502e94d0245afc0d2de1a6e41223049b13a67bf6c53d50849cc8dacd5523363eb221dd8c34d8272c5af12ade0ad416dff745cdd825636b9a5ad945bc9e27cc
+ data.tar.gz: 25e78ac4a8917753230dedd420dabae1b96e7682a36c9e283d7a17f88b3a2d597e45c00483218a9962ecafc73eb3fb7d4b29c593f2c7d69511efd05fe7786d81
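
Both digest blocks cover the archives inside the packaged gem, so they can be checked locally; a minimal sketch, assuming the gem CLI, tar, and the coreutils digest tools are on PATH:

  # a .gem file is a plain tar archive containing metadata.gz and data.tar.gz
  gem fetch capun --version 0.0.31
  tar -xf capun-0.0.31.gem metadata.gz data.tar.gz
  sha1sum metadata.gz data.tar.gz     # compare against the SHA1 block above
  sha512sum metadata.gz data.tar.gz   # compare against the SHA512 block above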
@@ -33,6 +33,10 @@ set :std_uploads, [
  # unicorn.config.rb
  {what: "config/deploy/unicorn.config.rb.erb", where: '#{shared_path}/config/unicorn.config.rb', upload: true, overwrite: true},
  # database.yml
+ {what: "config/deploy/backup.sh.erb", where: '#{shared_path}/backup.sh', upload: true, overwrite: true},
37
+ # backup.sh.erb
38
+ {what: "config/deploy/drivesink.py", where: '#{shared_path}/drivesink.py', upload: true, overwrite: true},
39
+ # backup.sh.erb
  {what: "config/deploy/database.yml.erb", where: '#{shared_path}/config/database.yml', upload: true, overwrite: true},
  # jenkins' config.xml
  {what: "config/deploy/jenkins.config.xml.erb", where: '/var/lib/jenkins/jobs/#{fetch(:application)}/config.xml', upload: -> { !!fetch(:addJenkins) }, overwrite: false},
@@ -49,6 +53,16 @@ set :std_symlinks, [
  {what: "application.yml", where: '#{release_path}/config/application.yml'},
  {what: "newrelic.yml", where: '#{release_path}/config/newrelic.yml'}
  ]
+ namespace :backup do
+   desc 'Backup application'
+   task :exec do
+     on roles(:app) do
+       if fetch(:useBackups)
+         execute "sudo /home/#{fetch(:user)}/apps/#{fetch(:application)}/shared/backup.sh"
+       end
+     end
+   end
+ end

  namespace :predeploy do
  namespace :install do
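
The new backup:exec task is gated on the useBackups setting and can also be triggered on demand; a minimal invocation sketch (the stage name is illustrative):

  # run the backup task against a stage named "production" (illustrative)
  bundle exec cap production backup:exec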
@@ -67,10 +81,10 @@ before 'deploy', 'predeploy:install:rvm_ruby'

  namespace :deploy do

- desc 'Kills running processes'
+ desc 'Kills unicorn processes'
  task :kill_me do
    on roles(:app) do
-     execute "kill -9 $(ps aux | grep #{fetch(:application)} | grep -v grep | awk '{print $2}') || true"
+     execute "cd /home/#{fetch(:user)}/apps/#{fetch(:application)}/shared/tmp/pids; for line in $(ls | grep unicorn); do kill -15 $(sudo cat $line) || true ; done;"
    end
  end
  before :deploy, 'deploy:kill_me'
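
The switch from kill -9 on a ps/grep match to kill -15 on the recorded pid files matters for unicorn: SIGTERM can be trapped, so the master can shut its workers down cleanly, while SIGKILL cannot. A minimal sketch of the same idea against a single pid file (path illustrative):

  # SIGTERM (15) lets unicorn shut down gracefully; SIGKILL (9) is untrappable
  kill -15 "$(cat /home/deploy/apps/myapp/shared/tmp/pids/unicorn.pid)"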
@@ -175,6 +189,18 @@ namespace :deploy do
  end
  end

+ desc 'Update cron backup task'
+ task :update_cron do
+   if fetch(:useBackups)
+     on roles(:app) do
+       execute :chmod, "+x #{shared_path}/backup.sh"
+       info "making backup.sh executable"
+       execute :sudo, :ln, "-nfs", "#{shared_path}/backup.sh /etc/cron.#{fetch(:backupTime)}/backup-#{fetch(:application).gsub(/\./, '-')}"
+       info "Create symbolic link for backup"
+     end
+   end
+ end
+
  end

  before "deploy:updating", "deploy:make_dirs"
@@ -185,4 +211,5 @@ after "deploy:publishing", "deploy:set_up_jenkins"
  after "deploy:publishing", "deploy:prepare_logrotate"
  after "deploy:publishing", "deploy:restart_nginx"
  after "deploy:publishing", "deploy:restart_logstash"
+ after "deploy:publishing", "deploy:update_cron"
  after "deploy:publishing", "unicorn:legacy_restart"
@@ -1,3 +1,3 @@
  module Capun
-   VERSION = "0.0.30"
+   VERSION = "0.0.31"
  end
@@ -22,12 +22,14 @@ module Capun
  end
  @addELK = ask("Would you like to add ELK-compatible logging? [Y/n]").capitalize == 'Y'
  @addlogrotate = ask("Would you like to add logrotate configuration to stage? [Y/n]").capitalize == 'Y'
+ @useBackups = ask("Would you like to add the Amazon backup system? [Y/n]").capitalize == 'Y'
  end

  def add_stage
    template "stage.rb.erb", "config/deploy/#{singular_name}.rb"
  end

+
  def copy_env_file
    copy_file Rails.root.join('config', 'environments', 'production.rb'), "config/environments/#{singular_name}.rb"
  end
@@ -85,6 +87,19 @@ module Capun
  end
  end

+ def useBackups
+   if @useBackups
+     append_to_file "config/deploy/#{singular_name}.rb", "#backup_system\n" +
+       "set :useBackups, true\n" +
+       "set :backupTime, \"daily\" # available: hourly, daily, monthly, weekly\n" +
+       "set :backupFolders, %w{public/system} #recursive\n" +
+       "#set :slack_hook, [hook]\n" +
+       "#set :slack_channel, [channel] #must be specified"
+     copy_file "backup.sh.erb", "config/deploy/backup.sh.erb"
+     copy_file "drivesink.py", "config/deploy/drivesink.py"
+   end
+ end
+
  def add_jenkins
    if @addJenkins
      copy_file "jenkins.config.xml.erb", "config/deploy/jenkins.config.xml.erb"
@@ -105,4 +120,4 @@ module Capun
  end
  end
  end
- end
+ end
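
For context, the generator methods above run during stage generation; a sketch of the assumed invocation (the capun:stage generator name is inferred from stage_generator.rb in the gem's file list):

  # answering Y to the backup prompt appends the set :useBackups block to the
  # stage file and copies backup.sh.erb and drivesink.py into config/deploy/
  bin/rails generate capun:stage production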
@@ -0,0 +1,35 @@
+ #!/bin/bash
+
+ cd /home/<%= fetch(:user) %>/apps/<%= fetch(:application) %>/shared
+ curdate=$(date +"%m-%d-%y")
+ mkdir -p "backuping/version-$curdate"
+
+ zip -u -0 -r -s 1000 "backuping/version-$curdate/assets.zip" <% fetch(:backupFolders).each do |folder|%> <%= folder + ' ' %> <% end %>
+
+ adapter=$(cat config/database.yml | grep adapter | sed s/adapter://g | xargs)
+ if [ "$adapter" = "sqlite3" ]; then
+   DATA_BASE_TYPE="SQLite3"
+   cp "<%=fetch(:stage)%>.sqlite3" "backuping/version-$curdate/database.sqlite3"
+ fi
+ if [ "$adapter" = "mysql2" ]; then
+   DATA_BASE_TYPE="MySQL"
+   username=$(cat config/database.yml | grep username | sed s/username://g | xargs)
+   password=$(cat config/database.yml | grep password | sed s/password://g | xargs)
+   database=$(cat config/database.yml | grep database | sed s/database://g | xargs)
+   mysqldump --user="$username" --password="$password" $database > "backuping/version-$curdate/database.dump"
+ fi
+
+ python drivesink.py upload "backuping" <%= fetch(:backupDestinationFolder) || "backups/firstdedic-server/" + fetch(:application) %>
+
+ if [ $? -eq 0 ]; then
+   SIZE=$(du -sh backuping | awk '{print $1}')
+   curl -X POST -H 'Content-type: application/json' \
+     --data "{'attachments': [{'mrkdwn_in': ['text'],'text': 'Backup successfully created for *<%= fetch(:application) %>*\n<% fetch(:backupFolders).each do |folder|%><%= "-" +folder + '\n' %><% end %>Database type: *$DATA_BASE_TYPE*\nSize: *$SIZE*','color': '#3AA3E3', 'title': 'Amazon Backup System'}]<%=", \'channel\': \'#{fetch(:slack_channel)}\' " if !fetch(:slack_channel).nil? %>}" \
+     <%= fetch(:slack_hook) %>
+ else
+   curl -X POST -H 'Content-type: application/json' \
+     --data "{'attachments': [{'mrkdwn_in': ['text'],'text': 'An error occurred while creating the backup for *<%= fetch(:application) %>*','color': '#FF0000', 'title': 'Amazon Backup System'}]<%=", \'channel\': \'#{fetch(:slack_channel)}\' " if !fetch(:slack_channel).nil? %>}" \
+     <%= fetch(:slack_hook) %>
+ fi
+
+ rm -rf backuping
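
The rendered script lands in the application's shared directory (see the std_uploads entry above) and can be exercised by hand before trusting cron with it; a sketch with illustrative user and application values:

  # same command the backup:exec task runs on the server
  sudo /home/deploy/apps/my.app/shared/backup.sh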
@@ -0,0 +1,336 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ import argparse
+ import hashlib
+ import json
+ import logging
+ import os
+ import requests
+ import requests_toolbelt
+ import sys
+ import uuid
+ import mimetypes
+ import inspect
+
+ class CloudNode(object):
+     def __init__(self, node):
+         self.node = node
+         self._children_fetched = False
+
+     def children(self):
+         if not self._children_fetched:
+             nodes = DriveSink.instance().request_metadata(
+                 "%%snodes/%s/children" % self.node["id"])
+             self._children = {n["name"]: CloudNode(n) for n in nodes["data"]}
+             self._children_fetched = True
+         return self._children
+
+     def child(self, name, create=False):
+         node = self.children().get(name)
+         if not node and create:
+             node = self._make_child_folder(name)
+         return node
+
+     def upload_child_file(self, name, local_path, existing_node=None):
+         logging.info("Uploading %s to %s", local_path, self.node["name"])
+         mime_type = _get_mimetype(name)
+         m = requests_toolbelt.MultipartEncoder([
+             ("metadata", json.dumps({
+                 "name": name,  # the name the file will have in the cloud can be set here
+                 "kind": "FILE",
+                 "parents": [self.node["id"]]
+             })),
+             ("content", (name, open(local_path, "rb"), mime_type))])
+         if existing_node:
+             """
+             # TODO: this is under-documented and currently 500s on Amazon's side
+             node = CloudNode(DriveSink.instance().request_content(
+                 "%%snodes/%s/content" % existing_node.node["id"],
+                 method="put", data=m, headers={"Content-Type": m.content_type}))
+             """
+             old_info = DriveSink.instance().request_metadata(
+                 "%%s/trash/%s" % existing_node.node["id"], method="put")
+         node = CloudNode(DriveSink.instance().request_content(
+             "%snodes", method="post", data=m,
+             headers={"Content-Type": m.content_type}))
+         self._children[name] = node
+
+     def download_file(self, local_path):
+         logging.info("Downloading %s into %s", self.node["name"], local_path)
+         req = DriveSink.instance().request_content(
+             "%%snodes/%s/content" % self.node["id"], method="get", stream=True,
+             decode=False)
+         if req.status_code != 200:
+             logging.error("Unable to download file: %r", req.text)
+             sys.exit(1)
+         with open(local_path, "wb") as f:
+             for chunk in req.iter_content():
+                 f.write(chunk)
+
+     def differs(self, local_path):
+         # This commented-out block would skip files whose size and md5
+         # already match the uploaded copy (i.e. a duplicate check):
+         #
+         # return (not os.path.exists(local_path) or
+         #     self.node["contentProperties"]["size"] !=
+         #     os.path.getsize(local_path) or
+         #     self.node["contentProperties"]["md5"] !=
+         #     self._md5sum(local_path))
+
+         return True
+
+     def _md5sum(self, filename, blocksize=65536):
+         md5 = hashlib.md5()
+         with open(filename, "rb") as f:
+             for block in iter(lambda: f.read(blocksize), ""):
+                 md5.update(block)
+         return md5.hexdigest()
+
+     def _make_child_folder(self, name):
+         logging.info(
+             "Creating remote folder %s in %s", name, self.node["name"])
+         node = CloudNode(
+             DriveSink.instance().request_metadata("%snodes", {
+                 "kind": "FOLDER",
+                 "name": name,
+                 "parents": [self.node["id"]]}))
+         self._children[name] = node
+         return node
+
+
+ class DriveSink(object):
+     def __init__(self, args):
+         if not args:
+             logging.error("Never initialized")
+             sys.exit(1)
+         self.args = args
+         self.config = None
+
+     @classmethod
+     def instance(cls, args=None):
+         if not hasattr(cls, "_instance"):
+             cls._instance = cls(args)
+         return cls._instance
+
+     def upload(self, source, destination):
+         remote_node = self.node_at_path(
+             self.get_root(), destination, create_missing=True)
+         for dirpath, dirnames, filenames in os.walk(source):
+             relative = dirpath[len(source):]
+             current_dir = self.node_at_path(
+                 remote_node, relative, create_missing=True)
+             if not current_dir:
+                 logging.error("Could not create missing node")
+                 sys.exit(1)
+             for dirname in dirnames:
+                 logging.info(dirnames)
+                 current_dir.child(dirname, create=True)
+             for filename in filenames:
+                 local_path = os.path.join(dirpath, filename)
+                 node = current_dir.child(filename)
+                 if (not node or node.differs(
+                         local_path)) and self.filter_file(filename):
+                     current_dir.upload_child_file(filename, local_path, node)
+
+     def upload_file(self, source, destination):
+         '''
+         Receives the full path to a file, including the file name and
+         extension (e.g. /home/ubuntu/upl/index.html), and splits it into
+         the directory path (dirpath=/home/ubuntu/upl/) and the file name
+         (filename=index.html).
+
+         It then proceeds by analogy with "upload".
+         '''
+         remote_node = self.node_at_path(
+             self.get_root(), destination, create_missing=True)
+
+         dirpath = os.path.split(source)[0]
+         filename = os.path.split(source)[1]
+
+         relative = dirpath[len(dirpath):]
+         current_dir = self.node_at_path(
+             remote_node, relative, create_missing=True)
+
+         local_path = os.path.join(dirpath, filename)
+         node = current_dir.child(filename)
+         current_dir.upload_child_file(filename, local_path, node)
+
+     def download(self, source, destination):
+         to_download = [(self.node_at_path(self.get_root(), source),
+                         self.join_path(destination, create_missing=True))]
+         while len(to_download):
+             node, path = to_download.pop(0)
+             for name, child in node.children().iteritems():
+                 if child.node["kind"] == "FOLDER":
+                     to_download.append((child, self.join_path(
+                         child.node["name"], path, create_missing=True)))
+                 elif child.node["kind"] == "FILE":
+                     local_path = os.path.join(path, child.node["name"])
+                     if child.differs(local_path):
+                         child.download_file(local_path)
+
+     def filter_file(self, filename):
+         _, extension = os.path.splitext(filename)
+         extension = extension.lstrip(".").lower()
+
+         # This commented-out block uploads a file only if its extension is
+         # in the allowed list. To filter files, uncomment it and comment
+         # out the "return extension" line below:
+         #
+         # allowed = self.args.extensions
+         # if not allowed:
+         #     # Not all tested to be free
+         #     allowed = (
+         #         "apng,arw,bmp,cr2,crw,dng,emf,gif,jfif,jpe,jpeg,jpg,mef,nef,"
+         #         "orf,pcx,png,psd,raf,ras,srw,swf,tga,tif,tiff,wmf")
+         # return extension in allowed.split(",")
+
+         return extension
+
+     def get_root(self):
+         nodes = self.request_metadata("%snodes?filters=isRoot:true")
+         if nodes["count"] != 1:
+             logging.error("Could not find root")
+             sys.exit(1)
+         return CloudNode(nodes["data"][0])
+
+     def node_at_path(self, root, path, create_missing=False):
+         parts = filter(None, path.split("/"))
+         node = root
+         while len(parts):
+             node = node.child(parts.pop(0), create=create_missing)
+             if not node:
+                 return None
+         return node
+
+     def join_path(self, destination, root="/", create_missing=True):
+         directory = os.path.join(root, destination)
+         if not os.path.exists(directory):
+             if create_missing:
+                 os.makedirs(directory)
+             else:
+                 return None
+         if not os.path.isdir(directory):
+             logging.error("%s is not a directory", directory)
+             sys.exit(1)
+         return directory
+
+     def _config_file(self):
+         config_filename = self.args.config or os.environ.get(
+             "DRIVESINK", None)
+         if not config_filename:
+             config_filename = os.path.join(
+                 os.path.expanduser("~"), ".drivesink")
+         return config_filename
+
+     def _config(self):
+         if not self.config:
+             config_filename = self._config_file()
+             try:
+                 self.config = json.loads(open(config_filename, "r").read())
+             except:
+                 print "%s/config to get your tokens" % self.args.drivesink
+                 sys.exit(1)
+         return self.config
+
+     def request_metadata(self, path, json_data=None, **kwargs):
+         args = {}
+         if json_data:
+             args["method"] = "post"
+             args["data"] = json.dumps(json_data)
+         else:
+             args["method"] = "get"
+
+         args.update(kwargs)
+
+         return self._request(
+             path % self._config()["metadataUrl"], **args)
+
+     def request_content(self, path, **kwargs):
+         # The '?suppress=deduplication' suffix makes it possible to upload
+         # identical files into different folders. Files with the same name
+         # replace one another; the hash sum is not taken into account.
+         return self._request(
+             (path + '?suppress=deduplication') % self._config()["contentUrl"], **kwargs)
+
+     def _request(self, url, refresh=True, decode=True, **kwargs):
+         headers = {
+             "Authorization": "Bearer %s" % self._config()["access_token"],
+         }
+         headers.update(kwargs.pop("headers", {}))
+         req = requests.request(url=url, headers=headers, **kwargs)
+
+         if req.status_code == 401 and refresh:
+             # Have to proxy to get the client id and secret
+             req = requests.post("%s/refresh" % self.args.drivesink, data={
+                 "refresh_token": self._config()["refresh_token"],
+             })
+             if req.status_code != 200:
+                 try:
+                     response = req.json()
+                     logging.error("Got Amazon code %s: %s",
+                                   response["code"], response["message"])
+                     sys.exit(1)
+                 except Exception:
+                     pass
+                 req.raise_for_status()
+             try:
+                 new_config = req.json()
+             except:
+                 logging.error("Could not refresh: %r", req.text)
+                 raise
+             self.config.update(new_config)
+             with open(self._config_file(), "w") as f:
+                 f.write(json.dumps(self.config, sort_keys=True, indent=4))
+             return self._request(url, refresh=False, decode=decode, **kwargs)
+         if req.status_code != 200:
+             try:
+                 response = req.json()
+                 logging.error("Got Amazon code %s: %s",
+                               response["code"], response["message"])
+                 sys.exit(1)
+             except Exception:
+                 pass
+         # A notification of a successful upload could be hooked in here (response code 200)
+         if req.status_code == 200:
+             logging.info('Data sent: <' + inspect.stack()[1][3] + '> Response code: 200. OK.')
+
+         req.raise_for_status()
+         if decode:
+             return req.json()
+         return req
+
+
+ def _get_mimetype(file_name=''):
+     mt = mimetypes.guess_type(file_name)[0]
+     return mt if mt else 'application/octet-stream'
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="Amazon Cloud Drive synchronization tool")
+     parser.add_argument("command", choices=["upload", "download"],
+                         help="Commands: 'upload' or 'download'")
+     parser.add_argument("source", help="The source directory")
+     parser.add_argument("destination", help="The destination directory")
+     parser.add_argument("-e", "--extensions",
+                         help="File extensions to upload, images by default")
+     parser.add_argument("-c", "--config", help="The config file")
+     parser.add_argument("-d", "--drivesink", help="Drivesink URL",
+                         default="https://drivesink.appspot.com")
+     args = parser.parse_args()
+
+     drivesink = DriveSink.instance(args)
+
+     if args.command == "upload":
+         if not os.path.isfile(args.source):
+             drivesink.upload(args.source, args.destination)
+         else:
+             drivesink.upload_file(args.source, args.destination)
+     elif args.command == "download":
+         drivesink.download(args.source, args.destination)
+
+
+ logging.basicConfig(
+     format="%(levelname) -10s %(module)s:%(lineno)s %(funcName)s %(message)s",
+     level=logging.DEBUG)
+ logging.getLogger("requests").setLevel(logging.WARNING)
+
+ if __name__ == "__main__":
+     main()
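
drivesink reads its OAuth tokens from ~/.drivesink (or the DRIVESINK environment variable / the -c flag) and is driven entirely from the command line; usage sketches with illustrative paths:

  # upload a directory tree, as backup.sh does
  python drivesink.py upload backuping backups/firstdedic-server/my.app
  # download it back into a local directory
  python drivesink.py download backups/firstdedic-server/my.app ./restored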
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: capun
  version: !ruby/object:Gem::Version
-   version: 0.0.30
+   version: 0.0.31
  platform: ruby
  authors:
  - Ivan Zamylin
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-07-12 00:00:00.000000000 Z
+ date: 2016-08-23 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler
@@ -129,9 +129,11 @@ files:
  - lib/generators/capun/stage_generator.rb
  - lib/generators/capun/templates/Capfile
  - lib/generators/capun/templates/append_info.excerpt
+ - lib/generators/capun/templates/backup.sh.erb
  - lib/generators/capun/templates/basic_authenticatable.rb.erb
  - lib/generators/capun/templates/database.yml.erb
  - lib/generators/capun/templates/deploy.rb.erb
+ - lib/generators/capun/templates/drivesink.py
  - lib/generators/capun/templates/jenkins.config.xml.erb
  - lib/generators/capun/templates/lograge_env_config.excerpt
  - lib/generators/capun/templates/lograge_initializer.rb