Left: | ||
Right: |
LEFT | RIGHT |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
mathias
2018/06/07 21:52:00
According to our legal department all source files
f.lopez
2018/06/18 18:41:30
Acknowledged.
| |
2 # | |
3 # This file is part of the Adblock Plus infrastructure | |
4 # Copyright (C) 2018-present eyeo GmbH | |
5 # | |
6 # Adblock Plus is free software: you can redistribute it and/or modify | |
7 # it under the terms of the GNU General Public License version 3 as | |
8 # published by the Free Software Foundation. | |
9 # | |
10 # Adblock Plus is distributed in the hope that it will be useful, | |
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 # GNU General Public License for more details. | |
14 # | |
15 # You should have received a copy of the GNU General Public License | |
16 # along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>. | |
2 | 17 |
3 import argparse | 18 import argparse |
4 from filecmp import dircmp | 19 from filecmp import dircmp |
5 import hashlib | 20 import hashlib |
6 import os | 21 import os |
7 import sys | 22 import sys |
8 import shutil | 23 import shutil |
9 import tarfile | 24 import tarfile |
10 import tempfile | 25 import tempfile |
11 import urllib | 26 import urllib |
12 | 27 |
13 | 28 |
14 def download(url, tmp_dir): | 29 __doc__ = """This script MUST be renamed in the form of $WEBSITE, e.g. |
30 help.eyeo.com, --name must be provided in order to fetch the | |
31 files, expected files to be fetched are $NAME.tar.gz and $NAME.md5 in | |
32 order to compare the hashes. --source must be an URL, e.g. | |
33 https://helpcenter.eyeofiles.com""" | |
34 | |
35 | |
36 def download(url, temporary_directory): | |
15 file_name = url.split('/')[-1] | 37 file_name = url.split('/')[-1] |
16 abs_file_name = os.path.join(tmp_dir, file_name) | 38 absolute_file_path = os.path.join(temporary_directory, file_name) |
17 print 'Downloading: ' + file_name | 39 print 'Downloading: ' + file_name |
18 urllib.urlretrieve(url, abs_file_name) | 40 urllib.urlretrieve(url, absolute_file_path) |
19 return abs_file_name | 41 return absolute_file_path |
20 | 42 |
21 | 43 |
def calculate_md5(file):
    """Return the hex MD5 digest of the file at path *file*.

    The payload is a gzipped tarball, so the file is opened in binary
    mode ('rb'); the original text-mode open can corrupt the bytes on
    platforms with newline translation and fails outright on Python 3,
    where md5 requires bytes. Reading in chunks keeps memory flat for
    large archives.
    """
    digest = hashlib.md5()
    with open(file, 'rb') as file_handle:
        # 64 KiB chunks: large tarballs never need to fit in memory.
        for chunk in iter(lambda: file_handle.read(65536), b''):
            digest.update(chunk)
    # hexdigest() never contains whitespace, so no strip() is needed.
    return digest.hexdigest()
27 | 49 |
28 | 50 |
def read_md5(file):
    """Return the first line of *file*, stripped of surrounding whitespace.

    The .md5 companion file is expected to contain the hex digest on
    its first line.
    """
    with open(file) as checksum_file:
        first_line = checksum_file.readline()
    return first_line.strip()
33 | 55 |
34 | 56 |
def untar(tar_file, temporary_directory):
    """Extract the gzipped tarball *tar_file* into *temporary_directory*.

    Does nothing if *tar_file* is not a tar archive (matching the
    original silent-skip behaviour). Before extracting, every member
    path is checked so that a malicious archive cannot write outside
    *temporary_directory* via '..' or absolute member names (classic
    tarfile path-traversal issue); a bad member raises ValueError.
    """
    if not tarfile.is_tarfile(tar_file):
        return
    with tarfile.open(tar_file, 'r:gz') as tar:
        base = os.path.realpath(temporary_directory)
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(base, member.name))
            if target != base and not target.startswith(base + os.sep):
                raise ValueError(
                    'Refusing to extract {0} outside {1}'.format(
                        member.name, base))
        tar.extractall(temporary_directory)
39 | 61 |
40 | 62 |
def remove_tree(to_remove):
    """Delete *to_remove*, whether file or directory; ignore missing paths."""
    if not os.path.exists(to_remove):
        return
    if os.path.isdir(to_remove):
        shutil.rmtree(to_remove)
    else:
        os.remove(to_remove)
47 | 69 |
48 | 70 |
def deploy_files(directory_comparison):
    """Mirror the right side of *directory_comparison* onto the left side.

    - files that differ are overwritten with the right-side version
    - entries only on the left are deleted
    - entries only on the right are copied over
    then recurse into common subdirectories.

    Fix: the original called copytree(right, left) once PER entry in
    diff_files and right_only, re-copying the entire right tree each
    time while ignoring the loop variable. Only the changed entry is
    copied now; the deployed result is the same.
    """
    for name in directory_comparison.diff_files:
        # diff_files only ever contains regular files common to both sides.
        shutil.copy2(os.path.join(directory_comparison.right, name),
                     os.path.join(directory_comparison.left, name))
    for name in directory_comparison.left_only:
        remove_tree(os.path.join(directory_comparison.left, name))
    for name in directory_comparison.right_only:
        source_path = os.path.join(directory_comparison.right, name)
        destination_path = os.path.join(directory_comparison.left, name)
        if os.path.isdir(source_path):
            copytree(source_path, destination_path)
        else:
            shutil.copy2(source_path, destination_path)
    for subdirectory_comparison in directory_comparison.subdirs.values():
        deploy_files(subdirectory_comparison)
58 | 80 |
59 | 81 |
# shutil.copytree is not used because it requires the destination
# directory not to exist; deleting and recreating the destination could
# break the site for the duration of the deployment.
# For more info read: https://docs.python.org/2/library/shutil.html
def copytree(source, destination):
    """Recursively merge *source* into *destination*, overwriting files."""
    if not os.path.exists(destination):
        os.makedirs(destination)
        shutil.copystat(source, destination)
    for item in os.listdir(source):
        source_path = os.path.join(source, item)
        destination_path = os.path.join(destination, item)
        if os.path.isdir(source_path):
            copytree(source_path, destination_path)
        else:
            shutil.copy2(source_path, destination_path)
72 | 97 |
73 | 98 |
74 if __name__ == '__main__': | 99 if __name__ == '__main__': |
100 website = os.path.basename(__file__) | |
75 parser = argparse.ArgumentParser( | 101 parser = argparse.ArgumentParser( |
76 description="""Fetch a compressed archive in the form of $HASH.tar.gz | 102 description="""Fetch a compressed archive in the form of $NAME.tar.gz |
77 and deploy it to /var/www/$WEBSITE folder""", | 103 and deploy it to /var/www/{0} folder""".format(website), |
78 epilog="""--hash must be provided in order to fetch the files, | 104 epilog=__doc__, |
79 expected files to be fetched are $HASH.tar.gz and $HASH.md5 in | |
80 order to compare the hashes. | |
81 --source must be an URL, e.g. | |
82 https://helpcenter.eyeofiles.com""", | |
83 ) | 105 ) |
84 parser.add_argument('--hash', action='store', type=str, required=True, | 106 parser.add_argument('--name', action='store', type=str, required=True, |
mathias
2018/06/07 21:52:00
Is this sure to always be a hash? Wouldn't `--revi
f.lopez
2018/06/18 18:41:30
Acknowledged.
| |
85 help='Hash of the commit to deploy') | 107 help='Name of the tarball to deploy') |
86 parser.add_argument('--source', action='store', type=str, required=True, | 108 parser.add_argument('--source', action='store', type=str, required=True, |
87 help='The source where files will be downloaded') | 109 help='The source where files will be downloaded') |
88 parser.add_argument('--website', action='store', type=str, | 110 arguments = parser.parse_args() |
mathias
2018/06/07 21:52:00
Why does option even exist? IT should not be possi
f.lopez
2018/06/18 18:41:30
Acknowledged.
| |
89 help='The name of the website [e.g. help.eyeo.com]') | 111 name = arguments.name |
90 args = parser.parse_args() | 112 source = arguments.source |
91 hash = args.hash | 113 url_file = '{0}/{1}.tar.gz'.format(source, name) |
92 source = args.source | 114 url_md5 = '{0}/{1}.md5'.format(source, name) |
93 url_file = '{0}/{1}.tar.gz'.format(source, hash) | 115 temporary_directory = tempfile.mkdtemp() |
94 url_md5 = '{0}/{1}.md5'.format(source, hash) | |
95 tmp_dir = tempfile.mkdtemp() | |
96 try: | 116 try: |
97 down_file = download(url_file, tmp_dir) | 117 downloaded_file = download(url_file, temporary_directory) |
98 down_md5 = download(url_md5, tmp_dir) | 118 downloaded_md5 = download(url_md5, temporary_directory) |
99 if calculate_md5(down_file) == read_md5(down_md5): | 119 if calculate_md5(downloaded_file) == read_md5(downloaded_md5): |
100 untar(down_file, tmp_dir) | 120 untar(downloaded_file, temporary_directory) |
101 hash_directory = os.path.join(tmp_dir, hash) | 121 tarball_directory = os.path.join(temporary_directory, name) |
102 destination = os.path.join('/var/www/', args.website) | 122 destination = os.path.join('/var/www/', website) |
103 dcmp = dircmp(destination, hash_directory) | 123 directory_comparison = dircmp(destination, tarball_directory) |
104 print 'Deploying files' | 124 print 'Deploying files' |
105 deploy_files(dcmp) | 125 deploy_files(directory_comparison) |
106 else: | 126 else: |
107 sys.exit("Hashes don't match") | 127 error_message = """{0}.tar.gz md5 computation doesn't match {0}.md5 |
108 except Exception as e: | 128 contents""".format(name) |
109 sys.exit(e) | 129 sys.exit(error_message) |
130 except Exception as error: | |
131 sys.exit(error) | |
110 finally: | 132 finally: |
111 shutil.rmtree(tmp_dir) | 133 shutil.rmtree(temporary_directory) |
LEFT | RIGHT |