| Left: | ||
| Right: |
| LEFT | RIGHT |
|---|---|
| 1 # This Source Code Form is subject to the terms of the Mozilla Public | 1 # This Source Code Form is subject to the terms of the Mozilla Public |
| 2 # License, v. 2.0. If a copy of the MPL was not distributed with this | 2 # License, v. 2.0. If a copy of the MPL was not distributed with this |
| 3 # file, You can obtain one at http://mozilla.org/MPL/2.0/. | 3 # file, You can obtain one at http://mozilla.org/MPL/2.0/. |
| 4 | 4 |
| 5 import re | 5 import re |
| 6 import struct | 6 import struct |
| 7 import time | 7 import time |
| 8 import zlib | 8 import zlib |
| 9 | 9 |
| 10 from Crypto.Hash import SHA | 10 from Crypto.Hash import SHA |
| 11 from Crypto.PublicKey import RSA | 11 from Crypto.PublicKey import RSA |
| 12 from Crypto.Signature import PKCS1_v1_5 | 12 from Crypto.Signature import PKCS1_v1_5 |
| 13 | 13 |
| 14 from buildtools.packager import getTemplate | 14 from buildtools.packager import getTemplate |
| 15 | 15 |
| 16 XAR_HEADER = struct.Struct('>IHHQQI') | |
| 16 XAR_HEADER_MAGIC = 0x78617221 | 17 XAR_HEADER_MAGIC = 0x78617221 |
| 17 XAR_HEADER_SIZE = 28 | |
| 18 XAR_VERSION = 1 | 18 XAR_VERSION = 1 |
| 19 XAR_CKSUM_SHA1 = 1 | 19 XAR_CKSUM_SHA1 = 1 |
| 20 | 20 |
| 21 PRIVATE_KEY_REGEXP = r'-+BEGIN PRIVATE KEY-+(.*?)-+END PRIVATE KEY-+' | |
| 22 CERTIFICATE_REGEXP = r'-+BEGIN CERTIFICATE-+(.*?)-+END CERTIFICATE-+' | |
| 23 | 21 |
| 24 | 22 def read_certificates_and_key(keyfile): |
| 25 def read_key(keyfile): | |
| 26 with open(keyfile, 'r') as file: | 23 with open(keyfile, 'r') as file: |
| 27 data = file.read() | 24 data = file.read() |
| 28 match = re.search(PRIVATE_KEY_REGEXP, data, re.S) | |
|
Sebastian Noack
2016/08/17 12:53:45
You can leave the with block after the data has been read
Wladimir Palant
2016/08/17 14:11:40
Done.
| |
| 29 if not match: | |
| 30 raise Exception('Cound not find private key in file') | |
| 31 return RSA.importKey(match.group(0)) | |
| 32 | 25 |
| 26 certificates = [] | |
| 27 key = None | |
| 28 for match in re.finditer(r'-+BEGIN (.*?)-+(.*?)-+END \1-+', data, re.S): | |
| 29 section = match.group(1) | |
| 30 if section == 'CERTIFICATE': | |
| 31 certificates.append(re.sub(r'\s+', '', match.group(2))) | |
| 32 elif section == 'PRIVATE KEY': | |
| 33 key = RSA.importKey(match.group(0)) | |
| 34 if not key: | |
| 35 raise Exception('Could not find private key in file') | |
| 33 | 36 |
| 34 def read_certificates(keyfile): | 37 return certificates, key |
| 35 certificates = [] | |
| 36 with open(keyfile, 'r') as file: | |
| 37 data = file.read() | |
| 38 for match in re.finditer(CERTIFICATE_REGEXP, data, re.S): | |
| 39 certificates.append(re.sub(r'\s+', '', match.group(1))) | |
| 40 return certificates | |
| 41 | 38 |
| 42 | 39 |
| 43 def get_checksum(data): | 40 def get_checksum(data): |
| 44 return SHA.new(data).digest() | 41 return SHA.new(data).digest() |
| 45 | 42 |
| 46 | 43 |
| 47 def get_hexchecksum(data): | 44 def get_hexchecksum(data): |
| 48 return SHA.new(data).hexdigest() | 45 return SHA.new(data).hexdigest() |
| 49 | 46 |
| 50 | 47 |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 93 'offset': offset, | 90 'offset': offset, |
| 94 } | 91 } |
| 95 file_id += 1 | 92 file_id += 1 |
| 96 offset += len(compressed) | 93 offset += len(compressed) |
| 97 directory_stack[-1][1].append(file) | 94 directory_stack[-1][1].append(file) |
| 98 compressed_data.append(compressed) | 95 compressed_data.append(compressed) |
| 99 return compressed_data | 96 return compressed_data |
| 100 | 97 |
| 101 | 98 |
| 102 def create(archivepath, contents, keyfile): | 99 def create(archivepath, contents, keyfile): |
| 103 key = read_key(keyfile) | 100 certificates, key = read_certificates_and_key(keyfile) |
| 104 checksum_length = len(get_checksum('')) | 101 checksum_length = len(get_checksum('')) |
|
Sebastian Noack
2016/08/17 12:53:45
No need to hash any (empty) data to get the digest size
Wladimir Palant
2016/08/17 14:11:40
Strictly speaking - no, it's not necessary. Howeve
| |
| 105 params = { | 102 params = { |
| 106 'certificates': read_certificates(keyfile), | 103 'certificates': certificates, |
| 107 | 104 |
| 108 # Timestamp epoch starts at 2001-01-01T00:00:00.000Z | 105 # Timestamp epoch starts at 2001-01-01T00:00:00.000Z |
| 109 'timestamp_numerical': time.time() - 978307200, | 106 'timestamp_numerical': time.time() - 978307200, |
| 110 'timestamp_iso': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), | 107 'timestamp_iso': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), |
| 111 | 108 |
| 112 'checksum': { | 109 'checksum': { |
| 113 'offset': 0, | 110 'offset': 0, |
| 114 'size': checksum_length, | 111 'size': checksum_length, |
| 115 }, | 112 }, |
| 116 'signature': { | 113 'signature': { |
| 117 'offset': checksum_length, | 114 'offset': checksum_length, |
| 118 'size': len(get_signature(key, '')), | 115 'size': len(get_signature(key, '')), |
| 119 }, | 116 }, |
| 120 'files': [], | 117 'files': [], |
| 121 } | 118 } |
| 122 | 119 |
| 123 offset = params['signature']['offset'] + params['signature']['size'] | 120 offset = params['signature']['offset'] + params['signature']['size'] |
| 124 compressed_data = compress_files(contents, params['files'], offset) | 121 compressed_data = compress_files(contents, params['files'], offset) |
| 125 | 122 |
| 126 template = getTemplate('xartoc.xml.tmpl', autoEscape=True) | 123 template = getTemplate('xartoc.xml.tmpl', autoEscape=True) |
| 127 toc_uncompressed = template.render(params).encode('utf-8') | 124 toc_uncompressed = template.render(params).encode('utf-8') |
| 128 toc_compressed = zlib.compress(toc_uncompressed, 9) | 125 toc_compressed = zlib.compress(toc_uncompressed, 9) |
| 129 | 126 |
| 130 with open(archivepath, 'wb') as file: | 127 with open(archivepath, 'wb') as file: |
| 131 # The file starts with a minimalistic header | 128 # The file starts with a minimalistic header |
| 132 header = struct.pack('>IHHQQI', XAR_HEADER_MAGIC, XAR_HEADER_SIZE, | 129 file.write(XAR_HEADER.pack(XAR_HEADER_MAGIC, XAR_HEADER.size, |
|
Sebastian Noack
2016/08/17 12:53:45
Note that you could avoid hard-coding the header size
Wladimir Palant
2016/08/17 14:11:40
Done.
| |
| 133 XAR_VERSION, len(toc_compressed), | 130 XAR_VERSION, len(toc_compressed), |
| 134 len(toc_uncompressed), XAR_CKSUM_SHA1) | 131 len(toc_uncompressed), XAR_CKSUM_SHA1)) |
| 135 file.write(header) | |
| 136 | 132 |
| 137 # It's followed up with a compressed XML table of contents | 133 # It's followed up with a compressed XML table of contents |
| 138 file.write(toc_compressed) | 134 file.write(toc_compressed) |
| 139 | 135 |
| 140 # Now the actual data, all the offsets are in the table of contents | 136 # Now the actual data, all the offsets are in the table of contents |
| 141 file.write(get_checksum(toc_compressed)) | 137 file.write(get_checksum(toc_compressed)) |
| 142 file.write(get_signature(key, toc_compressed)) | 138 file.write(get_signature(key, toc_compressed)) |
| 143 for blob in compressed_data: | 139 for blob in compressed_data: |
| 144 file.write(blob) | 140 file.write(blob) |
| LEFT | RIGHT |