Index: sitescripts/content_blocker_lists/bin/generate_lists.py
diff --git a/sitescripts/content_blocker_lists/bin/generate_lists.py b/sitescripts/content_blocker_lists/bin/generate_lists.py
index d86a52a95bc4edf0a14a017573c893f1f2c693bf..5d3731f3b891d2bd231f03e9f564ac321e5d2212 100644
--- a/sitescripts/content_blocker_lists/bin/generate_lists.py
+++ b/sitescripts/content_blocker_lists/bin/generate_lists.py
@@ -16,61 +16,91 @@
 # You should have received a copy of the GNU General Public License
 # along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.
+from contextlib import closing
+from datetime import datetime
+import json
 import os
+from StringIO import StringIO
 import subprocess
+import re
 import urllib2
 from sitescripts.utils import get_config
-def _update_abp2blocklist():
+config = dict(get_config().items("content_blocker_lists"))
+
+def update_abp2blocklist():
   with open(os.devnull, "w") as devnull:
-    config = get_config()
-    abp2blocklist_path = config.get("content_blocker_lists",
-                                    "abp2blocklist_path")
+    abp2blocklist_path = config["abp2blocklist_path"]
     if os.path.isdir(abp2blocklist_path):
       subprocess.check_call(("hg", "pull", "-u", "-R", abp2blocklist_path),
                             stdout=devnull)
     else:
-      abp2blocklist_url = config.get("content_blocker_lists",
-                                     "abp2blocklist_url")
-      subprocess.check_call(("hg", "clone", abp2blocklist_url,
+      subprocess.check_call(("hg", "clone", config["abp2blocklist_url"],
                              abp2blocklist_path), stdout=devnull)
     subprocess.check_call(("npm", "install"), cwd=abp2blocklist_path,
                           stdout=devnull)
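
For reference, a sketch of the configuration section this script reads via get_config(). The key names all appear in the code; the paths and URLs are invented placeholders:

    [content_blocker_lists]
    abp2blocklist_path=/opt/abp2blocklist
    abp2blocklist_url=https://hg.adblockplus.org/abp2blocklist
    easylist_url=https://easylist-downloads.adblockplus.org/easylist.txt
    exceptionrules_url=https://easylist-downloads.adblockplus.org/exceptionrules.txt
    easylist_content_blocker_path=/var/www/easylist_content_blocker.json
    combined_content_blocker_path=/var/www/combined_content_blocker.json
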
-def _download(url_key):
-  url = get_config().get("content_blocker_lists", url_key)
-  response = urllib2.urlopen(url)
-  try:
-    return response.read()
-  finally:
-    response.close()
+def download_filter_list(url):
+  filter_list = {}
+  with closing(urllib2.urlopen(url)) as response:
+    filter_list["body"] = response.read()
+  filter_list["header"] = parse_filter_list_header(filter_list["body"])
+  filter_list["header"]["url"] = url
+  return filter_list
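
The dict returned by download_filter_list has this shape; the header fields depend on the downloaded list, with "Version" parsed from the list text and "url" injected afterwards (the values here are invented for illustration):

    {
      "body": "[Adblock Plus 2.0]\n! Version: 201512011207\n...",
      "header": {
        "Version": "201512011207",
        "url": "https://easylist-downloads.adblockplus.org/easylist.txt"
      }
    }
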
+
+def parse_filter_list_header(filter_list):
+  field_re = re.compile(r"^!\s*([^:]+):\s*(.+)$")
+  with closing(StringIO(filter_list)) as stream:

Sebastian Noack
2015/11/30 13:55:49
Never mind closing a StringIO. It doesn't do anything.
kzar
2015/11/30 15:13:11
We need the Version field, but otherwise Done.
Sebastian Noack
2015/11/30 15:49:19
Well, you set the version field based on the current time.
kzar
2015/11/30 17:06:00
That's the version for the block list, in the sources.
Felix Dahlke
2015/12/01 08:32:56
You still won't have to parse the header of the filter list.
kzar
2015/12/01 12:13:38
I don't see that header present?
curl -I https:/
Felix Dahlke
2015/12/01 14:04:19
Ouch, big mixup on my end, we only have this for n

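Sebastian's point: closing a StringIO only discards its buffer, which garbage collection handles anyway, so the closing() wrapper adds nothing. A minimal sketch of the same parsing without StringIO, using splitlines(); this is one possible rewrite, not necessarily the patch as landed:

    def parse_filter_list_header(filter_list):
      field_re = re.compile(r"^!\s*([^:]+):\s*(.+)$")
      header = {}
      # Skip the "[Adblock Plus 2.0]" marker line, then collect the
      # "! Field: value" comment lines that follow it.
      for line in filter_list.splitlines()[1:]:
        match = field_re.search(line)
        if match:
          header[match.group(1)] = match.group(2)
        else:
          break
      return header
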
+    header = {}
+    next(stream)
+    for line in stream:
+      match = field_re.search(line)
+      if match:
+        header[match.group(1)] = match.group(2)
+      else:

Sebastian Noack
2015/11/30 13:55:49
Nit: If you negate the logic you don't need an else.

+        break
+  return header
+
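The nit above, spelled out: inverting the test puts the early exit first and removes the need for an else branch. A sketch of the loop rewritten that way, with the same behavior as the patch:

    for line in stream:
      match = field_re.search(line)
      if not match:
        break  # stop at the first non-header line
      header[match.group(1)] = match.group(2)
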
+def generate_metadata(filter_lists, expires="4 days"):

kzar
2015/11/27 16:28:11
It is unclear where the expires value for content blocker lists should come from.
Sebastian Noack
2015/11/30 13:55:49
The expiration interval should be configured in sitescripts' configuration.
kzar
2015/11/30 15:13:11
Done.

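Following Sebastian's suggestion, the interval would be read from the same config section instead of being hardcoded. A sketch; the key name block_list_expires is an invented placeholder, not something the patch defines:

    # Hypothetical config key, falling back to the patch's "4 days" default.
    expires = config.get("block_list_expires", "4 days")
    metadata = generate_metadata(filter_lists, expires=expires)
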
+  metadata = {
+    "sources": [],
+    "version": datetime.utcnow().strftime("%Y%m%d%H%M"),
+    "expires": expires
+  }
+  for filter_list in filter_lists:
+    metadata["sources"].append({k.lower(): filter_list["header"][k]
+                                for k in ["url", "Version"]})
+  return metadata
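
For a run over EasyList plus the exception rules, generate_metadata yields something like the following. The source versions come from each list's "! Version:" header, the top-level version is the current UTC time; all timestamps below are invented for illustration:

    {
      "sources": [
        {"url": "https://easylist-downloads.adblockplus.org/easylist.txt",
         "version": "201512011207"},
        {"url": "https://easylist-downloads.adblockplus.org/exceptionrules.txt",
         "version": "201512011134"}
      ],
      "version": "201512011300",
      "expires": "4 days"
    }
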
-def _convert_filter_list(sources, destination_path_key):
-  config = get_config()
-  destination_path = config.get("content_blocker_lists", destination_path_key)
-  with open(destination_path, "wb") as destination_file:
-    abp2blocklist_path = config.get("content_blocker_lists",
-                                    "abp2blocklist_path")
+def write_block_list(filter_lists, path):
+  metadata = generate_metadata(filter_lists)

kzar
2015/11/27 16:28:11
I'm doing it this way to avoid having to load the whole block list into memory.
Sebastian Noack
2015/11/30 13:55:49
We don't have to care too much about memory consumption.
kzar
2015/11/30 15:13:11
Done.

+  header = json.dumps(metadata, indent=2).rsplit("}", 1)[0].rstrip()
+  header += ',\n  "rules": '
+  with open(path, "wb") as destination_file:
+    destination_file.write(header)
+    destination_file.flush()

Sebastian Noack
2015/11/30 13:55:49
Any particular reason you flush the file here?

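There is a plausible reason for it: destination_file buffers writes in userspace, while the node child process below writes straight to the underlying file descriptor. Without a flush, the buffered header could reach the file after rules the child has already written. In sketch form:

    destination_file.write(header)  # lands in Python's userspace buffer
    destination_file.flush()        # push the header to the fd before the
                                    # child process starts writing to it
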
     process = subprocess.Popen(("node", "abp2blocklist.js"),
-                               cwd=abp2blocklist_path, stdin=subprocess.PIPE,
+                               cwd=config["abp2blocklist_path"],
+                               stdin=subprocess.PIPE,
                                stdout=destination_file)
     try:
-      for source in sources:
-        print >>process.stdin, source
+      for filter_list in filter_lists:
+        print >>process.stdin, filter_list["body"]
     finally:
       process.stdin.close()
       process.wait()
+    print >>destination_file, "}"
     if process.returncode:
       raise Exception("abp2blocklist returned %s" % process.returncode)
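
Stitched together, the file written by write_block_list ends up shaped like this: the metadata object with its closing brace cut off by rsplit(), the appended "rules" key, the JSON array abp2blocklist.js emits on stdout, and the final "}" printed afterwards. The rule shown is an invented placeholder in WebKit content-blocker format:

    {
      "sources": [...],
      "version": "201512011300",
      "expires": "4 days",
      "rules": [
        {"trigger": {"url-filter": "..."}, "action": {"type": "block"}}
      ]
    }
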
 if __name__ == "__main__":
-  _update_abp2blocklist()
+  update_abp2blocklist()
-  easylist = _download("easylist_url")
-  exceptionrules = _download("exceptionrules_url")
+  easylist = download_filter_list(config["easylist_url"])
+  exceptionrules = download_filter_list(config["exceptionrules_url"])
-  _convert_filter_list([easylist], "easylist_content_blocker_path")
-  _convert_filter_list([easylist, exceptionrules],
-                       "combined_content_blocker_path")
+  write_block_list([easylist], config["easylist_content_blocker_path"])
+  write_block_list([easylist, exceptionrules],
+                   config["combined_content_blocker_path"])