
Delta Between Two Patch Sets: lib/abp2blocklist.js

Issue 29426594: Issue 3673 - Merge closely matching rules (Closed) Base URL: https://hg.adblockplus.org/abp2blocklist
Left Patch Set: Improved matching algorithm Created May 4, 2017, 2:36 a.m.
Right Patch Set: Rebase Created July 28, 2017, 1:31 p.m.
1 /* 1 /*
2 * This file is part of Adblock Plus <https://adblockplus.org/>, 2 * This file is part of Adblock Plus <https://adblockplus.org/>,
3 * Copyright (C) 2006-2017 eyeo GmbH 3 * Copyright (C) 2006-2017 eyeo GmbH
4 * 4 *
5 * Adblock Plus is free software: you can redistribute it and/or modify 5 * Adblock Plus is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 3 as 6 * it under the terms of the GNU General Public License version 3 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * Adblock Plus is distributed in the hope that it will be useful, 9 * Adblock Plus is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>. 15 * along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18 /** @module abp2blocklist */ 18 /** @module abp2blocklist */
19 19
20 "use strict"; 20 "use strict";
21 21
22 const crypto = require("crypto");
23
24 let filterClasses = require("filterClasses"); 22 let filterClasses = require("filterClasses");
25 let tldjs = require("tldjs");
26 let punycode = require("punycode"); 23 let punycode = require("punycode");
27 24
28 const selectorLimit = 5000; 25 const selectorLimit = 5000;
29 const typeMap = filterClasses.RegExpFilter.typeMap; 26 const typeMap = filterClasses.RegExpFilter.typeMap;
30 const whitelistableRequestTypes = (typeMap.IMAGE 27
31 | typeMap.STYLESHEET 28 const httpRequestTypes = typeMap.IMAGE |
32 | typeMap.SCRIPT 29 typeMap.STYLESHEET |
33 | typeMap.FONT 30 typeMap.SCRIPT |
34 | typeMap.MEDIA 31 typeMap.FONT |
35 | typeMap.POPUP 32 typeMap.MEDIA |
36 | typeMap.OBJECT 33 typeMap.POPUP |
37 | typeMap.OBJECT_SUBREQUEST 34 typeMap.OBJECT |
38 | typeMap.XMLHTTPREQUEST 35 typeMap.OBJECT_SUBREQUEST |
39 | typeMap.PING 36 typeMap.XMLHTTPREQUEST |
40 | typeMap.SUBDOCUMENT 37 typeMap.PING |
41 | typeMap.OTHER); 38 typeMap.SUBDOCUMENT |
39 typeMap.OTHER;
40 const rawRequestTypes = typeMap.XMLHTTPREQUEST |
41 typeMap.WEBSOCKET |
42 typeMap.WEBRTC |
43 typeMap.OBJECT_SUBREQUEST |
44 typeMap.PING |
45 typeMap.OTHER;
46 const whitelistableRequestTypes = httpRequestTypes |
47 typeMap.WEBSOCKET |
48 typeMap.WEBRTC;
49
50 function callLater(func)
51 {
52 return new Promise(resolve =>
53 {
54 let call = () => resolve(func());
55
56 // If this looks like Node.js, call process.nextTick, otherwise call
57 // setTimeout.
58 if (typeof process != "undefined")
59 process.nextTick(call);
60 else
61 setTimeout(call, 0);
62 });
63 }
64
65 function async(callees, mapFunction)
66 {
67 if (!(Symbol.iterator in callees))
68 callees = [callees];
69
70 let lastPause = Date.now();
71 let index = 0;
72
73 let promise = Promise.resolve();
74
75 for (let next of callees)
76 {
77 let currentIndex = index;
78
79 promise = promise.then(() =>
80 {
81 if (mapFunction)
82 next = mapFunction(next, currentIndex);
83
84 // If it has been 100ms or longer since the last call, take a pause. This
85 // keeps the browser from freezing up.
86 let now = Date.now();
87 if (now - lastPause >= 100)
88 {
89 lastPause = now;
90 return callLater(next);
91 }
92
93 return next();
94 });
95
96 index++;
97 }
98
99 return promise;
100 }
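
The callLater() and async() helpers above run a batch of callees sequentially, pausing every 100ms so large conversions don't freeze the browser or block the Node.js event loop. A minimal usage sketch (not part of the patch, assuming the internal async() helper is in scope, e.g. from a unit test):

let items = ["first", "second", "third"];

async(items, (item, index) => () =>
{
  // Each callee runs in sequence; if more than 100ms have passed since the
  // last pause, the next call is deferred via process.nextTick/setTimeout.
  console.log(index, item);
})
.then(() => console.log("done"));
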
42 101
43 function parseDomains(domains, included, excluded) 102 function parseDomains(domains, included, excluded)
44 { 103 {
45 for (let domain in domains) 104 for (let domain in domains)
46 { 105 {
47 if (domain != "") 106 if (domain != "")
48 { 107 {
49 let enabled = domains[domain]; 108 let enabled = domains[domain];
50 domain = punycode.toASCII(domain.toLowerCase()); 109 domain = punycode.toASCII(domain.toLowerCase());
51 110
52 if (!enabled) 111 if (!enabled)
53 excluded.push(domain); 112 excluded.push(domain);
54 else if (!domains[""]) 113 else if (!domains[""])
55 included.push(domain); 114 included.push(domain);
56 } 115 }
57 } 116 }
58 } 117 }
59 118
60 function escapeRegExp(s) 119 function escapeRegExp(s)
61 { 120 {
62 return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); 121 return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
63 } 122 }
64 123
65 function matchDomain(domain) 124 function matchDomain(domain)
66 { 125 {
126 if (!domain)
127 return "^https?://";
128
67 return "^https?://([^/:]*\\.)?" + escapeRegExp(domain).toLowerCase() + "[/:]"; 129 return "^https?://([^/:]*\\.)?" + escapeRegExp(domain).toLowerCase() + "[/:]";
130 }
131
132 function getURLSchemes(contentType)
133 {
134 // If the given content type includes all supported URL schemes, simply
135 // return a single generic URL scheme pattern. This minimizes the size of the
136 // generated rule set. The downside to this is that it will also match
137 // schemes that we do not want to match (e.g. "ftp://"), but this can be
138 // mitigated by adding exceptions for those schemes.
139 if (contentType & typeMap.WEBSOCKET && contentType & typeMap.WEBRTC &&
140 contentType & httpRequestTypes)
141 return ["[^:]+:(//)?"];
142
143 let urlSchemes = [];
144
145 if (contentType & typeMap.WEBSOCKET)
146 urlSchemes.push("wss?://");
147
148 if (contentType & typeMap.WEBRTC)
149 urlSchemes.push("stuns?:", "turns?:");
150
151 if (contentType & httpRequestTypes)
152 urlSchemes.push("https?://");
153
154 return urlSchemes;
155 }
156
157 function findSubdomainsInList(domain, list)
158 {
159 let subdomains = [];
160 let suffixLength = domain.length + 1;
161
162 for (let name of list)
163 {
164 if (name.length > suffixLength && name.slice(-suffixLength) == "." + domain)
165 subdomains.push(name.slice(0, -suffixLength));
166 }
167
168 return subdomains;
169 }
170
171 function extractFilterDomains(filters)
172 {
173 let domains = new Set();
174 for (let filter of filters)
175 {
176 let parsed = parseFilterRegexpSource(filter.regexpSource);
177 if (parsed.justHostname)
178 domains.add(parsed.hostname);
179 }
180 return domains;
68 } 181 }
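
For reference, a sketch (not part of the patch) of what getURLSchemes() above returns for a few content type masks, using the typeMap and httpRequestTypes constants defined at the top of the module:

getURLSchemes(typeMap.WEBSOCKET);                  // ["wss?://"]
getURLSchemes(typeMap.WEBRTC);                     // ["stuns?:", "turns?:"]
getURLSchemes(typeMap.IMAGE | typeMap.WEBSOCKET);  // ["wss?://", "https?://"]
getURLSchemes(httpRequestTypes | typeMap.WEBSOCKET | typeMap.WEBRTC);
// ["[^:]+:(//)?"]  (all supported schemes collapse into one generic pattern)
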
69 182
70 function convertElemHideFilter(filter, elemhideSelectorExceptions) 183 function convertElemHideFilter(filter, elemhideSelectorExceptions)
71 { 184 {
72 let included = []; 185 let included = [];
73 let excluded = []; 186 let excluded = [];
74 let rules = [];
75 187
76 parseDomains(filter.domains, included, excluded); 188 parseDomains(filter.domains, included, excluded);
77 189
78 if (excluded.length == 0 && !(filter.selector in elemhideSelectorExceptions)) 190 if (excluded.length == 0 && !(filter.selector in elemhideSelectorExceptions))
79 return {matchDomains: included.map(matchDomain), selector: filter.selector}; 191 return {matchDomains: included, selector: filter.selector};
80 } 192 }
81 193
82 /** 194 /**
83 * Parse the given filter "regexpSource" string, producing a regular expression, 195 * Parse the given filter "regexpSource" string, producing a regular expression,
84 * extracting the hostname (if any), deciding if the regular expression is safe 196 * extracting the hostname (if any), deciding if the regular expression is safe
85 * to be converted and matched as lower case, and noting if the source contains 197 * to be converted and matched as lower case, and noting if the source contains
86 * anything after the hostname. 198 * anything after the hostname.
87 * 199 *
88 * @param {string} text regexpSource property of a filter 200 * @param {string} text regexpSource property of a filter
201 * @param {string} urlScheme The URL scheme to use in the regular expression
89 * @returns {object} An object containing a regular expression string, a bool 202 * @returns {object} An object containing a regular expression string, a bool
90 * indicating if the filter can be safely matched as lower 203 * indicating if the filter can be safely matched as lower
91 * case, a hostname string (or undefined) and a bool 204 * case, a hostname string (or undefined) and a bool
92 * indicating if the source only contains a hostname or not: 205 * indicating if the source only contains a hostname or not:
93 * {regexp: "...", 206 * {regexp: "...",
94 * canSafelyMatchAsLowercase: true/false, 207 * canSafelyMatchAsLowercase: true/false,
95 * hostname: "...", 208 * hostname: "...",
96 * justHostname: true/false} 209 * justHostname: true/false}
97 */ 210 */
98 function parseFilterRegexpSource(text) 211 function parseFilterRegexpSource(text, urlScheme)
99 { 212 {
100 let regexp = []; 213 let regexp = [];
101 let lastIndex = text.length - 1; 214
215 // Convert the text into an array of Unicode characters.
216 //
217 // In the case of surrogate pairs (the smiley emoji, for example), one
218 // Unicode code point is represented by two JavaScript characters together.
219 // We want to iterate over Unicode code points rather than JavaScript
220 // characters.
221 let characters = Array.from(text);
222
223 let lastIndex = characters.length - 1;
102 let hostname; 224 let hostname;
103 let hostnameStart = null; 225 let hostnameStart = null;
104 let hostnameFinished = false; 226 let hostnameFinished = false;
105 let justHostname = false; 227 let justHostname = false;
106 let canSafelyMatchAsLowercase = false; 228 let canSafelyMatchAsLowercase = false;
107 229
108 for (let i = 0; i < text.length; i++) 230 if (!urlScheme)
109 { 231 urlScheme = getURLSchemes()[0];
110 let c = text[i]; 232
233 for (let i = 0; i < characters.length; i++)
234 {
235 let c = characters[i];
111 236
112 if (hostnameFinished) 237 if (hostnameFinished)
113 justHostname = false; 238 justHostname = false;
114 239
115 // If we're currently inside the hostname we have to be careful not to 240 // If we're currently inside the hostname we have to be careful not to
116 // escape any characters until after we have converted it to punycode. 241 // escape any characters until after we have converted it to punycode.
117 if (hostnameStart != null && !hostnameFinished) 242 if (hostnameStart != null && !hostnameFinished)
118 { 243 {
119 let endingChar = (c == "*" || c == "^" || 244 let endingChar = (c == "*" || c == "^" ||
120 c == "?" || c == "/" || c == "|"); 245 c == "?" || c == "/" || c == "|");
121 if (!endingChar && i != lastIndex) 246 if (!endingChar && i != lastIndex)
122 continue; 247 continue;
123 248
124 hostname = punycode.toASCII( 249 hostname = punycode.toASCII(
125 text.substring(hostnameStart, endingChar ? i : i + 1) 250 characters.slice(hostnameStart, endingChar ? i : i + 1).join("")
251 .toLowerCase()
126 ); 252 );
127 hostnameFinished = justHostname = true; 253 hostnameFinished = justHostname = true;
128 regexp.push(escapeRegExp(hostname)); 254 regexp.push(escapeRegExp(hostname));
129 if (!endingChar) 255 if (!endingChar)
130 break; 256 break;
131 } 257 }
132 258
133 switch (c) 259 switch (c)
134 { 260 {
135 case "*": 261 case "*":
136 if (regexp.length > 0 && i < lastIndex && text[i + 1] != "*") 262 if (regexp.length > 0 && i < lastIndex && characters[i + 1] != "*")
137 regexp.push(".*"); 263 regexp.push(".*");
138 break; 264 break;
139 case "^": 265 case "^":
140 if (i < lastIndex) 266 let alphabet = "a-z";
141 regexp.push("."); 267 // If justHostname is true and we've encountered a "^", it means we're
268 // still in the hostname part of the URL. Since hostnames are always
269 // lower case (Punycode), there's no need to include "A-Z" in the
270 // pattern. Further, subsequent code may lower-case the entire regular
271 // expression (if the URL contains only the hostname part), leaving us
272 // with "a-za-z", which would be redundant.
273 if (!justHostname)
274 alphabet = "A-Z" + alphabet;
275 let digits = "0-9";
276 // Note that the "-" must appear first here in order to retain its
277 // literal meaning within the brackets.
278 let specialCharacters = "-_.%";
279 let separator = "[^" + specialCharacters + alphabet + digits + "]";
280 if (i == 0)
281 regexp.push("^" + urlScheme + "(.*" + separator + ")?");
282 else if (i == lastIndex)
283 regexp.push("(" + separator + ".*)?$");
284 else
285 regexp.push(separator);
142 break; 286 break;
143 case "|": 287 case "|":
144 if (i == 0) 288 if (i == 0)
145 { 289 {
146 regexp.push("^"); 290 regexp.push("^");
147 break; 291 break;
148 } 292 }
149 if (i == lastIndex) 293 if (i == lastIndex)
150 { 294 {
151 regexp.push("$"); 295 regexp.push("$");
152 break; 296 break;
153 } 297 }
154 if (i == 1 && text[0] == "|") 298 if (i == 1 && characters[0] == "|")
155 { 299 {
156 hostnameStart = i + 1; 300 hostnameStart = i + 1;
157 canSafelyMatchAsLowercase = true; 301 canSafelyMatchAsLowercase = true;
158 regexp.push("https?://([^/]+\\.)?"); 302 regexp.push(urlScheme + "([^/]+\\.)?");
159 break; 303 break;
160 } 304 }
161 regexp.push("\\|"); 305 regexp.push("\\|");
162 break; 306 break;
163 case "/": 307 case "/":
164 if (!hostnameFinished && 308 if (!hostnameFinished &&
165 text.charAt(i-2) == ":" && text.charAt(i-1) == "/") 309 characters[i - 2] == ":" && characters[i - 1] == "/")
166 { 310 {
167 hostnameStart = i + 1; 311 hostnameStart = i + 1;
168 canSafelyMatchAsLowercase = true; 312 canSafelyMatchAsLowercase = true;
169 } 313 }
170 regexp.push("/"); 314 regexp.push("/");
171 break; 315 break;
172 case ".": case "+": case "$": case "?": 316 case ".": case "+": case "$": case "?":
173 case "{": case "}": case "(": case ")": 317 case "{": case "}": case "(": case ")":
174 case "[": case "]": case "\\": 318 case "[": case "]": case "\\":
175 regexp.push("\\", c); 319 regexp.push("\\", c);
176 break; 320 break;
177 default: 321 default:
178 if (hostnameFinished && (c >= "a" && c <= "z" || 322 if (hostnameFinished && (c >= "a" && c <= "z" ||
179 c >= "A" && c <= "Z")) 323 c >= "A" && c <= "Z"))
180 canSafelyMatchAsLowercase = false; 324 canSafelyMatchAsLowercase = false;
181 regexp.push(c); 325 regexp.push(c == "%" ? c : encodeURI(c));
182 } 326 }
183 } 327 }
184 328
185 return { 329 return {
186 regexp: regexp.join(""), 330 regexp: regexp.join(""),
187 canSafelyMatchAsLowercase: canSafelyMatchAsLowercase, 331 canSafelyMatchAsLowercase: canSafelyMatchAsLowercase,
188 hostname: hostname, 332 hostname: hostname,
189 justHostname: justHostname 333 justHostname: justHostname
190 }; 334 };
191 } 335 }
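
A sketch (not part of the patch) of the result parseFilterRegexpSource() above produces for a hostname-anchored filter, with the URL scheme passed in explicitly:

let parsed = parseFilterRegexpSource("||example.com^", "https?://");
// parsed.regexp (string value)     -> "^https?://([^/]+\.)?example\.com([^-_.%a-z0-9].*)?$"
// parsed.hostname                  -> "example.com"
// parsed.justHostname              -> true
// parsed.canSafelyMatchAsLowercase -> true
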
192 336
193 function getResourceTypes(filter) 337 function getResourceTypes(contentType)
194 { 338 {
195 let types = []; 339 let types = [];
196 340
197 if (filter.contentType & typeMap.IMAGE) 341 if (contentType & typeMap.IMAGE)
198 types.push("image"); 342 types.push("image");
199 if (filter.contentType & typeMap.STYLESHEET) 343 if (contentType & typeMap.STYLESHEET)
200 types.push("style-sheet"); 344 types.push("style-sheet");
201 if (filter.contentType & typeMap.SCRIPT) 345 if (contentType & typeMap.SCRIPT)
202 types.push("script"); 346 types.push("script");
203 if (filter.contentType & typeMap.FONT) 347 if (contentType & typeMap.FONT)
204 types.push("font"); 348 types.push("font");
205 if (filter.contentType & (typeMap.MEDIA | typeMap.OBJECT)) 349 if (contentType & (typeMap.MEDIA | typeMap.OBJECT))
206 types.push("media"); 350 types.push("media");
207 if (filter.contentType & typeMap.POPUP) 351 if (contentType & typeMap.POPUP)
208 types.push("popup"); 352 types.push("popup");
209 if (filter.contentType & (typeMap.XMLHTTPREQUEST | 353 if (contentType & rawRequestTypes)
210 typeMap.OBJECT_SUBREQUEST |
211 typeMap.PING |
212 typeMap.OTHER))
213 types.push("raw"); 354 types.push("raw");
214 if (filter.contentType & typeMap.SUBDOCUMENT) 355 if (contentType & typeMap.SUBDOCUMENT)
215 types.push("document"); 356 types.push("document");
216 357
217 return types; 358 return types;
218 } 359 }
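
A small sketch (not part of the patch) of the mapping getResourceTypes() above performs:

getResourceTypes(typeMap.IMAGE | typeMap.SCRIPT);         // ["image", "script"]
getResourceTypes(typeMap.XMLHTTPREQUEST | typeMap.PING);  // ["raw"]
getResourceTypes(typeMap.MEDIA | typeMap.OBJECT);         // ["media"]
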
219 360
220 function addDomainPrefix(domains) 361 function makeRuleCopies(trigger, action, urlSchemes)
221 { 362 {
222 let result = []; 363 let copies = [];
223 364
224 for (let domain of domains) 365 // Always make a deep copy of the rule, since rules may have to be
225 { 366 // manipulated individually at a later stage.
226 result.push(domain); 367 let stringifiedTrigger = JSON.stringify(trigger);
227 368
228 if (tldjs.getDomain(domain) == domain) 369 let filterPattern = trigger["url-filter"].substring(1);
229 result.push("www." + domain); 370 let startIndex = 0;
230 } 371
231 372 // If the URL filter already begins with the first URL scheme pattern, skip
232 return result; 373 // it.
233 } 374 if (trigger["url-filter"].startsWith("^" + urlSchemes[0]))
234 375 {
235 function convertFilterAddRules(rules, filter, action, withResourceTypes) 376 filterPattern = filterPattern.substring(urlSchemes[0].length);
236 { 377 startIndex = 1;
237 let parsed = parseFilterRegexpSource(filter.regexpSource); 378 }
379 else
380 {
381 filterPattern = ".*" + filterPattern;
382 }
383
384 for (let i = startIndex; i < urlSchemes.length; i++)
385 {
386 let copyTrigger = Object.assign(JSON.parse(stringifiedTrigger), {
387 "url-filter": "^" + urlSchemes[i] + filterPattern
388 });
389 copies.push({trigger: copyTrigger, action});
390 }
391
392 return copies;
393 }
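
A sketch (not part of the patch) of how makeRuleCopies() above fans one rule out across the remaining URL scheme patterns; the trigger and scheme list here are hypothetical:

let trigger = {"url-filter": "^wss?://example\\.com/socket"};
let copies = makeRuleCopies(trigger, {type: "block"},
                            ["wss?://", "stuns?:", "turns?:"]);
// copies[0].trigger["url-filter"]  -> "^stuns?:example\.com/socket"
// copies[1].trigger["url-filter"]  -> "^turns?:example\.com/socket"
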
394
395 function excludeTopURLFromTrigger(trigger)
396 {
397 trigger["unless-top-url"] = [trigger["url-filter"]];
398 if (trigger["url-filter-is-case-sensitive"])
399 trigger["top-url-filter-is-case-sensitive"] = true;
400 }
401
402 function convertFilterAddRules(rules, filter, action, withResourceTypes,
403 exceptionDomains, contentType)
404 {
405 if (!contentType)
406 contentType = filter.contentType;
407
408 // If WebSocket or WebRTC are given along with other options but not
409 // including all three of WebSocket, WebRTC, and at least one HTTP raw type,
410 // we must generate multiple rules. For example, for the filter
411 // "foo$websocket,image", we must generate one rule with "^wss?://" and "raw"
412 // and another rule with "^https?://" and "image". If we merge the two, we
413 // end up blocking requests of all HTTP raw types (e.g. XMLHttpRequest)
414 // inadvertently.
415 if ((contentType & typeMap.WEBSOCKET && contentType != typeMap.WEBSOCKET &&
416 !(contentType & typeMap.WEBRTC &&
417 contentType & rawRequestTypes & httpRequestTypes)) ||
418 (contentType & typeMap.WEBRTC && contentType != typeMap.WEBRTC &&
419 !(contentType & typeMap.WEBSOCKET &&
420 contentType & rawRequestTypes & httpRequestTypes)))
421 {
422 if (contentType & typeMap.WEBSOCKET)
423 {
424 convertFilterAddRules(rules, filter, action, withResourceTypes,
425 exceptionDomains, typeMap.WEBSOCKET);
426 }
427
428 if (contentType & typeMap.WEBRTC)
429 {
430 convertFilterAddRules(rules, filter, action, withResourceTypes,
431 exceptionDomains, typeMap.WEBRTC);
432 }
433
434 contentType &= ~(typeMap.WEBSOCKET | typeMap.WEBRTC);
435
436 if (!contentType)
437 return;
438 }
439
440 let urlSchemes = getURLSchemes(contentType);
441 let parsed = parseFilterRegexpSource(filter.regexpSource, urlSchemes[0]);
238 442
239 // For the special case of $document whitelisting filters with just a domain 443 // For the special case of $document whitelisting filters with just a domain
240 // we can generate an equivalent blocking rule exception using if-domain. 444 // we can generate an equivalent blocking rule exception using if-domain.
241 if (filter instanceof filterClasses.WhitelistFilter && 445 if (filter instanceof filterClasses.WhitelistFilter &&
242 filter.contentType & typeMap.DOCUMENT && 446 contentType & typeMap.DOCUMENT &&
243 parsed.justHostname) 447 parsed.justHostname)
244 { 448 {
245 rules.push({ 449 rules.push({
246 trigger: { 450 trigger: {
247 "url-filter": ".*", 451 "url-filter": ".*",
248 "if-domain": addDomainPrefix([parsed.hostname]) 452 "if-domain": ["*" + parsed.hostname]
249 }, 453 },
250 action: {type: "ignore-previous-rules"} 454 action: {type: "ignore-previous-rules"}
251 }); 455 });
252 // If the filter contains other supported options we'll need to generate 456 // If the filter contains other supported options we'll need to generate
253 // further rules for it, but if not we can simply return now. 457 // further rules for it, but if not we can simply return now.
254 if (!(filter.contentType & whitelistableRequestTypes)) 458 if (!(contentType & whitelistableRequestTypes))
255 return; 459 return;
256 } 460 }
257 461
258 let trigger = {"url-filter": parsed.regexp}; 462 let trigger = {"url-filter": parsed.regexp};
259 463
260 // Limit rules to HTTP(S) URLs 464 // If the URL filter begins with one of the URL schemes for this content
261 if (!/^(\^|http)/i.test(trigger["url-filter"])) 465 // type, we generate additional rules for all the URL scheme patterns;
262 trigger["url-filter"] = "^https?://.*" + trigger["url-filter"]; 466 // otherwise, if the start of the URL filter literally matches the first URL
467 // scheme pattern, we just generate additional rules for the remaining URL
468 // scheme patterns.
469 //
470 // For example, "stun:foo$webrtc" will give us "stun:foo", then we add a "^"
471 // in front of this and generate two additional rules for
472 // "^stuns?:.*stun:foo" and "^turns?:.*stun:foo". On the other hand,
473 // "||foo$webrtc" will give us "^stuns?:([^/]+\\.)?foo", so we just generate
474 // "^turns?:([^/]+\\.)?foo" in addition.
475 //
476 // Note that the filter can be already anchored to the beginning
477 // (e.g. "|stun:foo$webrtc"), in which case we do not generate any additional
478 // rules.
479 let needAltRules = trigger["url-filter"][0] != "^" ||
480 trigger["url-filter"].startsWith("^" + urlSchemes[0]);
481
482 if (trigger["url-filter"][0] != "^")
483 {
484 if (!urlSchemes.some(scheme => new RegExp("^" + scheme)
485 .test(trigger["url-filter"])))
486 {
487 trigger["url-filter"] = urlSchemes[0] + ".*" + trigger["url-filter"];
488 }
489
490 trigger["url-filter"] = "^" + trigger["url-filter"];
491 }
263 492
264 // For rules containing only a hostname we know that we're matching against 493 // For rules containing only a hostname we know that we're matching against
265 // a lowercase string unless the matchCase option was passed. 494 // a lowercase string unless the matchCase option was passed.
266 if (parsed.canSafelyMatchAsLowercase && !filter.matchCase) 495 if (parsed.canSafelyMatchAsLowercase && !filter.matchCase)
267 trigger["url-filter"] = trigger["url-filter"].toLowerCase(); 496 trigger["url-filter"] = trigger["url-filter"].toLowerCase();
268 497
269 if (parsed.canSafelyMatchAsLowercase || filter.matchCase) 498 if (parsed.canSafelyMatchAsLowercase || filter.matchCase)
270 trigger["url-filter-is-case-sensitive"] = true; 499 trigger["url-filter-is-case-sensitive"] = true;
271 500
272 let included = []; 501 let included = [];
273 let excluded = []; 502 let excluded = [];
274 503
275 parseDomains(filter.domains, included, excluded); 504 parseDomains(filter.domains, included, excluded);
276 505
506 if (exceptionDomains)
507 excluded = excluded.concat(exceptionDomains);
508
277 if (withResourceTypes) 509 if (withResourceTypes)
278 { 510 {
279 trigger["resource-type"] = getResourceTypes(filter); 511 let resourceTypes = getResourceTypes(contentType);
280 512
281 if (trigger["resource-type"].length == 0) 513 // Content blocker rules can't differentiate between sub-document requests
514 // (iframes) and top-level document requests. To avoid too many false
515 // positives, we prevent rules with no hostname part from blocking document
516 // requests.
517 //
518 // Once Safari 11 becomes our minimum supported version, we could change
519 // our approach here to use the new "unless-top-url" property instead.
520 if (filter instanceof filterClasses.BlockingFilter && !parsed.hostname)
521 resourceTypes = resourceTypes.filter(type => type != "document");
522
523 if (resourceTypes.length == 0)
282 return; 524 return;
525
526 trigger["resource-type"] = resourceTypes;
283 } 527 }
284 528
285 if (filter.thirdParty != null) 529 if (filter.thirdParty != null)
286 trigger["load-type"] = [filter.thirdParty ? "third-party" : "first-party"]; 530 trigger["load-type"] = [filter.thirdParty ? "third-party" : "first-party"];
287 531
532 let addTopLevelException = false;
533
288 if (included.length > 0) 534 if (included.length > 0)
289 trigger["if-domain"] = addDomainPrefix(included); 535 {
536 trigger["if-domain"] = [];
537
538 for (let name of included)
539 {
540 // If this is a blocking filter or an element hiding filter, add the
541 // subdomain wildcard only if no subdomains have been excluded.
542 let notSubdomains = null;
543 if ((filter instanceof filterClasses.BlockingFilter ||
544 filter instanceof filterClasses.ElemHideFilter) &&
545 (notSubdomains = findSubdomainsInList(name, excluded)).length > 0)
546 {
547 trigger["if-domain"].push(name);
548
549 // Add the "www" prefix but only if it hasn't been excluded.
550 if (!notSubdomains.includes("www"))
551 trigger["if-domain"].push("www." + name);
552 }
553 else
554 {
555 trigger["if-domain"].push("*" + name);
556 }
557 }
558 }
290 else if (excluded.length > 0) 559 else if (excluded.length > 0)
291 trigger["unless-domain"] = addDomainPrefix(excluded); 560 {
561 trigger["unless-domain"] = excluded.map(name => "*" + name);
562 }
563 else if (filter instanceof filterClasses.BlockingFilter &&
564 filter.contentType & typeMap.SUBDOCUMENT && parsed.hostname)
565 {
566 // Rules with a hostname part are still allowed to block document requests,
567 // but we add an exception for top-level documents.
568 //
569 // Note that we can only do this if there's no "unless-domain" property for
570 // now. This also only works in Safari 11 onwards, while older versions
571 // simply ignore this property. Once Safari 11 becomes our minimum
572 // supported version, we can merge "unless-domain" into "unless-top-url".
573 addTopLevelException = true;
574 excludeTopURLFromTrigger(trigger);
575 }
292 576
293 rules.push({trigger: trigger, action: {type: action}}); 577 rules.push({trigger: trigger, action: {type: action}});
294 } 578
295 579 if (needAltRules)
296 function hasNonASCI(obj) 580 {
297 { 581 // Generate additional rules for any alternative URL schemes.
298 if (typeof obj == "string") 582 for (let altRule of makeRuleCopies(trigger, {type: action}, urlSchemes))
299 { 583 {
300 if (/[^\x00-\x7F]/.test(obj)) 584 if (addTopLevelException)
301 return true; 585 excludeTopURLFromTrigger(altRule.trigger);
302 } 586
303 587 rules.push(altRule);
304 if (typeof obj == "object") 588 }
305 { 589 }
306 if (obj instanceof Array)
307 for (let item of obj)
308 if (hasNonASCI(item))
309 return true;
310
311 let names = Object.getOwnPropertyNames(obj);
312 for (let name of names)
313 if (hasNonASCI(obj[name]))
314 return true;
315 }
316
317 return false;
318 } 590 }
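
To illustrate what convertFilterAddRules() above pushes, this is the approximate rule produced for a hypothetical blocking filter such as "||example.com^$image,third-party" (a sketch derived from the code above, not part of the patch):

let exampleRule = {
  trigger: {
    "url-filter": "^https?://([^/]+\\.)?example\\.com([^-_.%a-z0-9].*)?$",
    "url-filter-is-case-sensitive": true,
    "resource-type": ["image"],
    "load-type": ["third-party"]
  },
  action: {type: "block"}
};
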
319 591
320 function convertIDSelectorsToAttributeSelectors(selector) 592 function convertIDSelectorsToAttributeSelectors(selector)
321 { 593 {
322 // First we figure out where all the IDs are 594 // First we figure out where all the IDs are
323 let sep = ""; 595 let sep = "";
324 let start = null; 596 let start = null;
325 let positions = []; 597 let positions = [];
326 for (let i = 0; i < selector.length; i++) 598 for (let i = 0; i < selector.length; i++)
327 { 599 {
(...skipping 33 matching lines...)
361 { 633 {
362 newSelector.push(selector.substring(i, pos.start)); 634 newSelector.push(selector.substring(i, pos.start));
363 newSelector.push('[id=', selector.substring(pos.start + 1, pos.end), ']'); 635 newSelector.push('[id=', selector.substring(pos.start + 1, pos.end), ']');
364 i = pos.end; 636 i = pos.end;
365 } 637 }
366 newSelector.push(selector.substring(i)); 638 newSelector.push(selector.substring(i));
367 639
368 return newSelector.join(""); 640 return newSelector.join("");
369 } 641 }
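
A usage sketch (not part of the patch) for the helper above; since the middle of the function is collapsed in this diff, the results shown are inferred from its visible tail and the comment at its call site:

convertIDSelectorsToAttributeSelectors("#adBanner");
// -> "[id=adBanner]"
convertIDSelectorsToAttributeSelectors(".sidebar > #ad");
// -> ".sidebar > [id=ad]"
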
370 642
371 function closeMatch(s, t, {singleCharacterOnly = false} = {}) 643 function addCSSRules(rules, selectors, domain, exceptionDomains)
372 { 644 {
373 // This function returns an edit operation, one of "substitute", "delete", 645 let unlessDomain = exceptionDomains.size > 0 ? [] : null;
374 // and "insert", along with an index in the source string where the edit must 646
375 // occur in order to arrive at the target string. If the strings are not a 647 exceptionDomains.forEach(name =>
376 // close match, it returns null. 648 {
377 649 // For domain-specific filters, include the exception domains only if
378 // If singleCharacterOnly is false, deletions or insertions of a contiguous 650 // they're subdomains of the given domain.
379 // range of characters from one string into the other, at the same index, are 651 if (!domain || name.substr(-domain.length - 1) == "." + domain)
380 // treated as a single edit. For example, "internal" and "international" are 652 unlessDomain.push("*" + name);
381 // considered to be one edit apart, inserting the substring "tiona" from the 653 });
382 // latter into the former. 654
383 655 while (selectors.length)
384 // A few things to note: 656 {
385 // 657 let selector = selectors.splice(0, selectorLimit).join(", ");
386 // 1) This function does not care about how the input strings are treated 658
387 // by the caller. It only treats them as raw strings. For example, the 659 // As of Safari 9.0 element IDs are matched as lowercase. We work around
388 // caller may treat them as regular expressions, where "[ab]" and "[bc]" 660 // this by converting to the attribute format [id="elementID"]
389 // could be considered to have an edit distance of 1, since the order 661 selector = convertIDSelectorsToAttributeSelectors(selector);
390 // within the brackets does not matter. This function will still return 662
391 // null for this set of inputs since they are two edits apart. 663 let rule = {
392 // 664 trigger: {"url-filter": matchDomain(domain),
393 // 2) To be friendly to calling code that might be passing in regular 665 "url-filter-is-case-sensitive": true},
394 // expressions anyway, this function will simply return null if it 666 action: {type: "css-display-none",
395 // encounters a special character (e.g. "\", "?", "+", "*", etc.) in the 667 selector: selector}
396 // delta. For example, given "Hello" and "Hello, how are you?", it will 668 };
397 // return null instead of "{type: 'insert', index: 5, endIndex: 19}". 669
398 // 670 if (unlessDomain)
399 // 3) The calling code within this file does indeed pass in regular 671 rule.trigger["unless-domain"] = unlessDomain;
400 // expressions (the strict subset of JavaScript regular expressions 672
401 // supported by WebKit for content blockers), making the important 673 rules.push(rule);
402 // assumption that the parts where two such regular expressions may 674 }
403 // differ can always be treated as normal strings. 675 }
404 // 676
405 // For example, "^https?://.*/ads" and "^https?://.*/adv" differ only in 677 /**
406 // the last character, therefore the regular expressions can safely be 678 * Check if two strings are a close match
407 // merged into "^https?://.*/ad[sv]". If, for example, the characters in 679 *
408 // the delta were to appear within square brackets originally in the 680 * This function returns an edit operation, one of "substitute", "delete", and
409 // input strings (e.g. "^https?://.*/ad[sx]" and "^https?://.*/ad[vx]"), 681 * "insert", along with an index in the source string where the edit must occur
410 // the calling code would have to do extra work to merge the two regular 682 * in order to arrive at the target string. If the strings are not a close
411 // expressions correctly. The calling code within this file assumes that 683 * match, it returns null.
412 // this is never the case. 684 *
413 685 * Two strings are considered to be a close match if they are one edit
686 * operation apart.
687 *
688 * Deletions or insertions of a contiguous range of characters from one string
689 * into the other, at the same index, are treated as a single edit. For
690 * example, "internal" and "international" are considered to be one edit apart
691 * and therefore a close match.
692 *
693 * A few things to note:
694 *
695 * 1) This function does not care about the format of the input strings. For
696 * example, the caller may pass in regular expressions, where "[ab]" and
697 * "[bc]" could be considered to be a close match, since the order within the
698 * brackets doesn't matter. This function will still return null for this set
699 * of inputs since they are two edits apart.
700 *
701 * 2) To be friendly to calling code that might be passing in regular
702 * expressions, this function will simply return null if it encounters a
703 * special character (e.g. "\", "?", "+", etc.) in the delta. For example,
704 * given "Hello" and "Hello, how are you?", it will return null.
705 *
706 * 3) If the caller does indeed pass in regular expressions, it must make the
707 * important assumption that the parts where two such regular expressions may
708 * differ can always be treated as normal strings. For example,
709 * "^https?://example.com/ads" and "^https?://example.com/adv" differ only in
710 * the last character, therefore the regular expressions can safely be merged
711 * into "^https?://example.com/ad[sv]".
712 *
713 * @param {string} s The source string
714 * @param {string} t The target string
715 *
716 * @returns {object} An object describing the single edit operation that must
717 * occur in the source string in order to arrive at the
718 * target string
719 */
720 function closeMatch(s, t)
721 {
414 let diff = s.length - t.length; 722 let diff = s.length - t.length;
415
416 // If the string lengths differ by more than one character, we cannot arrive
417 // at target from source in a single edit operation.
418 if (singleCharacterOnly && (diff < -1 || diff > 1))
419 return null;
420 723
421 // If target is longer than source, swap them for the purpose of our 724 // If target is longer than source, swap them for the purpose of our
422 // calculation. 725 // calculation.
423 if (diff < 0) 726 if (diff < 0)
424 { 727 {
425 let tmp = s; 728 let tmp = s;
426 s = t; 729 s = t;
427 t = tmp; 730 t = tmp;
428 } 731 }
429 732
430 let edit = null; 733 let edit = null;
431 734
432 // If the string lengths differ by only one character at most, use the simple 735 let i = 0;
433 // algorithm to find a single character edit. 736 let j = 0;
434 if (diff == 0 || diff == 1 || diff == -1) 737
435 { 738 // Start from the beginning and keep going until we hit a character that
436 for (let i = 0, j = 0; i < s.length; i++) 739 // doesn't match.
437 { 740 for (; i < s.length; i++)
438 if (s[i] == t[j]) 741 {
439 { 742 if (s[i] != t[i])
440 j++; 743 break;
744 }
745
746 // Now do exactly the same from the end, but also stop if we reach the
747 // position where we terminated the previous loop.
748 for (; j < t.length; j++)
749 {
750 if (t.length - j == i || s[s.length - j - 1] != t[t.length - j - 1])
751 break;
752 }
753
754 if (diff == 0)
755 {
756 // If the strings are equal in length and the delta isn't exactly one
757 // character, it's not a close match.
758 if (t.length - j - i != 1)
759 return null;
760 }
761 else if (i != t.length - j)
762 {
763 // For strings of unequal length, if we haven't found a match for every
764 // single character in the shorter string counting from both the beginning
765 // and the end, it's not a close match.
766 return null;
767 }
768
769 for (let k = i; k < s.length - j; k++)
770 {
771 // If the delta contains any special characters, it's not a close match.
772 if (s[k] == "." || s[k] == "+" || s[k] == "$" || s[k] == "?" ||
773 s[k] == "{" || s[k] == "}" || s[k] == "(" || s[k] == ")" ||
774 s[k] == "[" || s[k] == "]" || s[k] == "\\")
775 return null;
776 }
777
778 if (diff == 0)
779 {
780 edit = {type: "substitute", index: i};
781 }
782 else if (diff > 0)
783 {
784 edit = {type: "delete", index: i};
785
786 if (diff > 1)
787 edit.endIndex = s.length - j;
788 }
789 else
790 {
791 edit = {type: "insert", index: i};
792
793 if (diff < -1)
794 edit.endIndex = s.length - j;
795 }
796
797 return edit;
798 }
799
800 function eliminateRedundantRulesByURLFilter(rulesInfo, exhaustive)
801 {
802 const heuristicRange = 1000;
803
804 let ol = rulesInfo.length;
805
806 // Throw out obviously redundant rules.
807 return async(rulesInfo, (ruleInfo, index) => () =>
808 {
809 // If this rule is already marked as redundant, don't bother comparing it
810 // with other rules.
811 if (rulesInfo[index].redundant)
812 return;
813
814 let limit = exhaustive ? rulesInfo.length :
815 Math.min(index + heuristicRange, rulesInfo.length);
816
817 for (let i = index, j = i + 1; j < limit; j++)
818 {
819 if (rulesInfo[j].redundant)
820 continue;
821
822 let source = rulesInfo[i].rule.trigger["url-filter"];
823 let target = rulesInfo[j].rule.trigger["url-filter"];
824
825 if (source.length >= target.length)
826 {
827 // If one URL filter is a substring of the other starting at the
828 // beginning, the other one is clearly redundant.
829 if (source.substring(0, target.length) == target)
830 {
831 rulesInfo[i].redundant = true;
832 break;
833 }
441 } 834 }
442 else if (edit) 835 else if (target.substring(0, source.length) == source)
443 { 836 {
444 // Since we want one and only one edit operation, we must bail here. 837 rulesInfo[j].redundant = true;
445 return null;
446 } 838 }
447 else if ((s[i] == "." || s[i] == "+" || s[i] == "$" || s[i] == "?" || 839 }
448 s[i] == "{" || s[i] == "}" || s[i] == "(" || s[i] == ")" || 840 })
449 s[i] == "[" || s[i] == "]" || s[i] == "\\") || 841 .then(() => rulesInfo.filter(ruleInfo => !ruleInfo.redundant));
450 (t[j] == "." || t[j] == "+" || t[j] == "$" || t[j] == "?" || 842 }
451 t[j] == "{" || t[j] == "}" || t[j] == "(" || t[j] == ")" || 843
452 t[j] == "[" || t[j] == "]" || t[j] == "\\")) 844 function findMatchesForRuleByURLFilter(rulesInfo, index, exhaustive)
453 { 845 {
454 // We don't deal with special characters for now. 846 // Closely matching rules are likely to be within a certain range. We only
455 return null; 847 // look for matches within this range by default. If we increase this value,
456 } 848 // it can give us more matches and a smaller resulting rule set, but possibly
457 else if (diff == 0) 849 // at a significant performance cost.
458 { 850 //
459 // If both strings are equal in length, this is a substitution. 851 // If the exhaustive option is true, we simply ignore this value and look for
460 edit = {type: "substitute", index: i}; 852 // matches throughout the rule set.
461 j++; 853 const heuristicRange = 1000;
462 } 854
463 else if (diff > 0) 855 let limit = exhaustive ? rulesInfo.length :
464 { 856 Math.min(index + heuristicRange, rulesInfo.length);
465 // If the source string is longer, this is a deletion. 857
466 edit = {type: "delete", index: i}; 858 for (let i = index, j = i + 1; j < limit; j++)
859 {
860 let source = rulesInfo[i].rule.trigger["url-filter"];
861 let target = rulesInfo[j].rule.trigger["url-filter"];
862
863 let edit = closeMatch(source, target);
864
865 if (edit)
866 {
867 let urlFilter, ruleInfo, match = {edit};
868
869 if (edit.type == "insert")
870 {
871 // Convert the insertion into a deletion and stick it on the target
872 // rule instead. We can only group deletions and substitutions;
873 // therefore insertions must be treated as deletions on the target
874 // rule.
875 urlFilter = target;
876 ruleInfo = rulesInfo[j];
877 match.index = i;
878 edit.type = "delete";
467 } 879 }
468 else 880 else
469 { 881 {
470 edit = {type: "insert", index: i}; 882 urlFilter = source;
883 ruleInfo = rulesInfo[i];
884 match.index = j;
471 } 885 }
472 } 886
473 } 887 // If the edit has an end index, it represents a multiple character
474 else if (!singleCharacterOnly) 888 // edit.
475 { 889 let multiEdit = !!edit.endIndex;
476 // Try another algorithm to find a multiple character deletion or 890
477 // insertion. 891 if (multiEdit)
478 892 {
479 let i = 0, j = 0; 893 // We only care about a single multiple character edit because the
480 894 // number of characters for such a match doesn't matter, we can
481 for (; i < s.length; i++) 895 // only merge with one other rule.
482 { 896 if (!ruleInfo.multiEditMatch)
483 if (s[i] != t[i]) 897 ruleInfo.multiEditMatch = match;
484 break; 898 }
485 } 899 else
486 900 {
487 for (; j < t.length; j++) 901 // For single character edits, multiple rules can be merged into
488 { 902 // one. e.g. "ad", "ads", and "adv" can be merged into "ad[sv]?".
489 if (t.length - j == i || 903 if (!ruleInfo.matches)
490 s[s.length - j - 1] != t[t.length - j - 1]) 904 ruleInfo.matches = new Array(urlFilter.length);
491 break; 905
492 } 906 // Matches at a particular index. For example, for a source string
493 907 // "ads", both target strings "ad" (deletion) and "adv"
494 if (i != t.length - j) 908 // (substitution) match at index 2, hence they are grouped together
495 return null; 909 // to possibly be merged later into "ad[sv]?".
496 910 let matchesForIndex = ruleInfo.matches[edit.index];
497 for (let k = i; k < s.length - j; k++) 911
498 { 912 if (matchesForIndex)
499 // If there are any special characters in the delta, bail.
500 if (s[k] == "." || s[k] == "+" || s[k] == "$" || s[k] == "?" ||
501 s[k] == "{" || s[k] == "}" || s[k] == "(" || s[k] == ")" ||
502 s[k] == "[" || s[k] == "]" || s[k] == "\\")
503 return null;
504 }
505
506 if (diff > 0)
507 {
508 edit = {type: "delete", index: i, endIndex: s.length - j};
509 }
510 else
511 {
512 edit = {type: "insert", index: i, endIndex: s.length - j};
513 }
514 }
515
516 return edit;
517 }
518
519 function mergeCloselyMatchingRules(rules,
520 {advanced = false, exhaustive = false} = {})
521 {
522 // Closely matching rules are likely to be within a certain range. We only
523 // look for matches within this range. If we increase this value, it can give
524 // us more matches and a smaller resulting rule set, but possibly at a
525 // significant performance cost.
526 const heuristicRange = 100;
527
528 let rulesInfo = new Array(rules.length);
529
530 rules.forEach((rule, index) =>
531 {
532 rulesInfo[index] = {rule};
533
534 if (rule.action.type == "ignore-previous-rules")
535 {
536 rulesInfo[index].skip = true;
537 }
538 else
539 {
540 // Save a hash of the rule but without the URL filter. We use this for
541 // comparison later.
542 let copy = {
543 trigger: Object.assign({}, rule.trigger),
544 action: Object.assign({}, rule.action)
545 };
546
547 delete copy.trigger["url-filter"];
548
549 rulesInfo[index].ruleHash = crypto.createHash("sha1")
550 .update(JSON.stringify(copy))
551 .digest("hex")
552 .substring(0, 8);
553 }
554 });
555
556 for (let i = 0; i < rules.length; i++)
557 {
558 if (rulesInfo[i].skip)
559 continue;
560
561 let limit = exhaustive ? rules.length :
562 Math.min(i + heuristicRange, rules.length);
563
564 for (let j = i + 1; j < limit; j++)
565 {
566 if (rulesInfo[j].skip)
567 continue;
568
569 // Check if the rules are identical except for the URL filter.
570 if (rulesInfo[i].ruleHash == rulesInfo[j].ruleHash)
571 {
572 let source = rules[i].trigger["url-filter"];
573 let target = rules[j].trigger["url-filter"];
574
575 let edit = closeMatch(source, target, {singleCharacterOnly: !advanced});
576
577 if (edit)
578 { 913 {
579 let urlFilter, ruleInfo, match = {edit}; 914 matchesForIndex.push(match);
580
581 if (edit.type == "insert")
582 {
583 // Convert the insertion into a deletion and stick it on the target
584 // rule instead. We can only group deletions and substitutions;
585 // therefore insertions must be treated as deletions on the target
586 // rule.
587 urlFilter = target;
588 ruleInfo = rulesInfo[j];
589 match.index = i;
590 edit.type = "delete";
591 }
592 else
593 {
594 urlFilter = source;
595 ruleInfo = rulesInfo[i];
596 match.index = j;
597 }
598
599 // If the edit has an end index, it represents a multiple character
600 // edit.
601 let multiEdit = !!edit.endIndex;
602
603 if (multiEdit)
604 {
605 // We only care about a single multiple character edit because the
606 // number of characters for such a match doesn't matter, we can
607 // only merge with one other rule.
608 if (!ruleInfo.multiEditMatch)
609 ruleInfo.multiEditMatch = match;
610 }
611 else
612 {
613 // For single character edits, multiple rules can be merged into
614 // one. e.g. "ad", "ads", and "adv" can be merged into "ad[sv]?".
615 if (!ruleInfo.matches)
616 ruleInfo.matches = new Array(urlFilter.length + 1);
617
618 // Matches at a particular index. For example, for a source string
619 // "ads", both target strings "ad" (deletion) and "adv"
620 // (substitution) match at index 2, hence they are grouped together
621 // to possibly be merged later into "ad[sv]?".
622 let matchesForIndex = ruleInfo.matches[edit.index];
623
624 if (matchesForIndex)
625 {
626 matchesForIndex.push(match);
627 }
628 else
629 {
630 matchesForIndex = [match];
631 ruleInfo.matches[edit.index] = matchesForIndex;
632 }
633
634 // Keep track of the best set of matches. We later sort by this to
635 // get best results.
636 if (!ruleInfo.bestMatches ||
637 matchesForIndex.length > ruleInfo.bestMatches.length)
638 ruleInfo.bestMatches = matchesForIndex;
639 }
640 } 915 }
916 else
917 {
918 matchesForIndex = [match];
919 ruleInfo.matches[edit.index] = matchesForIndex;
920 }
921
922 // Keep track of the best set of matches. We later sort by this to
923 // get best results.
924 if (!ruleInfo.bestMatches ||
925 matchesForIndex.length > ruleInfo.bestMatches.length)
926 ruleInfo.bestMatches = matchesForIndex;
641 } 927 }
642 } 928 }
643 } 929 }
644 930 }
931
932 function mergeCandidateRulesByURLFilter(rulesInfo)
933 {
645 // Filter out rules that have no matches at all. 934 // Filter out rules that have no matches at all.
646 let candidateRulesInfo = rulesInfo.filter(ruleInfo => 935 let candidateRulesInfo = rulesInfo.filter(ruleInfo =>
647 { 936 {
648 return ruleInfo.bestMatches || ruleInfo.multiEditMatch 937 return ruleInfo.bestMatches || ruleInfo.multiEditMatch
649 }); 938 });
650 939
651 // For best results, we have to sort the candidates by the largest set of 940 // For best results, we have to sort the candidates by the largest set of
652 // matches. 941 // matches.
653 // 942 //
654 // For example, we want "ads", "bds", "adv", "bdv", "adx", and "bdx" to 943 // For example, we want "ads", "bds", "adv", "bdv", "adx", and "bdx" to
(...skipping 48 matching lines...)
703 if (best.length > 0) 992 if (best.length > 0)
704 { 993 {
705 let urlFilter = rule.trigger["url-filter"]; 994 let urlFilter = rule.trigger["url-filter"];
706 995
707 let editIndex = best[0].edit.index; 996 let editIndex = best[0].edit.index;
708 997
709 if (!multiEdit) 998 if (!multiEdit)
710 { 999 {
711 // Merge all the matching rules into this one. 1000 // Merge all the matching rules into this one.
712 1001
713 let characters = []; 1002 let characters = [urlFilter[editIndex]];
714 let quantifier = ""; 1003 let quantifier = "";
715 1004
716 for (let match of best) 1005 for (let match of best)
717 { 1006 {
718 if (match.edit.type == "delete") 1007 if (match.edit.type == "delete")
719 { 1008 {
720 quantifier = "?"; 1009 quantifier = "?";
721 } 1010 }
722 else 1011 else
723 { 1012 {
724 let character = rules[match.index].trigger["url-filter"][editIndex]; 1013 let character = rulesInfo[match.index].rule
725 characters.push(character); 1014 .trigger["url-filter"][editIndex];
1015
1016 // Insert any hyphen at the beginning so it gets interpreted as a
1017 // literal hyphen.
1018 if (character == "-")
1019 characters.unshift(character);
1020 else
1021 characters.push(character);
726 } 1022 }
727 1023
728 // Mark the target rule as merged so other rules don't try to merge 1024 // Mark the target rule as merged so other rules don't try to merge
729 // it again. 1025 // it again.
730 rulesInfo[match.index].merged = true; 1026 rulesInfo[match.index].merged = true;
731 } 1027 }
732 1028
733 urlFilter = urlFilter.substring(0, editIndex + 1) + quantifier + 1029 urlFilter = urlFilter.substring(0, editIndex + 1) + quantifier +
734 urlFilter.substring(editIndex + 1); 1030 urlFilter.substring(editIndex + 1);
735 if (characters.length > 0) 1031 if (characters.length > 1)
736 { 1032 {
737 urlFilter = urlFilter.substring(0, editIndex) + "[" + 1033 urlFilter = urlFilter.substring(0, editIndex) + "[" +
738 urlFilter[editIndex] + characters.join("") + "]" + 1034 characters.join("") + "]" +
739 urlFilter.substring(editIndex + 1); 1035 urlFilter.substring(editIndex + 1);
740 } 1036 }
741 } 1037 }
742 else 1038 else
743 { 1039 {
744 let editEndIndex = best[0].edit.endIndex; 1040 let editEndIndex = best[0].edit.endIndex;
745 1041
746 // Mark the target rule as merged so other rules don't try to merge it 1042 // Mark the target rule as merged so other rules don't try to merge it
747 // again. 1043 // again.
748 rulesInfo[best[0].index].merged = true; 1044 rulesInfo[best[0].index].merged = true;
749 1045
750 urlFilter = urlFilter.substring(0, editIndex) + "(" + 1046 urlFilter = urlFilter.substring(0, editIndex) + "(" +
751 urlFilter.substring(editIndex, editEndIndex) + ")?" + 1047 urlFilter.substring(editIndex, editEndIndex) + ")?" +
752 urlFilter.substring(editEndIndex); 1048 urlFilter.substring(editEndIndex);
753 } 1049 }
754 1050
755 rule.trigger["url-filter"] = urlFilter; 1051 rule.trigger["url-filter"] = urlFilter;
756 1052
757 // Mark this rule as one that has had other rules merged into it. 1053 // Mark this rule as one that has had other rules merged into it.
758 ruleInfo.mergedInto = true; 1054 ruleInfo.mergedInto = true;
759 } 1055 }
760 } 1056 }
761 1057 }
762 // Filter out rules that have been merged into other rules. 1058
763 return rulesInfo.filter(ruleInfo => !ruleInfo.merged) 1059 function mergeRulesByURLFilter(rulesInfo, exhaustive)
764 .map(ruleInfo => ruleInfo.rule); 1060 {
1061 return async(rulesInfo, (ruleInfo, index) => () =>
1062 findMatchesForRuleByURLFilter(rulesInfo, index, exhaustive)
1063 )
1064 .then(() => mergeCandidateRulesByURLFilter(rulesInfo));
1065 }
1066
1067 function mergeRulesByArrayProperty(rulesInfo, propertyType, property)
1068 {
1069 if (rulesInfo.length <= 1)
1070 return;
1071
1072 let valueSet = new Set(rulesInfo[0].rule[propertyType][property]);
1073
1074 for (let i = 1; i < rulesInfo.length; i++)
1075 {
1076 for (let value of rulesInfo[i].rule[propertyType][property] || [])
1077 valueSet.add(value);
1078
1079 rulesInfo[i].merged = true;
1080 }
1081
1082 if (valueSet.size > 0)
1083 rulesInfo[0].rule[propertyType][property] = Array.from(valueSet);
1084
1085 rulesInfo[0].mergedInto = true;
1086 }
1087
1088 function groupRulesByMergeableProperty(rulesInfo, propertyType, property)
1089 {
1090 let mergeableRulesInfoByGroup = new Map();
1091
1092 for (let ruleInfo of rulesInfo)
1093 {
1094 let copy = {
1095 trigger: Object.assign({}, ruleInfo.rule.trigger),
1096 action: Object.assign({}, ruleInfo.rule.action)
1097 };
1098
1099 delete copy[propertyType][property];
1100
1101 let groupKey = JSON.stringify(copy);
1102
1103 let mergeableRulesInfo = mergeableRulesInfoByGroup.get(groupKey);
1104
1105 if (mergeableRulesInfo)
1106 mergeableRulesInfo.push(ruleInfo);
1107 else
1108 mergeableRulesInfoByGroup.set(groupKey, [ruleInfo]);
1109 }
1110
1111 return mergeableRulesInfoByGroup;
1112 }
1113
1114 function mergeRules(rules, exhaustive)
1115 {
1116 let rulesInfo = rules.map(rule => ({rule}));
1117
1118 let arrayPropertiesToMergeBy = ["resource-type", "if-domain"];
1119
1120 return async(() =>
1121 {
1122 let map = groupRulesByMergeableProperty(rulesInfo, "trigger", "url-filter");
1123 return async(map.values(), mergeableRulesInfo => () =>
1124 eliminateRedundantRulesByURLFilter(mergeableRulesInfo, exhaustive)
1125 .then(rulesInfo => mergeRulesByURLFilter(rulesInfo, exhaustive))
1126 )
1127 .then(() =>
1128 {
1129 // Filter out rules that are redundant or have been merged into other
1130 // rules.
1131 rulesInfo = rulesInfo.filter(ruleInfo => !ruleInfo.redundant &&
1132 !ruleInfo.merged);
1133 });
1134 })
1135 .then(() => async(arrayPropertiesToMergeBy, arrayProperty => () =>
1136 {
1137 let map = groupRulesByMergeableProperty(rulesInfo, "trigger",
1138 arrayProperty);
1139 return async(map.values(), mergeableRulesInfo => () =>
1140 mergeRulesByArrayProperty(mergeableRulesInfo, "trigger", arrayProperty)
1141 )
1142 .then(() =>
1143 {
1144 rulesInfo = rulesInfo.filter(ruleInfo => !ruleInfo.merged);
1145 });
1146 }))
1147 .then(() => rulesInfo.map(ruleInfo => ruleInfo.rule));
765 } 1148 }
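
A sketch (not part of the patch) of the consolidation the merging code above aims for when rules differ only in their "url-filter"; the rules are hypothetical and mergeRules() is internal to the module:

let rules = [
  {trigger: {"url-filter": "^https?://example\\.com/ads$"}, action: {type: "block"}},
  {trigger: {"url-filter": "^https?://example\\.com/adv$"}, action: {type: "block"}},
  {trigger: {"url-filter": "^https?://example\\.com/adx$"}, action: {type: "block"}}
];

mergeRules(rules, true).then(merged =>
{
  // Expected: a single rule whose filter groups the substituted characters,
  // e.g. "^https?://example\.com/ad[svx]$"
  console.log(JSON.stringify(merged));
});
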
766 1149
767 let ContentBlockerList = 1150 let ContentBlockerList =
768 /** 1151 /**
769 * Create a new Adblock Plus filter to content blocker list converter 1152 * Create a new Adblock Plus filter to content blocker list converter
770 * 1153 *
1154 * @param {object} options Options for content blocker list generation
1155 *
771 * @constructor 1156 * @constructor
772 */ 1157 */
773 exports.ContentBlockerList = function () 1158 exports.ContentBlockerList = function (options)
774 { 1159 {
1160 const defaultOptions = {
1161 merge: "auto"
1162 };
1163
1164 this.options = Object.assign({}, defaultOptions, options);
1165
775 this.requestFilters = []; 1166 this.requestFilters = [];
776 this.requestExceptions = []; 1167 this.requestExceptions = [];
777 this.elemhideFilters = []; 1168 this.elemhideFilters = [];
778 this.elemhideExceptions = []; 1169 this.elemhideExceptions = [];
1170 this.genericblockExceptions = [];
1171 this.generichideExceptions = [];
779 this.elemhideSelectorExceptions = new Map(); 1172 this.elemhideSelectorExceptions = new Map();
780 }; 1173 };
781 1174
782 /** 1175 /**
783 * Add Adblock Plus filter to be converted 1176 * Add Adblock Plus filter to be converted
784 * 1177 *
785 * @param {Filter} filter Filter to convert 1178 * @param {Filter} filter Filter to convert
786 */ 1179 */
787 ContentBlockerList.prototype.addFilter = function(filter) 1180 ContentBlockerList.prototype.addFilter = function(filter)
788 { 1181 {
789 if (filter.sitekeys) 1182 if (filter.sitekeys)
790 return; 1183 return;
791 if (filter instanceof filterClasses.RegExpFilter && 1184 if (filter instanceof filterClasses.RegExpFilter &&
792 filter.regexpSource == null) 1185 filter.regexpSource == null)
793 return; 1186 return;
794 1187
795 if (filter instanceof filterClasses.BlockingFilter) 1188 if (filter instanceof filterClasses.BlockingFilter)
796 this.requestFilters.push(filter); 1189 this.requestFilters.push(filter);
797 1190
798 if (filter instanceof filterClasses.WhitelistFilter) 1191 if (filter instanceof filterClasses.WhitelistFilter)
799 { 1192 {
800 if (filter.contentType & (typeMap.DOCUMENT | whitelistableRequestTypes)) 1193 if (filter.contentType & (typeMap.DOCUMENT | whitelistableRequestTypes))
801 this.requestExceptions.push(filter); 1194 this.requestExceptions.push(filter);
802 1195
803 if (filter.contentType & typeMap.ELEMHIDE) 1196 if (filter.contentType & typeMap.GENERICBLOCK)
804 this.elemhideExceptions.push(filter); 1197 this.genericblockExceptions.push(filter);
1198
1199 if (filter.contentType & typeMap.ELEMHIDE)
1200 this.elemhideExceptions.push(filter);
1201 else if (filter.contentType & typeMap.GENERICHIDE)
1202 this.generichideExceptions.push(filter);
805 } 1203 }
806 1204
807 if (filter instanceof filterClasses.ElemHideFilter) 1205 if (filter instanceof filterClasses.ElemHideFilter)
808 this.elemhideFilters.push(filter); 1206 this.elemhideFilters.push(filter);
809 1207
810 if (filter instanceof filterClasses.ElemHideException) 1208 if (filter instanceof filterClasses.ElemHideException)
811 { 1209 {
812 let domains = this.elemhideSelectorExceptions[filter.selector]; 1210 let domains = this.elemhideSelectorExceptions[filter.selector];
813 if (!domains) 1211 if (!domains)
814 domains = this.elemhideSelectorExceptions[filter.selector] = []; 1212 domains = this.elemhideSelectorExceptions[filter.selector] = [];
815 1213
816 parseDomains(filter.domains, domains, []); 1214 parseDomains(filter.domains, domains, []);
817 } 1215 }
818 }; 1216 };
819 1217
820 /** 1218 /**
821 * Generate content blocker list for all filters that were added 1219 * Generate content blocker list for all filters that were added
822 *
823 * @returns {Filter} filter Filter to convert
824 */ 1220 */
825 ContentBlockerList.prototype.generateRules = function({ 1221 ContentBlockerList.prototype.generateRules = function()
826 merge = false, 1222 {
827 fastMerge = true, 1223 let cssRules = [];
828 advancedMerge, 1224 let cssExceptionRules = [];
829 exhaustiveMerge 1225 let blockingRules = [];
830 } = {}) 1226 let blockingExceptionRules = [];
831 { 1227
832 let rules = []; 1228 let ruleGroups = [cssRules, cssExceptionRules,
833 1229 blockingRules, blockingExceptionRules];
1230
1231 let genericSelectors = [];
834 let groupedElemhideFilters = new Map(); 1232 let groupedElemhideFilters = new Map();
1233
835 for (let filter of this.elemhideFilters) 1234 for (let filter of this.elemhideFilters)
836 { 1235 {
837 let result = convertElemHideFilter(filter, this.elemhideSelectorExceptions); 1236 let result = convertElemHideFilter(filter, this.elemhideSelectorExceptions);
838 if (!result) 1237 if (!result)
839 continue; 1238 continue;
840 1239
841 if (result.matchDomains.length == 0) 1240 if (result.matchDomains.length == 0)
842 result.matchDomains = ["^https?://"]; 1241 {
843 1242 genericSelectors.push(result.selector);
844 for (let matchDomain of result.matchDomains) 1243 }
845 { 1244 else
846 let group = groupedElemhideFilters.get(matchDomain) || []; 1245 {
847 group.push(result.selector); 1246 for (let matchDomain of result.matchDomains)
848 groupedElemhideFilters.set(matchDomain, group); 1247 {
849 } 1248 let group = groupedElemhideFilters.get(matchDomain) || [];
850 } 1249 group.push(result.selector);
1250 groupedElemhideFilters.set(matchDomain, group);
1251 }
1252 }
1253 }
1254
1255 // Separate out the element hiding exceptions that have only a hostname part
1256 // from the rest. This allows us to implement a workaround for issue #5345
1257 // (WebKit bug #167423), but as a bonus it also reduces the number of
1258 // generated rules. The downside is that the exception will only apply to the
1259 // top-level document, not to iframes. We have to live with this until the
1260 // WebKit bug is fixed in all supported versions of Safari.
1261 // https://bugs.webkit.org/show_bug.cgi?id=167423
1262 //
1263 // Note that as a result of this workaround we end up with a huge rule set in
1264 // terms of the amount of memory used. This can cause Node.js to throw
1265 // "JavaScript heap out of memory". To avoid this, call Node.js with
1266 // --max_old_space_size=4096
1267 let elemhideExceptionDomains = extractFilterDomains(this.elemhideExceptions);
1268
1269 let genericSelectorExceptionDomains =
1270 extractFilterDomains(this.generichideExceptions);
1271 elemhideExceptionDomains.forEach(name =>
1272 {
1273 genericSelectorExceptionDomains.add(name);
1274 });
1275
1276 addCSSRules(cssRules, genericSelectors, null,
1277 genericSelectorExceptionDomains);
1278
1279 // Filter out whitelisted domains.
1280 elemhideExceptionDomains.forEach(domain =>
1281 groupedElemhideFilters.delete(domain));
851 1282
852 groupedElemhideFilters.forEach((selectors, matchDomain) => 1283 groupedElemhideFilters.forEach((selectors, matchDomain) =>
853 { 1284 {
854 while (selectors.length) 1285 addCSSRules(cssRules, selectors, matchDomain, elemhideExceptionDomains);
855 { 1286 });
856 let selector = selectors.splice(0, selectorLimit).join(", "); 1287
857 1288 let requestFilterExceptionDomains = [];
858 // As of Safari 9.0 element IDs are matched as lowercase. We work around 1289 for (let filter of this.genericblockExceptions)
859 // this by converting to the attribute format [id="elementID"] 1290 {
860 selector = convertIDSelectorsToAttributeSelectors(selector); 1291 let parsed = parseFilterRegexpSource(filter.regexpSource);
861 1292 if (parsed.hostname)
862 rules.push({ 1293 requestFilterExceptionDomains.push(parsed.hostname);
863 trigger: {"url-filter": matchDomain, 1294 }
864 "url-filter-is-case-sensitive": true}, 1295
865 action: {type: "css-display-none", 1296 for (let filter of this.requestFilters)
866 selector: selector} 1297 {
1298 convertFilterAddRules(blockingRules, filter, "block", true,
1299 requestFilterExceptionDomains);
1300 }
1301
1302 for (let filter of this.requestExceptions)
1303 {
1304 convertFilterAddRules(blockingExceptionRules, filter,
1305 "ignore-previous-rules", true);
1306 }
1307
1308 return async(ruleGroups, (group, index) => () =>
1309 {
1310 let next = () =>
1311 {
1312 if (index == ruleGroups.length - 1)
1313 return ruleGroups.reduce((all, rules) => all.concat(rules), []);
1314 };
1315
1316 if (this.options.merge == "all" ||
1317 (this.options.merge == "auto" &&
1318 ruleGroups.reduce((n, group) => n + group.length, 0) > 50000))
1319 {
1320 return mergeRules(ruleGroups[index], this.options.merge == "all")
1321 .then(rules =>
1322 {
1323 ruleGroups[index] = rules;
1324 return next();
867 }); 1325 });
868 } 1326 }
1327
1328 return next();
869 }); 1329 });
870
871 for (let filter of this.elemhideExceptions)
872 convertFilterAddRules(rules, filter, "ignore-previous-rules", false);
873 for (let filter of this.requestFilters)
874 convertFilterAddRules(rules, filter, "block", true);
875 for (let filter of this.requestExceptions)
876 convertFilterAddRules(rules, filter, "ignore-previous-rules", true);
877
878 rules = rules.filter(rule => !hasNonASCI(rule));
879
880 if (merge)
881 {
882 // If the more specific options are specified (e.g. "advanced" and
883 // "exhaustive"), they override the more general options (e.g. "fast").
884 let mergeOptions = {
885 advanced: advancedMerge || (!fastMerge && advancedMerge != false),
886 exhaustive: exhaustiveMerge || (!fastMerge && exhaustiveMerge != false)
887 };
888
889 rules = mergeCloselyMatchingRules(rules, mergeOptions);
890 }
891
892 return rules;
893 }; 1330 };
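
Finally, a usage sketch (not part of the patch) for the public API. The module paths and filter texts are illustrative; Filter.fromText() comes from the adblockpluscore filterClasses module that this file already requires:

let {Filter} = require("filterClasses");
let {ContentBlockerList} = require("./lib/abp2blocklist");

let list = new ContentBlockerList({merge: "auto"});

let filterTexts = [
  "||example.com/ads/*$image",
  "##.ad-banner",
  "@@||example.org^$document"
];

for (let text of filterTexts)
  list.addFilter(Filter.fromText(text));

list.generateRules().then(rules =>
{
  console.log(JSON.stringify(rules, null, 2));
});
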