Pull request: 3846 filter lists ids

Merge in DNS/adguard-home from 3846-list-ids to master

Closes #3846.

Squashed commit of the following:

commit 02a12fc27bc5d3cf1a17fd43c6f05e2c389bd71d
Author: Ildar Kamalov <ik@adguard.com>
Date:   Fri Nov 26 16:58:13 2021 +0300

    client: fix name

commit 6220570e6e9c968f0d3fa9d02c12099ce66aaaad
Author: Ildar Kamalov <ik@adguard.com>
Date:   Fri Nov 26 16:46:54 2021 +0300

    client: handle special filter ids

commit dcdeb2d7f4500aab6ce5ffe642bdaacf291f5951
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Nov 26 15:52:06 2021 +0300

    all: mv constants, imp config

commit 8ceb4a2b351e595929d8b2af564c6d0267afa230
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Nov 26 15:04:36 2021 +0300

    all: fix custom list id, log changes

commit acb8b456e7f41a556da34cf10647eecee058beec
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Thu Nov 25 20:04:37 2021 +0300

    all: rm global ctx, add const flt ids
This commit is contained in:
Eugene Burkov 2021-11-26 18:25:43 +03:00
parent 936a7057fd
commit 4f50519b9f
17 changed files with 211 additions and 146 deletions

View File

@ -625,5 +625,7 @@
"filter_allowlist": "WARNING: This action also will exclude the rule \"{{disallowed_rule}}\" from the list of allowed clients.", "filter_allowlist": "WARNING: This action also will exclude the rule \"{{disallowed_rule}}\" from the list of allowed clients.",
"last_rule_in_allowlist": "Cannot disallow this client because excluding the rule \"{{disallowed_rule}}\" will DISABLE \"Allowed clients\" list.", "last_rule_in_allowlist": "Cannot disallow this client because excluding the rule \"{{disallowed_rule}}\" will DISABLE \"Allowed clients\" list.",
"experimental": "Experimental", "experimental": "Experimental",
"use_saved_key": "Use the previously saved key" "use_saved_key": "Use the previously saved key",
"parental_control": "Parental control",
"safe_browsing": "Safe browsing"
} }

View File

@ -528,8 +528,14 @@ export const DETAILED_DATE_FORMAT_OPTIONS = {
month: 'long', month: 'long',
}; };
export const CUSTOM_FILTERING_RULES_ID = 0; export const SPECIAL_FILTER_ID = {
export const SYSTEM_HOSTS_FILTER_ID = -1; CUSTOM_FILTERING_RULES: 0,
SYSTEM_HOSTS: -1,
BLOCKED_SERVICES: -2,
PARENTAL: -3,
SAFE_BROWSING: -4,
SAFE_SEARCH: -5,
};
export const BLOCK_ACTIONS = { export const BLOCK_ACTIONS = {
BLOCK: 'block', BLOCK: 'block',

View File

@ -13,7 +13,6 @@ import {
ADDRESS_TYPES, ADDRESS_TYPES,
CHECK_TIMEOUT, CHECK_TIMEOUT,
COMMENT_LINE_DEFAULT_TOKEN, COMMENT_LINE_DEFAULT_TOKEN,
CUSTOM_FILTERING_RULES_ID,
DEFAULT_DATE_FORMAT_OPTIONS, DEFAULT_DATE_FORMAT_OPTIONS,
DEFAULT_LANGUAGE, DEFAULT_LANGUAGE,
DEFAULT_TIME_FORMAT, DEFAULT_TIME_FORMAT,
@ -26,7 +25,7 @@ import {
STANDARD_DNS_PORT, STANDARD_DNS_PORT,
STANDARD_HTTPS_PORT, STANDARD_HTTPS_PORT,
STANDARD_WEB_PORT, STANDARD_WEB_PORT,
SYSTEM_HOSTS_FILTER_ID, SPECIAL_FILTER_ID,
} from './constants'; } from './constants';
/** /**
@ -774,6 +773,30 @@ export const sortIp = (a, b) => {
} }
}; };
/**
* @param {number} filterId
* @returns {string}
*/
export const getSpecialFilterName = (filterId) => {
switch (filterId) {
case SPECIAL_FILTER_ID.CUSTOM_FILTERING_RULES:
return i18n.t('custom_filter_rules');
case SPECIAL_FILTER_ID.SYSTEM_HOSTS:
return i18n.t('system_host_files');
case SPECIAL_FILTER_ID.BLOCKED_SERVICES:
return i18n.t('blocked_services');
case SPECIAL_FILTER_ID.PARENTAL:
return i18n.t('parental_control');
case SPECIAL_FILTER_ID.SAFE_BROWSING:
return i18n.t('safe_browsing');
case SPECIAL_FILTER_ID.SAFE_SEARCH:
return i18n.t('safe_search');
default:
return i18n.t('unknown_filter', { filterId });
}
};
/** /**
* @param {array} filters * @param {array} filters
* @param {array} whitelistFilters * @param {array} whitelistFilters
@ -785,15 +808,11 @@ export const getFilterName = (
filters, filters,
whitelistFilters, whitelistFilters,
filterId, filterId,
customFilterTranslationKey = 'custom_filter_rules',
resolveFilterName = (filter) => (filter ? filter.name : i18n.t('unknown_filter', { filterId })), resolveFilterName = (filter) => (filter ? filter.name : i18n.t('unknown_filter', { filterId })),
) => { ) => {
if (filterId === CUSTOM_FILTERING_RULES_ID) { const specialFilterIds = Object.values(SPECIAL_FILTER_ID);
return i18n.t(customFilterTranslationKey); if (specialFilterIds.includes(filterId)) {
} return getSpecialFilterName(filterId);
if (filterId === SYSTEM_HOSTS_FILTER_ID) {
return i18n.t('system_host_files');
} }
const matchIdPredicate = (filter) => filter.id === filterId; const matchIdPredicate = (filter) => filter.id === filterId;

View File

@ -102,6 +102,9 @@ type HostsContainer struct {
// embedded to implement MatchRequest and Translate for *HostsContainer. // embedded to implement MatchRequest and Translate for *HostsContainer.
requestMatcher requestMatcher
// listID is the identifier for the list of generated rules.
listID int
// done is the channel to sign closing the container. // done is the channel to sign closing the container.
done chan struct{} done chan struct{}
@ -124,9 +127,11 @@ type HostsContainer struct {
const ErrNoHostsPaths errors.Error = "no valid paths to hosts files provided" const ErrNoHostsPaths errors.Error = "no valid paths to hosts files provided"
// NewHostsContainer creates a container of hosts, that watches the paths with // NewHostsContainer creates a container of hosts, that watches the paths with
// w. paths shouldn't be empty and each of paths should locate either a file or // w. listID is used as an identifier of the underlying rules list. paths
// a directory in fsys. fsys and w must be non-nil. // shouldn't be empty and each of paths should locate either a file or a
// directory in fsys. fsys and w must be non-nil.
func NewHostsContainer( func NewHostsContainer(
listID int,
fsys fs.FS, fsys fs.FS,
w aghos.FSWatcher, w aghos.FSWatcher,
paths ...string, paths ...string,
@ -149,6 +154,7 @@ func NewHostsContainer(
requestMatcher: requestMatcher{ requestMatcher: requestMatcher{
stateLock: &sync.RWMutex{}, stateLock: &sync.RWMutex{},
}, },
listID: listID,
done: make(chan struct{}, 1), done: make(chan struct{}, 1),
updates: make(chan *netutil.IPMap, 1), updates: make(chan *netutil.IPMap, 1),
fsys: fsys, fsys: fsys,
@ -507,10 +513,9 @@ func (hp *hostsParser) sendUpd(ch chan *netutil.IPMap) {
} }
// newStrg creates a new rules storage from parsed data. // newStrg creates a new rules storage from parsed data.
func (hp *hostsParser) newStrg() (s *filterlist.RuleStorage, err error) { func (hp *hostsParser) newStrg(id int) (s *filterlist.RuleStorage, err error) {
return filterlist.NewRuleStorage([]filterlist.RuleList{&filterlist.StringRuleList{ return filterlist.NewRuleStorage([]filterlist.RuleList{&filterlist.StringRuleList{
// TODO(e.burkov): Make configurable. ID: id,
ID: -1,
RulesText: hp.rulesBuilder.String(), RulesText: hp.rulesBuilder.String(),
IgnoreCosmetic: true, IgnoreCosmetic: true,
}}) }})
@ -538,7 +543,7 @@ func (hc *HostsContainer) refresh() (err error) {
hc.last = hp.table.ShallowClone() hc.last = hp.table.ShallowClone()
var rulesStrg *filterlist.RuleStorage var rulesStrg *filterlist.RuleStorage
if rulesStrg, err = hp.newStrg(); err != nil { if rulesStrg, err = hp.newStrg(hc.listID); err != nil {
return fmt.Errorf("initializing rules storage: %w", err) return fmt.Errorf("initializing rules storage: %w", err)
} }

View File

@ -73,7 +73,7 @@ func TestNewHostsContainer(t *testing.T) {
return eventsCh return eventsCh
} }
hc, err := NewHostsContainer(testFS, &aghtest.FSWatcher{ hc, err := NewHostsContainer(0, testFS, &aghtest.FSWatcher{
OnEvents: onEvents, OnEvents: onEvents,
OnAdd: onAdd, OnAdd: onAdd,
OnClose: func() (err error) { panic("not implemented") }, OnClose: func() (err error) { panic("not implemented") },
@ -98,7 +98,7 @@ func TestNewHostsContainer(t *testing.T) {
t.Run("nil_fs", func(t *testing.T) { t.Run("nil_fs", func(t *testing.T) {
require.Panics(t, func() { require.Panics(t, func() {
_, _ = NewHostsContainer(nil, &aghtest.FSWatcher{ _, _ = NewHostsContainer(0, nil, &aghtest.FSWatcher{
// Those shouldn't panic. // Those shouldn't panic.
OnEvents: func() (e <-chan struct{}) { return nil }, OnEvents: func() (e <-chan struct{}) { return nil },
OnAdd: func(name string) (err error) { return nil }, OnAdd: func(name string) (err error) { return nil },
@ -109,7 +109,7 @@ func TestNewHostsContainer(t *testing.T) {
t.Run("nil_watcher", func(t *testing.T) { t.Run("nil_watcher", func(t *testing.T) {
require.Panics(t, func() { require.Panics(t, func() {
_, _ = NewHostsContainer(testFS, nil, p) _, _ = NewHostsContainer(0, testFS, nil, p)
}) })
}) })
@ -122,7 +122,7 @@ func TestNewHostsContainer(t *testing.T) {
OnClose: func() (err error) { panic("not implemented") }, OnClose: func() (err error) { panic("not implemented") },
} }
hc, err := NewHostsContainer(testFS, errWatcher, p) hc, err := NewHostsContainer(0, testFS, errWatcher, p)
require.ErrorIs(t, err, errOnAdd) require.ErrorIs(t, err, errOnAdd)
assert.Nil(t, hc) assert.Nil(t, hc)
@ -164,7 +164,7 @@ func TestHostsContainer_Refresh(t *testing.T) {
OnClose: func() (err error) { panic("not implemented") }, OnClose: func() (err error) { panic("not implemented") },
} }
hc, err := NewHostsContainer(testFS, w, dirname) hc, err := NewHostsContainer(0, testFS, w, dirname)
require.NoError(t, err) require.NoError(t, err)
checkRefresh := func(t *testing.T, wantHosts *stringutil.Set) { checkRefresh := func(t *testing.T, wantHosts *stringutil.Set) {
@ -291,6 +291,8 @@ func TestHostsContainer_PathsToPatterns(t *testing.T) {
} }
func TestHostsContainer(t *testing.T) { func TestHostsContainer(t *testing.T) {
const listID = 1234
testdata := os.DirFS("./testdata") testdata := os.DirFS("./testdata")
nRewrites := func(t *testing.T, res *urlfilter.DNSResult, n int) (rws []*rules.DNSRewrite) { nRewrites := func(t *testing.T, res *urlfilter.DNSResult, n int) (rws []*rules.DNSRewrite) {
@ -300,6 +302,8 @@ func TestHostsContainer(t *testing.T) {
assert.Len(t, rewrites, n) assert.Len(t, rewrites, n)
for _, rewrite := range rewrites { for _, rewrite := range rewrites {
require.Equal(t, listID, rewrite.FilterListID)
rw := rewrite.DNSRewrite rw := rewrite.DNSRewrite
require.NotNil(t, rw) require.NotNil(t, rw)
@ -382,7 +386,7 @@ func TestHostsContainer(t *testing.T) {
OnClose: func() (err error) { panic("not implemented") }, OnClose: func() (err error) { panic("not implemented") },
} }
hc, err := NewHostsContainer(testdata, &stubWatcher, "etc_hosts") hc, err := NewHostsContainer(listID, testdata, &stubWatcher, "etc_hosts")
require.NoError(t, err) require.NoError(t, err)
for _, tc := range testCases { for _, tc := range testCases {

View File

@ -1078,7 +1078,7 @@ func TestPTRResponseFromHosts(t *testing.T) {
} }
var eventsCalledCounter uint32 var eventsCalledCounter uint32
hc, err := aghnet.NewHostsContainer(testFS, &aghtest.FSWatcher{ hc, err := aghnet.NewHostsContainer(0, testFS, &aghtest.FSWatcher{
OnEvents: func() (e <-chan struct{}) { OnEvents: func() (e <-chan struct{}) {
assert.Equal(t, uint32(1), atomic.AddUint32(&eventsCalledCounter, 1)) assert.Equal(t, uint32(1), atomic.AddUint32(&eventsCalledCounter, 1))

View File

@ -239,7 +239,7 @@ func initBlockedServices() {
for _, s := range serviceRulesArray { for _, s := range serviceRulesArray {
netRules := []*rules.NetworkRule{} netRules := []*rules.NetworkRule{}
for _, text := range s.rules { for _, text := range s.rules {
rule, err := rules.NewNetworkRule(text, 0) rule, err := rules.NewNetworkRule(text, BlockedSvcsListID)
if err != nil { if err != nil {
log.Error("rules.NewNetworkRule: %s rule: %s", err, text) log.Error("rules.NewNetworkRule: %s rule: %s", err, text)
continue continue

View File

@ -49,7 +49,7 @@ func TestDNSFilter_CheckHostRules_dnsrewrite(t *testing.T) {
|1.2.3.5.in-addr.arpa^$dnsrewrite=NOERROR;PTR;new-ptr-with-dot. |1.2.3.5.in-addr.arpa^$dnsrewrite=NOERROR;PTR;new-ptr-with-dot.
` `
f := newForTest(nil, []Filter{{ID: 0, Data: []byte(text)}}) f := newForTest(t, nil, []Filter{{ID: 0, Data: []byte(text)}})
setts := &Settings{ setts := &Settings{
FilteringEnabled: true, FilteringEnabled: true,
} }

View File

@ -4,6 +4,7 @@ package filtering
import ( import (
"context" "context"
"fmt" "fmt"
"io/fs"
"net" "net"
"net/http" "net/http"
"os" "os"
@ -16,6 +17,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghnet" "github.com/AdguardTeam/AdGuardHome/internal/aghnet"
"github.com/AdguardTeam/dnsproxy/upstream" "github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/cache" "github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log" "github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/stringutil" "github.com/AdguardTeam/golibs/stringutil"
"github.com/AdguardTeam/urlfilter" "github.com/AdguardTeam/urlfilter"
@ -24,6 +26,18 @@ import (
"github.com/miekg/dns" "github.com/miekg/dns"
) )
// The IDs of built-in filter lists.
//
// Keep in sync with client/src/helpers/contants.js.
const (
CustomListID = -iota
SysHostsListID
BlockedSvcsListID
ParentalListID
SafeBrowsingListID
SafeSearchListID
)
// ServiceEntry - blocked service array element // ServiceEntry - blocked service array element
type ServiceEntry struct { type ServiceEntry struct {
Name string Name string
@ -125,6 +139,10 @@ type DNSFilter struct {
parentalUpstream upstream.Upstream parentalUpstream upstream.Upstream
safeBrowsingUpstream upstream.Upstream safeBrowsingUpstream upstream.Upstream
safebrowsingCache cache.Cache
parentalCache cache.Cache
safeSearchCache cache.Cache
Config // for direct access by library users, even a = assignment Config // for direct access by library users, even a = assignment
// confLock protects Config. // confLock protects Config.
confLock sync.RWMutex confLock sync.RWMutex
@ -340,14 +358,6 @@ func (d *DNSFilter) reset() {
} }
} }
type dnsFilterContext struct {
safebrowsingCache cache.Cache
parentalCache cache.Cache
safeSearchCache cache.Cache
}
var gctx dnsFilterContext
// ResultRule contains information about applied rules. // ResultRule contains information about applied rules.
type ResultRule struct { type ResultRule struct {
// Text is the text of the rule. // Text is the text of the rule.
@ -598,71 +608,70 @@ func matchBlockedServicesRules(
// Adding rule and matching against the rules // Adding rule and matching against the rules
// //
// fileExists returns true if file exists. func newRuleStorage(filters []Filter) (rs *filterlist.RuleStorage, err error) {
func fileExists(fn string) bool { lists := make([]filterlist.RuleList, 0, len(filters))
_, err := os.Stat(fn)
return err == nil
}
func createFilteringEngine(filters []Filter) (*filterlist.RuleStorage, *urlfilter.DNSEngine, error) {
listArray := []filterlist.RuleList{}
for _, f := range filters { for _, f := range filters {
var list filterlist.RuleList switch id := int(f.ID); {
case len(f.Data) != 0:
if f.ID == 0 { lists = append(lists, &filterlist.StringRuleList{
list = &filterlist.StringRuleList{ ID: id,
ID: 0,
RulesText: string(f.Data), RulesText: string(f.Data),
IgnoreCosmetic: true, IgnoreCosmetic: true,
} })
} else if !fileExists(f.FilePath) { case f.FilePath == "":
list = &filterlist.StringRuleList{ continue
ID: int(f.ID), case runtime.GOOS == "windows":
IgnoreCosmetic: true, // On Windows we don't pass a file to urlfilter because it's
} // difficult to update this file while it's being used.
} else if runtime.GOOS == "windows" { var data []byte
// On Windows we don't pass a file to urlfilter because data, err = os.ReadFile(f.FilePath)
// it's difficult to update this file while it's being if errors.Is(err, fs.ErrNotExist) {
// used. continue
data, err := os.ReadFile(f.FilePath) } else if err != nil {
if err != nil { return nil, fmt.Errorf("reading filter content: %w", err)
return nil, nil, fmt.Errorf("reading filter content: %w", err)
} }
list = &filterlist.StringRuleList{ lists = append(lists, &filterlist.StringRuleList{
ID: int(f.ID), ID: id,
RulesText: string(data), RulesText: string(data),
IgnoreCosmetic: true, IgnoreCosmetic: true,
})
default:
var list *filterlist.FileRuleList
list, err = filterlist.NewFileRuleList(id, f.FilePath, true)
if errors.Is(err, fs.ErrNotExist) {
continue
} else if err != nil {
return nil, fmt.Errorf("creating file rule list with %q: %w", f.FilePath, err)
} }
} else {
var err error lists = append(lists, list)
list, err = filterlist.NewFileRuleList(int(f.ID), f.FilePath, true)
if err != nil {
return nil, nil, fmt.Errorf("filterlist.NewFileRuleList(): %s: %w", f.FilePath, err)
}
} }
listArray = append(listArray, list)
} }
rulesStorage, err := filterlist.NewRuleStorage(listArray) rs, err = filterlist.NewRuleStorage(lists)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("filterlist.NewRuleStorage(): %w", err) return nil, fmt.Errorf("creating rule stroage: %w", err)
} }
filteringEngine := urlfilter.NewDNSEngine(rulesStorage)
return rulesStorage, filteringEngine, nil return rs, nil
} }
// Initialize urlfilter objects. // Initialize urlfilter objects.
func (d *DNSFilter) initFiltering(allowFilters, blockFilters []Filter) error { func (d *DNSFilter) initFiltering(allowFilters, blockFilters []Filter) error {
rulesStorage, filteringEngine, err := createFilteringEngine(blockFilters) rulesStorage, err := newRuleStorage(blockFilters)
if err != nil { if err != nil {
return err return err
} }
rulesStorageAllow, filteringEngineAllow, err := createFilteringEngine(allowFilters)
rulesStorageAllow, err := newRuleStorage(allowFilters)
if err != nil { if err != nil {
return err return err
} }
filteringEngine := urlfilter.NewDNSEngine(rulesStorage)
filteringEngineAllow := urlfilter.NewDNSEngine(rulesStorageAllow)
func() { func() {
d.engineLock.Lock() d.engineLock.Lock()
defer d.engineLock.Unlock() defer d.engineLock.Unlock()
@ -855,43 +864,37 @@ func makeResult(matchedRules []rules.Rule, reason Reason) (res Result) {
} }
} }
// InitModule manually initializes blocked services map. // InitModule manually initializes blocked services map using blockedSvcListID
// as list ID for the rules.
func InitModule() { func InitModule() {
initBlockedServices() initBlockedServices()
} }
// New creates properly initialized DNS Filter that is ready to be used. // New creates properly initialized DNS Filter that is ready to be used.
func New(c *Config, blockFilters []Filter) *DNSFilter { func New(c *Config, blockFilters []Filter) (d *DNSFilter) {
var resolver Resolver = net.DefaultResolver d = &DNSFilter{
resolver: net.DefaultResolver,
}
if c != nil { if c != nil {
cacheConf := cache.Config{
d.safebrowsingCache = cache.New(cache.Config{
EnableLRU: true, EnableLRU: true,
} MaxSize: c.SafeBrowsingCacheSize,
})
if gctx.safebrowsingCache == nil { d.safeSearchCache = cache.New(cache.Config{
cacheConf.MaxSize = c.SafeBrowsingCacheSize EnableLRU: true,
gctx.safebrowsingCache = cache.New(cacheConf) MaxSize: c.SafeSearchCacheSize,
} })
d.parentalCache = cache.New(cache.Config{
if gctx.safeSearchCache == nil { EnableLRU: true,
cacheConf.MaxSize = c.SafeSearchCacheSize MaxSize: c.ParentalCacheSize,
gctx.safeSearchCache = cache.New(cacheConf) })
}
if gctx.parentalCache == nil {
cacheConf.MaxSize = c.ParentalCacheSize
gctx.parentalCache = cache.New(cacheConf)
}
if c.CustomResolver != nil { if c.CustomResolver != nil {
resolver = c.CustomResolver d.resolver = c.CustomResolver
} }
} }
d := &DNSFilter{
resolver: resolver,
}
d.hostCheckers = []hostChecker{{ d.hostCheckers = []hostChecker{{
check: d.matchSysHosts, check: d.matchSysHosts,
name: "hosts container", name: "hosts container",

View File

@ -27,11 +27,11 @@ var setts = Settings{
// Helpers. // Helpers.
func purgeCaches() { func purgeCaches(d *DNSFilter) {
for _, c := range []cache.Cache{ for _, c := range []cache.Cache{
gctx.safebrowsingCache, d.safebrowsingCache,
gctx.parentalCache, d.parentalCache,
gctx.safeSearchCache, d.safeSearchCache,
} { } {
if c != nil { if c != nil {
c.Clear() c.Clear()
@ -39,7 +39,7 @@ func purgeCaches() {
} }
} }
func newForTest(c *Config, filters []Filter) *DNSFilter { func newForTest(t testing.TB, c *Config, filters []Filter) *DNSFilter {
setts = Settings{ setts = Settings{
ProtectionEnabled: true, ProtectionEnabled: true,
FilteringEnabled: true, FilteringEnabled: true,
@ -54,7 +54,8 @@ func newForTest(c *Config, filters []Filter) *DNSFilter {
setts.ParentalEnabled = c.ParentalEnabled setts.ParentalEnabled = c.ParentalEnabled
} }
d := New(c, filters) d := New(c, filters)
purgeCaches() purgeCaches(d)
return d return d
} }
@ -105,7 +106,7 @@ func TestEtcHostsMatching(t *testing.T) {
filters := []Filter{{ filters := []Filter{{
ID: 0, Data: []byte(text), ID: 0, Data: []byte(text),
}} }}
d := newForTest(nil, filters) d := newForTest(t, nil, filters)
t.Cleanup(d.Close) t.Cleanup(d.Close)
d.checkMatchIP(t, "google.com", addr, dns.TypeA) d.checkMatchIP(t, "google.com", addr, dns.TypeA)
@ -170,7 +171,7 @@ func TestSafeBrowsing(t *testing.T) {
aghtest.ReplaceLogWriter(t, logOutput) aghtest.ReplaceLogWriter(t, logOutput)
aghtest.ReplaceLogLevel(t, log.DEBUG) aghtest.ReplaceLogLevel(t, log.DEBUG)
d := newForTest(&Config{SafeBrowsingEnabled: true}, nil) d := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
const matching = "wmconvirus.narod.ru" const matching = "wmconvirus.narod.ru"
d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{ d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{
@ -193,7 +194,7 @@ func TestSafeBrowsing(t *testing.T) {
} }
func TestParallelSB(t *testing.T) { func TestParallelSB(t *testing.T) {
d := newForTest(&Config{SafeBrowsingEnabled: true}, nil) d := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
const matching = "wmconvirus.narod.ru" const matching = "wmconvirus.narod.ru"
d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{ d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{
@ -217,7 +218,7 @@ func TestParallelSB(t *testing.T) {
// Safe Search. // Safe Search.
func TestSafeSearch(t *testing.T) { func TestSafeSearch(t *testing.T) {
d := newForTest(&Config{SafeSearchEnabled: true}, nil) d := newForTest(t, &Config{SafeSearchEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
val, ok := d.SafeSearchDomain("www.google.com") val, ok := d.SafeSearchDomain("www.google.com")
require.True(t, ok) require.True(t, ok)
@ -226,7 +227,9 @@ func TestSafeSearch(t *testing.T) {
} }
func TestCheckHostSafeSearchYandex(t *testing.T) { func TestCheckHostSafeSearchYandex(t *testing.T) {
d := newForTest(&Config{SafeSearchEnabled: true}, nil) d := newForTest(t, &Config{
SafeSearchEnabled: true,
}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
yandexIP := net.IPv4(213, 180, 193, 56) yandexIP := net.IPv4(213, 180, 193, 56)
@ -249,13 +252,14 @@ func TestCheckHostSafeSearchYandex(t *testing.T) {
require.Len(t, res.Rules, 1) require.Len(t, res.Rules, 1)
assert.Equal(t, yandexIP, res.Rules[0].IP) assert.Equal(t, yandexIP, res.Rules[0].IP)
assert.EqualValues(t, SafeSearchListID, res.Rules[0].FilterListID)
}) })
} }
} }
func TestCheckHostSafeSearchGoogle(t *testing.T) { func TestCheckHostSafeSearchGoogle(t *testing.T) {
resolver := &aghtest.TestResolver{} resolver := &aghtest.TestResolver{}
d := newForTest(&Config{ d := newForTest(t, &Config{
SafeSearchEnabled: true, SafeSearchEnabled: true,
CustomResolver: resolver, CustomResolver: resolver,
}, nil) }, nil)
@ -282,12 +286,13 @@ func TestCheckHostSafeSearchGoogle(t *testing.T) {
require.Len(t, res.Rules, 1) require.Len(t, res.Rules, 1)
assert.Equal(t, ip, res.Rules[0].IP) assert.Equal(t, ip, res.Rules[0].IP)
assert.EqualValues(t, SafeSearchListID, res.Rules[0].FilterListID)
}) })
} }
} }
func TestSafeSearchCacheYandex(t *testing.T) { func TestSafeSearchCacheYandex(t *testing.T) {
d := newForTest(nil, nil) d := newForTest(t, nil, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
const domain = "yandex.ru" const domain = "yandex.ru"
@ -301,7 +306,7 @@ func TestSafeSearchCacheYandex(t *testing.T) {
yandexIP := net.IPv4(213, 180, 193, 56) yandexIP := net.IPv4(213, 180, 193, 56)
d = newForTest(&Config{SafeSearchEnabled: true}, nil) d = newForTest(t, &Config{SafeSearchEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
res, err = d.CheckHost(domain, dns.TypeA, &setts) res, err = d.CheckHost(domain, dns.TypeA, &setts)
@ -312,7 +317,7 @@ func TestSafeSearchCacheYandex(t *testing.T) {
assert.Equal(t, res.Rules[0].IP, yandexIP) assert.Equal(t, res.Rules[0].IP, yandexIP)
// Check cache. // Check cache.
cachedValue, isFound := getCachedResult(gctx.safeSearchCache, domain) cachedValue, isFound := getCachedResult(d.safeSearchCache, domain)
require.True(t, isFound) require.True(t, isFound)
require.Len(t, cachedValue.Rules, 1) require.Len(t, cachedValue.Rules, 1)
@ -321,7 +326,7 @@ func TestSafeSearchCacheYandex(t *testing.T) {
func TestSafeSearchCacheGoogle(t *testing.T) { func TestSafeSearchCacheGoogle(t *testing.T) {
resolver := &aghtest.TestResolver{} resolver := &aghtest.TestResolver{}
d := newForTest(&Config{ d := newForTest(t, &Config{
CustomResolver: resolver, CustomResolver: resolver,
}, nil) }, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
@ -334,7 +339,7 @@ func TestSafeSearchCacheGoogle(t *testing.T) {
require.Empty(t, res.Rules) require.Empty(t, res.Rules)
d = newForTest(&Config{SafeSearchEnabled: true}, nil) d = newForTest(t, &Config{SafeSearchEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
d.resolver = resolver d.resolver = resolver
@ -361,7 +366,7 @@ func TestSafeSearchCacheGoogle(t *testing.T) {
assert.True(t, res.Rules[0].IP.Equal(ip)) assert.True(t, res.Rules[0].IP.Equal(ip))
// Check cache. // Check cache.
cachedValue, isFound := getCachedResult(gctx.safeSearchCache, domain) cachedValue, isFound := getCachedResult(d.safeSearchCache, domain)
require.True(t, isFound) require.True(t, isFound)
require.Len(t, cachedValue.Rules, 1) require.Len(t, cachedValue.Rules, 1)
@ -375,7 +380,7 @@ func TestParentalControl(t *testing.T) {
aghtest.ReplaceLogWriter(t, logOutput) aghtest.ReplaceLogWriter(t, logOutput)
aghtest.ReplaceLogLevel(t, log.DEBUG) aghtest.ReplaceLogLevel(t, log.DEBUG)
d := newForTest(&Config{ParentalEnabled: true}, nil) d := newForTest(t, &Config{ParentalEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
const matching = "pornhub.com" const matching = "pornhub.com"
d.SetParentalUpstream(&aghtest.TestBlockUpstream{ d.SetParentalUpstream(&aghtest.TestBlockUpstream{
@ -679,7 +684,7 @@ func TestMatching(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(fmt.Sprintf("%s-%s", tc.name, tc.host), func(t *testing.T) { t.Run(fmt.Sprintf("%s-%s", tc.name, tc.host), func(t *testing.T) {
filters := []Filter{{ID: 0, Data: []byte(tc.rules)}} filters := []Filter{{ID: 0, Data: []byte(tc.rules)}}
d := newForTest(nil, filters) d := newForTest(t, nil, filters)
t.Cleanup(d.Close) t.Cleanup(d.Close)
res, err := d.CheckHost(tc.host, tc.wantDNSType, &setts) res, err := d.CheckHost(tc.host, tc.wantDNSType, &setts)
@ -705,7 +710,7 @@ func TestWhitelist(t *testing.T) {
whiteFilters := []Filter{{ whiteFilters := []Filter{{
ID: 0, Data: []byte(whiteRules), ID: 0, Data: []byte(whiteRules),
}} }}
d := newForTest(nil, filters) d := newForTest(t, nil, filters)
err := d.SetFilters(filters, whiteFilters, false) err := d.SetFilters(filters, whiteFilters, false)
require.NoError(t, err) require.NoError(t, err)
@ -750,7 +755,7 @@ func applyClientSettings(setts *Settings) {
} }
func TestClientSettings(t *testing.T) { func TestClientSettings(t *testing.T) {
d := newForTest( d := newForTest(t,
&Config{ &Config{
ParentalEnabled: true, ParentalEnabled: true,
SafeBrowsingEnabled: false, SafeBrowsingEnabled: false,
@ -829,7 +834,7 @@ func TestClientSettings(t *testing.T) {
// Benchmarks. // Benchmarks.
func BenchmarkSafeBrowsing(b *testing.B) { func BenchmarkSafeBrowsing(b *testing.B) {
d := newForTest(&Config{SafeBrowsingEnabled: true}, nil) d := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
b.Cleanup(d.Close) b.Cleanup(d.Close)
blocked := "wmconvirus.narod.ru" blocked := "wmconvirus.narod.ru"
d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{ d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{
@ -845,7 +850,7 @@ func BenchmarkSafeBrowsing(b *testing.B) {
} }
func BenchmarkSafeBrowsingParallel(b *testing.B) { func BenchmarkSafeBrowsingParallel(b *testing.B) {
d := newForTest(&Config{SafeBrowsingEnabled: true}, nil) d := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
b.Cleanup(d.Close) b.Cleanup(d.Close)
blocked := "wmconvirus.narod.ru" blocked := "wmconvirus.narod.ru"
d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{ d.SetSafeBrowsingUpstream(&aghtest.TestBlockUpstream{
@ -863,7 +868,7 @@ func BenchmarkSafeBrowsingParallel(b *testing.B) {
} }
func BenchmarkSafeSearch(b *testing.B) { func BenchmarkSafeSearch(b *testing.B) {
d := newForTest(&Config{SafeSearchEnabled: true}, nil) d := newForTest(b, &Config{SafeSearchEnabled: true}, nil)
b.Cleanup(d.Close) b.Cleanup(d.Close)
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
val, ok := d.SafeSearchDomain("www.google.com") val, ok := d.SafeSearchDomain("www.google.com")
@ -874,7 +879,7 @@ func BenchmarkSafeSearch(b *testing.B) {
} }
func BenchmarkSafeSearchParallel(b *testing.B) { func BenchmarkSafeSearchParallel(b *testing.B) {
d := newForTest(&Config{SafeSearchEnabled: true}, nil) d := newForTest(b, &Config{SafeSearchEnabled: true}, nil)
b.Cleanup(d.Close) b.Cleanup(d.Close)
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
for pb.Next() { for pb.Next() {

View File

@ -12,7 +12,7 @@ import (
// TODO(e.burkov): All the tests in this file may and should me merged together. // TODO(e.burkov): All the tests in this file may and should me merged together.
func TestRewrites(t *testing.T) { func TestRewrites(t *testing.T) {
d := newForTest(nil, nil) d := newForTest(t, nil, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
d.Rewrites = []RewriteEntry{{ d.Rewrites = []RewriteEntry{{
@ -163,7 +163,7 @@ func TestRewrites(t *testing.T) {
} }
func TestRewritesLevels(t *testing.T) { func TestRewritesLevels(t *testing.T) {
d := newForTest(nil, nil) d := newForTest(t, nil, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
// Exact host, wildcard L2, wildcard L3. // Exact host, wildcard L2, wildcard L3.
d.Rewrites = []RewriteEntry{{ d.Rewrites = []RewriteEntry{{
@ -209,7 +209,7 @@ func TestRewritesLevels(t *testing.T) {
} }
func TestRewritesExceptionCNAME(t *testing.T) { func TestRewritesExceptionCNAME(t *testing.T) {
d := newForTest(nil, nil) d := newForTest(t, nil, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
// Wildcard and exception for a sub-domain. // Wildcard and exception for a sub-domain.
d.Rewrites = []RewriteEntry{{ d.Rewrites = []RewriteEntry{{
@ -257,7 +257,7 @@ func TestRewritesExceptionCNAME(t *testing.T) {
} }
func TestRewritesExceptionIP(t *testing.T) { func TestRewritesExceptionIP(t *testing.T) {
d := newForTest(nil, nil) d := newForTest(t, nil, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
// Exception for AAAA record. // Exception for AAAA record.
d.Rewrites = []RewriteEntry{{ d.Rewrites = []RewriteEntry{{

View File

@ -318,7 +318,7 @@ func (d *DNSFilter) checkSafeBrowsing(
sctx := &sbCtx{ sctx := &sbCtx{
host: host, host: host,
svc: "SafeBrowsing", svc: "SafeBrowsing",
cache: gctx.safebrowsingCache, cache: d.safebrowsingCache,
cacheTime: d.Config.CacheTime, cacheTime: d.Config.CacheTime,
} }
@ -326,7 +326,8 @@ func (d *DNSFilter) checkSafeBrowsing(
IsFiltered: true, IsFiltered: true,
Reason: FilteredSafeBrowsing, Reason: FilteredSafeBrowsing,
Rules: []*ResultRule{{ Rules: []*ResultRule{{
Text: "adguard-malware-shavar", Text: "adguard-malware-shavar",
FilterListID: SafeBrowsingListID,
}}, }},
} }
@ -351,7 +352,7 @@ func (d *DNSFilter) checkParental(
sctx := &sbCtx{ sctx := &sbCtx{
host: host, host: host,
svc: "Parental", svc: "Parental",
cache: gctx.parentalCache, cache: d.parentalCache,
cacheTime: d.Config.CacheTime, cacheTime: d.Config.CacheTime,
} }
@ -359,7 +360,8 @@ func (d *DNSFilter) checkParental(
IsFiltered: true, IsFiltered: true,
Reason: FilteredParental, Reason: FilteredParental,
Rules: []*ResultRule{{ Rules: []*ResultRule{{
Text: "parental CATEGORY_BLACKLISTED", Text: "parental CATEGORY_BLACKLISTED",
FilterListID: ParentalListID,
}}, }},
} }

View File

@ -108,7 +108,7 @@ func TestSafeBrowsingCache(t *testing.T) {
} }
func TestSBPC_checkErrorUpstream(t *testing.T) { func TestSBPC_checkErrorUpstream(t *testing.T) {
d := newForTest(&Config{SafeBrowsingEnabled: true}, nil) d := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
ups := &aghtest.TestErrUpstream{} ups := &aghtest.TestErrUpstream{}
@ -130,7 +130,7 @@ func TestSBPC_checkErrorUpstream(t *testing.T) {
} }
func TestSBPC(t *testing.T) { func TestSBPC(t *testing.T) {
d := newForTest(&Config{SafeBrowsingEnabled: true}, nil) d := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
t.Cleanup(d.Close) t.Cleanup(d.Close)
const hostname = "example.org" const hostname = "example.org"
@ -147,22 +147,22 @@ func TestSBPC(t *testing.T) {
name string name string
block bool block bool
}{{ }{{
testCache: gctx.safebrowsingCache, testCache: d.safebrowsingCache,
testFunc: d.checkSafeBrowsing, testFunc: d.checkSafeBrowsing,
name: "sb_no_block", name: "sb_no_block",
block: false, block: false,
}, { }, {
testCache: gctx.safebrowsingCache, testCache: d.safebrowsingCache,
testFunc: d.checkSafeBrowsing, testFunc: d.checkSafeBrowsing,
name: "sb_block", name: "sb_block",
block: true, block: true,
}, { }, {
testCache: gctx.parentalCache, testCache: d.parentalCache,
testFunc: d.checkParental, testFunc: d.checkParental,
name: "pc_no_block", name: "pc_no_block",
block: false, block: false,
}, { }, {
testCache: gctx.parentalCache, testCache: d.parentalCache,
testFunc: d.checkParental, testFunc: d.checkParental,
name: "pc_block", name: "pc_block",
block: true, block: true,
@ -217,6 +217,6 @@ func TestSBPC(t *testing.T) {
assert.Equal(t, 1, ups.RequestsCount()) assert.Equal(t, 1, ups.RequestsCount())
}) })
purgeCaches() purgeCaches(d)
} }
} }

View File

@ -84,7 +84,7 @@ func (d *DNSFilter) checkSafeSearch(
} }
// Check cache. Return cached result if it was found // Check cache. Return cached result if it was found
cachedValue, isFound := getCachedResult(gctx.safeSearchCache, host) cachedValue, isFound := getCachedResult(d.safeSearchCache, host)
if isFound { if isFound {
// atomic.AddUint64(&gctx.stats.Safesearch.CacheHits, 1) // atomic.AddUint64(&gctx.stats.Safesearch.CacheHits, 1)
log.Tracef("SafeSearch: found in cache: %s", host) log.Tracef("SafeSearch: found in cache: %s", host)
@ -99,12 +99,14 @@ func (d *DNSFilter) checkSafeSearch(
res = Result{ res = Result{
IsFiltered: true, IsFiltered: true,
Reason: FilteredSafeSearch, Reason: FilteredSafeSearch,
Rules: []*ResultRule{{}}, Rules: []*ResultRule{{
FilterListID: SafeSearchListID,
}},
} }
if ip := net.ParseIP(safeHost); ip != nil { if ip := net.ParseIP(safeHost); ip != nil {
res.Rules[0].IP = ip res.Rules[0].IP = ip
valLen := d.setCacheResult(gctx.safeSearchCache, host, res) valLen := d.setCacheResult(d.safeSearchCache, host, res)
log.Debug("SafeSearch: stored in cache: %s (%d bytes)", host, valLen) log.Debug("SafeSearch: stored in cache: %s (%d bytes)", host, valLen)
return res, nil return res, nil
@ -123,7 +125,7 @@ func (d *DNSFilter) checkSafeSearch(
res.Rules[0].IP = ip res.Rules[0].IP = ip
l := d.setCacheResult(gctx.safeSearchCache, host, res) l := d.setCacheResult(d.safeSearchCache, host, res)
log.Debug("SafeSearch: stored in cache: %s (%d bytes)", host, l) log.Debug("SafeSearch: stored in cache: %s (%d bytes)", host, l)
return res, nil return res, nil

View File

@ -711,6 +711,7 @@ func enableFilters(async bool) {
func enableFiltersLocked(async bool) { func enableFiltersLocked(async bool) {
filters := []filtering.Filter{{ filters := []filtering.Filter{{
ID: filtering.CustomListID,
Data: []byte(strings.Join(config.UserRules, "\n")), Data: []byte(strings.Join(config.UserRules, "\n")),
}} }}

View File

@ -244,6 +244,7 @@ func setupHostsContainer() (err error) {
} }
Context.etcHosts, err = aghnet.NewHostsContainer( Context.etcHosts, err = aghnet.NewHostsContainer(
filtering.SysHostsListID,
aghos.RootDirFS(), aghos.RootDirFS(),
Context.hostsWatcher, Context.hostsWatcher,
aghnet.DefaultHostsPaths()..., aghnet.DefaultHostsPaths()...,

View File

@ -4,6 +4,21 @@
## v0.107: API changes ## v0.107: API changes
### New constant values for `filter_list_id` field in `ResultRule`
* Value of `0` is now used for the custom filtering rules list.
* Value of `-1` is now used for rules generated from the operating system hosts
files.
* Value of `-2` is now used for blocked services' rules.
* Value of `-3` is now used for rules generated by the parental control web service.
* Value of `-4` is now used for rules generated by the safe browsing web service.
* Value of `-5` is now used for rules generated by the safe search web service.
### New possible value of `"name"` field in `QueryLogItemClient` ### New possible value of `"name"` field in `QueryLogItemClient`
* The value of `"name"` field in `GET /control/querylog` method is never empty: * The value of `"name"` field in `GET /control/querylog` method is never empty: