Added filterId to the querylog
Updated the openapi.yaml accordingly.
Some minor refactoring/renaming.
Fix other review comments.
This commit is contained in:
parent 760e3596b6
commit 591065aa3a
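For reference, this is the shape a /control/querylog entry takes once the change below is in. The JSON field names (elapsedMs, rule, filterId) are the ones used in the diff and in the openapi.yaml example; the Go struct and program here are purely illustrative and are not part of the commit.

package main

import (
	"encoding/json"
	"fmt"
)

// queryLogEntry is an illustrative client-side view of one element returned
// by GET /control/querylog; only fields touched by this commit are shown.
type queryLogEntry struct {
	Reason    string `json:"reason"`
	ElapsedMs string `json:"elapsedMs"` // renamed from elapsed_ms
	Time      string `json:"time"`
	Rule      string `json:"rule,omitempty"`
	FilterID  int    `json:"filterId,omitempty"` // new in this commit
}

func main() {
	// Sample values taken from the openapi.yaml example further down.
	raw := `{"reason":"DNSFILTER_FILTERED_SAFEBROWSING","elapsedMs":"132.110929","time":"2018-07-16T22:24:02+03:00","rule":"adguard-malware-shavar","filterId":1}`
	var e queryLogEntry
	if err := json.Unmarshal([]byte(raw), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Rule, e.FilterID) // adguard-malware-shavar 1
}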
app.go (10 changes)
@@ -119,6 +119,8 @@ func main() {
	if err != nil {
		log.Fatal(err)
	}

	// override bind host/port from the console
	if bindHost != nil {
		config.BindHost = *bindHost
	}

@@ -156,7 +158,7 @@ func main() {
	address := net.JoinHostPort(config.BindHost, strconv.Itoa(config.BindPort))

-	runFilterRefreshers()
+	runFiltersUpdatesTimer()

	http.Handle("/", optionalAuthHandler(http.FileServer(box)))
	registerControlHandlers()

@@ -309,6 +311,12 @@ func upgradeConfigSchema(oldVersion int, newVersion int) error {
	if err != nil {
		log.Fatal(err)
	}

	// Saving it to the filters dir now
	err = filter.save()
	if err != nil {
		log.Fatal(err)
	}

	// No more "dnsfilter.txt", filters are now loaded from config.ourDataDir/filters/
@@ -64,7 +64,7 @@ export const normalizeFilteringStatus = (filteringStatus) => {
    const { enabled, filters, user_rules: userRules } = filteringStatus;
    const newFilters = filters ? filters.map((filter) => {
        const {
-           url, enabled, last_updated: lastUpdated = Date.now(), name = 'Default name', rules_count: rulesCount = 0,
+           url, enabled, lastUpdated: lastUpdated = Date.now(), name = 'Default name', rulesCount: rulesCount = 0,
        } = filter;

        return {
config.go (70 changes)
@@ -20,9 +20,14 @@ const SchemaVersion = 1
// Directory where we'll store all downloaded filters contents
const FiltersDir = "filters"

// Just a counter that we use for incrementing the filter ID
var NextFilterId int

// configuration is loaded from YAML
type configuration struct {
	// Config filename (can be overriden via the command line arguments)
	ourConfigFilename string
	// Basically, this is our working directory
	ourBinaryDir string
	// Directory to store data (i.e. filters contents)
	ourDataDir string

@@ -65,13 +70,13 @@ type coreDNSConfig struct {
}

type filter struct {
-	ID int `json:"ID"` // auto-assigned when filter is added
+	ID int `json:"id" yaml:"id"` // auto-assigned when filter is added (see NextFilterId)
	URL string `json:"url"`
	Name string `json:"name" yaml:"name"`
	Enabled bool `json:"enabled"`
-	RulesCount int `json:"rules_count" yaml:"-"`
+	RulesCount int `json:"rulesCount" yaml:"-"`
	contents []byte
-	LastUpdated time.Time `json:"last_updated" yaml:"-"`
+	LastUpdated time.Time `json:"lastUpdated" yaml:"last_updated"`
}

var defaultDNS = []string{"tls://1.1.1.1", "tls://1.0.0.1"}

@@ -107,7 +112,7 @@ var config = configuration{
func getUserFilter() filter {

	// TODO: This should be calculated when UserRules are set
-	contents := []byte{}
+	var contents []byte
	for _, rule := range config.UserRules {
		contents = append(contents, []byte(rule)...)
		contents = append(contents, '\n')

@@ -123,15 +128,16 @@ func getUserFilter() filter {
	return userFilter
}

// Loads configuration from the YAML file
func parseConfig() error {
-	configfile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
-	log.Printf("Reading YAML file: %s", configfile)
-	if _, err := os.Stat(configfile); os.IsNotExist(err) {
+	configFile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
+	log.Printf("Reading YAML file: %s", configFile)
+	if _, err := os.Stat(configFile); os.IsNotExist(err) {
		// do nothing, file doesn't exist
-		log.Printf("YAML file doesn't exist, skipping: %s", configfile)
+		log.Printf("YAML file doesn't exist, skipping: %s", configFile)
		return nil
	}
-	yamlFile, err := ioutil.ReadFile(configfile)
+	yamlFile, err := ioutil.ReadFile(configFile)
	if err != nil {
		log.Printf("Couldn't read config file: %s", err)
		return err

@@ -142,18 +148,42 @@ func parseConfig() error {
		return err
	}

	// Deduplicate filters
	{
		i := 0 // output index, used for deletion later
		urls := map[string]bool{}
		for _, filter := range config.Filters {
			if _, ok := urls[filter.URL]; !ok {
				// we didn't see it before, keep it
				urls[filter.URL] = true // remember the URL
				config.Filters[i] = filter
				i++
			}
		}
		// all entries we want to keep are at front, delete the rest
		config.Filters = config.Filters[:i]
	}

	// Set the next filter ID to max(filter.ID) + 1
	for i := range config.Filters {
		if NextFilterId < config.Filters[i].ID {
			NextFilterId = config.Filters[i].ID + 1
		}
	}

	return nil
}

// Saves configuration to the YAML file and also saves the user filter contents to a file
func writeConfig() error {
-	configfile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
-	log.Printf("Writing YAML file: %s", configfile)
+	configFile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
+	log.Printf("Writing YAML file: %s", configFile)
	yamlText, err := yaml.Marshal(&config)
	if err != nil {
		log.Printf("Couldn't generate YAML file: %s", err)
		return err
	}
-	err = writeFileSafe(configfile, yamlText)
+	err = writeFileSafe(configFile, yamlText)
	if err != nil {
		log.Printf("Couldn't save YAML config: %s", err)
		return err

@@ -173,14 +203,14 @@ func writeConfig() error {
// coredns config
// --------------
func writeCoreDNSConfig() error {
-	corefile := filepath.Join(config.ourBinaryDir, config.CoreDNS.coreFile)
-	log.Printf("Writing DNS config: %s", corefile)
-	configtext, err := generateCoreDNSConfigText()
+	coreFile := filepath.Join(config.ourBinaryDir, config.CoreDNS.coreFile)
+	log.Printf("Writing DNS config: %s", coreFile)
+	configText, err := generateCoreDNSConfigText()
	if err != nil {
		log.Printf("Couldn't generate DNS config: %s", err)
		return err
	}
-	err = writeFileSafe(corefile, []byte(configtext))
+	err = writeFileSafe(coreFile, []byte(configText))
	if err != nil {
		log.Printf("Couldn't save DNS config: %s", err)
		return err

@@ -227,7 +257,7 @@ const coreDNSConfigTemplate = `.:{{.Port}} {

var removeEmptyLines = regexp.MustCompile("([\t ]*\n)+")

-// generate config text
+// generate CoreDNS config text
func generateCoreDNSConfigText() (string, error) {
	t, err := template.New("config").Parse(coreDNSConfigTemplate)
	if err != nil {

@@ -264,9 +294,9 @@ func generateCoreDNSConfigText() (string, error) {
		log.Printf("Couldn't generate DNS config: %s", err)
		return "", err
	}
-	configtext := configBytes.String()
+	configText := configBytes.String()

	// remove empty lines from generated config
-	configtext = removeEmptyLines.ReplaceAllString(configtext, "\n")
-	return configtext, nil
+	configText = removeEmptyLines.ReplaceAllString(configText, "\n")
+	return configText, nil
}
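The new NextFilterId bookkeeping above boils down to "next ID = max(existing ID) + 1, starting from 0". A minimal, self-contained sketch of that loop (the filter struct is reduced to the one field the loop reads; this is illustrative, not code from the commit):

package main

import "fmt"

// minimal stand-in for the filter struct from config.go
type filter struct {
	ID int
}

// nextFilterID mirrors the loop added to parseConfig(): after loading,
// the counter ends up one greater than the largest filter ID found in
// the YAML config, so newly added filters get fresh IDs.
func nextFilterID(filters []filter) int {
	next := 0
	for i := range filters {
		if next < filters[i].ID {
			next = filters[i].ID + 1
		}
	}
	return next
}

func main() {
	fmt.Println(nextFilterID([]filter{{ID: 1}, {ID: 7}, {ID: 3}})) // 8
}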
control.go (233 changes)
@@ -15,14 +15,14 @@ import (
	"strings"
	"time"

-	coredns_plugin "github.com/AdguardTeam/AdGuardHome/coredns_plugin"
+	coreDnsPlugin "github.com/AdguardTeam/AdGuardHome/coredns_plugin"
	"github.com/miekg/dns"
	"gopkg.in/asaskevich/govalidator.v4"
)

const updatePeriod = time.Minute * 30

-var filterTitle = regexp.MustCompile(`^! Title: +(.*)$`)
+var filterTitleRegexp = regexp.MustCompile(`^! Title: +(.*)$`)

// cached version.json to avoid hammering github.io for each page reload
var versionCheckJSON []byte

@@ -39,7 +39,7 @@ var client = &http.Client{
// coredns run control
// -------------------
func tellCoreDNSToReload() {
-	coredns_plugin.Reload <- true
+	coreDnsPlugin.Reload <- true
}

func writeAllConfigsAndReloadCoreDNS() error {

@@ -63,6 +63,7 @@ func httpUpdateConfigReloadDNSReturnOK(w http.ResponseWriter, r *http.Request) {
	returnOK(w, r)
}

//noinspection GoUnusedParameter
func returnOK(w http.ResponseWriter, r *http.Request) {
	_, err := fmt.Fprintf(w, "OK\n")
	if err != nil {

@@ -72,6 +73,7 @@ func returnOK(w http.ResponseWriter, r *http.Request) {
	}
}

//noinspection GoUnusedParameter
func handleStatus(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"dns_address": config.BindHost,

@@ -236,7 +238,7 @@ func checkDNS(input string) error {

	resp, rtt, err := c.Exchange(&req, host)
	if err != nil {
-		return fmt.Errorf("Couldn't communicate with DNS server %s: %s", input, err)
+		return fmt.Errorf("couldn't communicate with DNS server %s: %s", input, err)
	}
	trace("exchange with %s took %v", input, rtt)
	if len(resp.Answer) != 1 {

@@ -253,7 +255,7 @@ func checkDNS(input string) error {

func sanitiseDNSServers(input string) ([]string, error) {
	fields := strings.Fields(input)
-	hosts := []string{}
+	hosts := make([]string, 0)
	for _, field := range fields {
		sanitized, err := sanitizeDNSServer(field)
		if err != nil {

@@ -291,7 +293,7 @@ func sanitizeDNSServer(input string) (string, error) {
	}
	ip := net.ParseIP(h)
	if ip == nil {
-		return "", fmt.Errorf("Invalid DNS server field: %s", h)
+		return "", fmt.Errorf("invalid DNS server field: %s", h)
	}
	}
	return prefix + host, nil

@@ -310,6 +312,7 @@ func appendPortIfMissing(prefix, input string) string {
	return net.JoinHostPort(input, port)
}

//noinspection GoUnusedParameter
func handleGetVersionJSON(w http.ResponseWriter, r *http.Request) {
	now := time.Now()
	if now.Sub(versionCheckLastTime) <= versionCheckPeriod && len(versionCheckJSON) != 0 {

@@ -365,6 +368,7 @@ func handleFilteringDisable(w http.ResponseWriter, r *http.Request) {
	httpUpdateConfigReloadDNSReturnOK(w, r)
}

//noinspection GoUnusedParameter
func handleFilteringStatus(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"enabled": config.CoreDNS.FilteringEnabled,

@@ -394,6 +398,7 @@ func handleFilteringStatus(w http.ResponseWriter, r *http.Request) {
	}
}

func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {

	filter := filter{}
	err := json.NewDecoder(r.Body).Decode(&filter)
	if err != nil {

@@ -401,7 +406,6 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {
		return
	}

	filter.Enabled = true
	if len(filter.URL) == 0 {
		http.Error(w, "URL parameter was not specified", 400)
		return
@@ -412,33 +416,48 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {
		return
	}

-	// check for duplicates
+	// Check for duplicates
	for i := range config.Filters {
		if config.Filters[i].URL == filter.URL {
-			errortext := fmt.Sprintf("Filter URL already added -- %s", filter.URL)
-			log.Println(errortext)
-			http.Error(w, errortext, http.StatusBadRequest)
+			errorText := fmt.Sprintf("Filter URL already added -- %s", filter.URL)
+			log.Println(errorText)
+			http.Error(w, errorText, http.StatusBadRequest)
			return
		}
	}

	// Set necessary properties
	filter.ID = NextFilterId
	filter.Enabled = true
	NextFilterId++

	// Download the filter contents
	ok, err := filter.update(true)
	if err != nil {
-		errortext := fmt.Sprintf("Couldn't fetch filter from url %s: %s", filter.URL, err)
-		log.Println(errortext)
-		http.Error(w, errortext, http.StatusBadRequest)
+		errorText := fmt.Sprintf("Couldn't fetch filter from url %s: %s", filter.URL, err)
+		log.Println(errorText)
+		http.Error(w, errorText, http.StatusBadRequest)
		return
	}
	if filter.RulesCount == 0 {
-		errortext := fmt.Sprintf("Filter at url %s has no rules (maybe it points to blank page?)", filter.URL)
-		log.Println(errortext)
-		http.Error(w, errortext, http.StatusBadRequest)
+		errorText := fmt.Sprintf("Filter at the url %s has no rules (maybe it points to blank page?)", filter.URL)
+		log.Println(errorText)
+		http.Error(w, errorText, http.StatusBadRequest)
		return
	}
	if !ok {
-		errortext := fmt.Sprintf("Filter at url %s is invalid (maybe it points to blank page?)", filter.URL)
-		log.Println(errortext)
-		http.Error(w, errortext, http.StatusBadRequest)
+		errorText := fmt.Sprintf("Filter at the url %s is invalid (maybe it points to blank page?)", filter.URL)
+		log.Println(errorText)
+		http.Error(w, errorText, http.StatusBadRequest)
		return
	}

	// Save the filter contents
	err = filter.save()
	if err != nil {
		errorText := fmt.Sprintf("Failed to save filter %d due to %s", filter.ID, err)
		log.Println(errorText)
		http.Error(w, errorText, http.StatusBadRequest)
		return
	}

@@ -446,9 +465,9 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {
	config.Filters = append(config.Filters, filter)
	err = writeAllConfigs()
	if err != nil {
-		errortext := fmt.Sprintf("Couldn't write config file: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, http.StatusInternalServerError)
+		errorText := fmt.Sprintf("Couldn't write config file: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, http.StatusInternalServerError)
		return
	}

@@ -456,19 +475,18 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {

	_, err = fmt.Fprintf(w, "OK %d rules\n", filter.RulesCount)
	if err != nil {
-		errortext := fmt.Sprintf("Couldn't write body: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, http.StatusInternalServerError)
+		errorText := fmt.Sprintf("Couldn't write body: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, http.StatusInternalServerError)
	}
}

// TODO: Start using filter ID
func handleFilteringRemoveURL(w http.ResponseWriter, r *http.Request) {
	parameters, err := parseParametersFromBody(r.Body)
	if err != nil {
-		errortext := fmt.Sprintf("failed to parse parameters from body: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, 400)
+		errorText := fmt.Sprintf("failed to parse parameters from body: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, 400)
		return
	}

@@ -492,8 +510,8 @@ func handleFilteringRemoveURL(w http.ResponseWriter, r *http.Request) {
		// Remove the filter file
		err := os.Remove(filter.getFilterFilePath())
		if err != nil {
-			errortext := fmt.Sprintf("Couldn't remove the filter file: %s", err)
-			http.Error(w, errortext, http.StatusInternalServerError)
+			errorText := fmt.Sprintf("Couldn't remove the filter file: %s", err)
+			http.Error(w, errorText, http.StatusInternalServerError)
			return
		}
	}

@@ -503,13 +521,12 @@ func handleFilteringRemoveURL(w http.ResponseWriter, r *http.Request) {
	httpUpdateConfigReloadDNSReturnOK(w, r)
}

// TODO: Start using filter ID
func handleFilteringEnableURL(w http.ResponseWriter, r *http.Request) {
	parameters, err := parseParametersFromBody(r.Body)
	if err != nil {
-		errortext := fmt.Sprintf("failed to parse parameters from body: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, 400)
+		errorText := fmt.Sprintf("failed to parse parameters from body: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, 400)
		return
	}

@@ -539,17 +556,16 @@ func handleFilteringEnableURL(w http.ResponseWriter, r *http.Request) {
	}

	// kick off refresh of rules from new URLs
-	refreshFiltersIfNeccessary()
+	checkFiltersUpdates(false)
	httpUpdateConfigReloadDNSReturnOK(w, r)
}

// TODO: Start using filter ID
func handleFilteringDisableURL(w http.ResponseWriter, r *http.Request) {
	parameters, err := parseParametersFromBody(r.Body)
	if err != nil {
-		errortext := fmt.Sprintf("failed to parse parameters from body: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, 400)
+		errorText := fmt.Sprintf("failed to parse parameters from body: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, 400)
		return
	}

@@ -584,9 +600,9 @@ func handleFilteringDisableURL(w http.ResponseWriter, r *http.Request) {
func handleFilteringSetRules(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
-		errortext := fmt.Sprintf("Failed to read request body: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, 400)
+		errorText := fmt.Sprintf("Failed to read request body: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, 400)
		return
	}
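Before downloading anything, handleFilteringAddURL now rejects duplicate URLs and assigns the next free ID. A simplified, self-contained sketch of just that bookkeeping (download, save and config writing from the real handler are omitted; nextFilterId stands in for the NextFilterId counter from config.go; this is illustrative, not code from the commit):

package main

import "fmt"

type filter struct {
	ID      int
	URL     string
	Enabled bool
}

var nextFilterId int
var filters []filter

// addFilter sketches the duplicate check and ID assignment done by the handler.
func addFilter(url string) (filter, error) {
	for i := range filters {
		if filters[i].URL == url {
			return filter{}, fmt.Errorf("Filter URL already added -- %s", url)
		}
	}
	f := filter{ID: nextFilterId, URL: url, Enabled: true}
	nextFilterId++
	filters = append(filters, f)
	return f, nil
}

func main() {
	f, _ := addFilter("https://example.org/filter.txt")
	fmt.Println(f.ID, f.Enabled) // 0 true
	_, err := addFilter("https://example.org/filter.txt")
	fmt.Println(err) // duplicate rejected
}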
@@ -596,56 +612,41 @@ func handleFilteringSetRules(w http.ResponseWriter, r *http.Request) {

func handleFilteringRefresh(w http.ResponseWriter, r *http.Request) {
	force := r.URL.Query().Get("force")
	if force != "" {
		config.Lock()
		for i := range config.Filters {
			filter := &config.Filters[i] // otherwise we will be operating on a copy
			filter.LastUpdated = time.Unix(0, 0)
		}
		config.Unlock() // not defer because refreshFiltersIfNeccessary locks it too
	}
-	updated := refreshFiltersIfNeccessary()
+	updated := checkFiltersUpdates(force != "")
	fmt.Fprintf(w, "OK %d filters updated\n", updated)
}

-func runFilterRefreshers() {
+// Sets up a timer that will be checking for filters updates periodically
+func runFiltersUpdatesTimer() {
	go func() {
-		for range time.Tick(time.Second) {
-			refreshFiltersIfNeccessary()
+		for range time.Tick(time.Minute) {
+			checkFiltersUpdates(false)
		}
	}()
}

-func refreshFiltersIfNeccessary() int {
+// Checks filters updates if necessary
+// If force is true, it ignores the filter.LastUpdated field value
+func checkFiltersUpdates(force bool) int {
	config.Lock()

	// deduplicate
	// TODO: move it somewhere else
	{
		i := 0 // output index, used for deletion later
		urls := map[string]bool{}
		for _, filter := range config.Filters {
			if _, ok := urls[filter.URL]; !ok {
				// we didn't see it before, keep it
				urls[filter.URL] = true // remember the URL
				config.Filters[i] = filter
				i++
			}
		}
		// all entries we want to keep are at front, delete the rest
		config.Filters = config.Filters[:i]
	}

	// fetch URLs
	updateCount := 0
	for i := range config.Filters {
		filter := &config.Filters[i] // otherwise we will be operating on a copy
-		updated, err := filter.update(false)
+		updated, err := filter.update(force)
		if err != nil {
			log.Printf("Failed to update filter %s: %s\n", filter.URL, err)
			continue
		}
		if updated {
			// Saving it to the filters dir now
			err = filter.save()
			if err != nil {
				log.Printf("Failed to save the updated filter %d: %s", filter.ID, err)
				continue
			}

			updateCount++
		}
	}

@@ -657,8 +658,32 @@ func refreshFiltersIfNeccessary() int {
	return updateCount
}

// A helper function that parses filter contents and returns a number of rules and a filter name (if there's any)
func parseFilterContents(contents []byte) (int, string) {
	lines := strings.Split(string(contents), "\n")
	rulesCount := 0
	name := ""
	seenTitle := false

	// Count lines in the filter
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if len(line) > 0 && line[0] == '!' {
			if m := filterTitleRegexp.FindAllStringSubmatch(line, -1); len(m) > 0 && len(m[0]) >= 2 && !seenTitle {
				name = m[0][1]
				seenTitle = true
			}
		} else if len(line) != 0 {
			rulesCount++
		}
	}

	return rulesCount, name
}

// Checks for filters updates
// If "force" is true -- does not check the filter's LastUpdated field
// Call "save" to persist the filter contents
func (filter *filter) update(force bool) (bool, error) {
	if !filter.Enabled {
		return false, nil
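parseFilterContents is self-contained, so it can be exercised directly. A small usage example (the function body is copied verbatim from the hunk above; the sample filter list is made up):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var filterTitleRegexp = regexp.MustCompile(`^! Title: +(.*)$`)

// Counts non-comment, non-empty lines and picks up the "! Title:" comment as the filter name.
func parseFilterContents(contents []byte) (int, string) {
	lines := strings.Split(string(contents), "\n")
	rulesCount := 0
	name := ""
	seenTitle := false
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if len(line) > 0 && line[0] == '!' {
			if m := filterTitleRegexp.FindAllStringSubmatch(line, -1); len(m) > 0 && len(m[0]) >= 2 && !seenTitle {
				name = m[0][1]
				seenTitle = true
			}
		} else if len(line) != 0 {
			rulesCount++
		}
	}
	return rulesCount, name
}

func main() {
	list := []byte("! Title: Example filter\n! Homepage: https://example.org\n||ads.example.com^\n||tracker.example.net^\n")
	count, name := parseFilterContents(list)
	fmt.Println(count, name) // 2 Example filter
}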
@@ -699,38 +724,22 @@ func (filter *filter) update(force bool) (bool, error) {
	}

	// Extract filter name and count number of rules
-	lines := strings.Split(string(body), "\n")
-	rulesCount := 0
-	seenTitle := false
+	rulesCount, filterName := parseFilterContents(body)

-	// Count lines in the filter
-	for _, line := range lines {
-		line = strings.TrimSpace(line)
-		if len(line) > 0 && line[0] == '!' {
-			if m := filterTitle.FindAllStringSubmatch(line, -1); len(m) > 0 && len(m[0]) >= 2 && !seenTitle {
-				filter.Name = m[0][1]
-				seenTitle = true
-			}
-		} else if len(line) != 0 {
-			rulesCount++
-		}
+	if filterName != "" {
+		filter.Name = filterName
	}

-	// Check if the filter was really changed
+	// Check if the filter has been really changed
	if bytes.Equal(filter.contents, body) {
		log.Printf("The filter %d text has not changed", filter.ID)
		return false, nil
	}

-	log.Printf("Filter %s updated: %d bytes, %d rules", filter.URL, len(body), rulesCount)
+	log.Printf("Filter %d has been updated: %d bytes, %d rules", filter.ID, len(body), rulesCount)
	filter.RulesCount = rulesCount
	filter.contents = body

	// Saving it to the filters dir now
	err = filter.save()
	if err != nil {
		return false, nil
	}

	return true, nil
}

@@ -745,7 +754,7 @@ func (filter *filter) save() error {
		return err
	}

-	return nil;
+	return nil
}

// loads filter contents from the file in config.ourDataDir

@@ -764,13 +773,18 @@ func (filter *filter) load() error {
		return err
	}

-	filterFile, err := ioutil.ReadFile(filterFilePath)
+	filterFileContents, err := ioutil.ReadFile(filterFilePath)
	if err != nil {
		return err
	}

-	log.Printf("Filter %d length is %d", filter.ID, len(filterFile))
-	filter.contents = filterFile
+	log.Printf("Filter %d length is %d", filter.ID, len(filterFileContents))
+	filter.contents = filterFileContents

	// Now extract the rules count
	rulesCount, _ := parseFilterContents(filter.contents)
	filter.RulesCount = rulesCount

	return nil
}

@@ -793,6 +807,7 @@ func handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
	httpUpdateConfigReloadDNSReturnOK(w, r)
}

//noinspection GoUnusedParameter
func handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"enabled": config.CoreDNS.SafeBrowsingEnabled,

@@ -868,6 +883,7 @@ func handleParentalDisable(w http.ResponseWriter, r *http.Request) {
	httpUpdateConfigReloadDNSReturnOK(w, r)
}

//noinspection GoUnusedParameter
func handleParentalStatus(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"enabled": config.CoreDNS.ParentalEnabled,

@@ -907,6 +923,7 @@ func handleSafeSearchDisable(w http.ResponseWriter, r *http.Request) {
	httpUpdateConfigReloadDNSReturnOK(w, r)
}

//noinspection GoUnusedParameter
func handleSafeSearchStatus(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"enabled": config.CoreDNS.SafeSearchEnabled,

@@ -933,15 +950,15 @@ func registerControlHandlers() {
	http.HandleFunc("/control/status", optionalAuth(ensureGET(handleStatus)))
	http.HandleFunc("/control/enable_protection", optionalAuth(ensurePOST(handleProtectionEnable)))
	http.HandleFunc("/control/disable_protection", optionalAuth(ensurePOST(handleProtectionDisable)))
-	http.HandleFunc("/control/querylog", optionalAuth(ensureGET(coredns_plugin.HandleQueryLog)))
+	http.HandleFunc("/control/querylog", optionalAuth(ensureGET(coreDnsPlugin.HandleQueryLog)))
	http.HandleFunc("/control/querylog_enable", optionalAuth(ensurePOST(handleQueryLogEnable)))
	http.HandleFunc("/control/querylog_disable", optionalAuth(ensurePOST(handleQueryLogDisable)))
	http.HandleFunc("/control/set_upstream_dns", optionalAuth(ensurePOST(handleSetUpstreamDNS)))
	http.HandleFunc("/control/test_upstream_dns", optionalAuth(ensurePOST(handleTestUpstreamDNS)))
-	http.HandleFunc("/control/stats_top", optionalAuth(ensureGET(coredns_plugin.HandleStatsTop)))
-	http.HandleFunc("/control/stats", optionalAuth(ensureGET(coredns_plugin.HandleStats)))
-	http.HandleFunc("/control/stats_history", optionalAuth(ensureGET(coredns_plugin.HandleStatsHistory)))
-	http.HandleFunc("/control/stats_reset", optionalAuth(ensurePOST(coredns_plugin.HandleStatsReset)))
+	http.HandleFunc("/control/stats_top", optionalAuth(ensureGET(coreDnsPlugin.HandleStatsTop)))
+	http.HandleFunc("/control/stats", optionalAuth(ensureGET(coreDnsPlugin.HandleStats)))
+	http.HandleFunc("/control/stats_history", optionalAuth(ensureGET(coreDnsPlugin.HandleStatsHistory)))
+	http.HandleFunc("/control/stats_reset", optionalAuth(ensurePOST(coreDnsPlugin.HandleStatsReset)))
	http.HandleFunc("/control/version.json", optionalAuth(handleGetVersionJSON))
	http.HandleFunc("/control/filtering/enable", optionalAuth(ensurePOST(handleFilteringEnable)))
	http.HandleFunc("/control/filtering/disable", optionalAuth(ensurePOST(handleFilteringDisable)))
@@ -25,7 +25,6 @@ const (
	queryLogFileName = "querylog.json" // .gz added during compression
	queryLogSize = 5000 // maximum API response for /querylog
	queryLogTopSize = 500 // Keep in memory only top N values
	queryLogAPIPort = "8618" // 8618 is sha512sum of "querylog" then each byte summed
)

var (

@@ -34,7 +33,6 @@ var (

	queryLogCache []*logEntry
	queryLogLock sync.RWMutex
	queryLogTime time.Time
)

type logEntry struct {

@@ -107,6 +105,7 @@ func logRequest(question *dns.Msg, answer *dns.Msg, result dnsfilter.Result, ela
	}
}

//noinspection GoUnusedParameter
func HandleQueryLog(w http.ResponseWriter, r *http.Request) {
	queryLogLock.RLock()
	values := make([]*logEntry, len(queryLogCache))

@@ -140,14 +139,14 @@ func HandleQueryLog(w http.ResponseWriter, r *http.Request) {
		}
	}

-	jsonentry := map[string]interface{}{
+	jsonEntry := map[string]interface{}{
		"reason": entry.Result.Reason.String(),
-		"elapsed_ms": strconv.FormatFloat(entry.Elapsed.Seconds()*1000, 'f', -1, 64),
+		"elapsedMs": strconv.FormatFloat(entry.Elapsed.Seconds()*1000, 'f', -1, 64),
		"time": entry.Time.Format(time.RFC3339),
		"client": entry.IP,
	}
	if q != nil {
-		jsonentry["question"] = map[string]interface{}{
+		jsonEntry["question"] = map[string]interface{}{
			"host": strings.ToLower(strings.TrimSuffix(q.Question[0].Name, ".")),
			"type": dns.Type(q.Question[0].Qtype).String(),
			"class": dns.Class(q.Question[0].Qclass).String(),

@@ -156,10 +155,11 @@ func HandleQueryLog(w http.ResponseWriter, r *http.Request) {

	if a != nil {
		status, _ := response.Typify(a, time.Now().UTC())
-		jsonentry["status"] = status.String()
+		jsonEntry["status"] = status.String()
	}
	if len(entry.Result.Rule) > 0 {
-		jsonentry["rule"] = entry.Result.Rule
+		jsonEntry["rule"] = entry.Result.Rule
+		jsonEntry["filterId"] = entry.Result.FilterID
	}

	if a != nil && len(a.Answer) > 0 {

@@ -202,26 +202,26 @@ func HandleQueryLog(w http.ResponseWriter, r *http.Request) {
			}
			answers = append(answers, answer)
		}
-		jsonentry["answer"] = answers
+		jsonEntry["answer"] = answers
	}

-		data = append(data, jsonentry)
+		data = append(data, jsonEntry)
	}

	jsonVal, err := json.Marshal(data)
	if err != nil {
-		errortext := fmt.Sprintf("Couldn't marshal data into json: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, http.StatusInternalServerError)
+		errorText := fmt.Sprintf("Couldn't marshal data into json: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	_, err = w.Write(jsonVal)
	if err != nil {
-		errortext := fmt.Sprintf("Unable to write response json: %s", err)
-		log.Println(errortext)
-		http.Error(w, errortext, http.StatusInternalServerError)
+		errorText := fmt.Sprintf("Unable to write response json: %s", err)
+		log.Println(errorText)
+		http.Error(w, errorText, http.StatusInternalServerError)
	}
}
@@ -499,6 +499,7 @@ func (rule *rule) match(host string) (Result, error) {
	if matched {
		res.Reason = FilteredBlackList
		res.IsFiltered = true
+		res.FilterID = rule.listID
		if rule.isWhitelist {
			res.Reason = NotFilteredWhiteList
			res.IsFiltered = false
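Taken together with the querylog change above, the filter ID now travels from the matched rule to the API response: rule.listID is copied into Result.FilterID, and HandleQueryLog exposes it as the filterId key. An illustrative, self-contained sketch of that last step (the result type here is a stand-in for dnsfilter.Result, not code from the commit):

package main

import (
	"encoding/json"
	"fmt"
)

// result is a reduced stand-in for dnsfilter.Result.
type result struct {
	Rule     string
	FilterID int
}

func main() {
	res := result{Rule: "||doubleclick.net^", FilterID: 1}
	entry := map[string]interface{}{
		"rule":     res.Rule,
		"filterId": res.FilterID, // new field exposed by /control/querylog
	}
	out, _ := json.Marshal(entry)
	fmt.Println(string(out)) // {"filterId":1,"rule":"||doubleclick.net^"}
}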
openapi.yaml (17 changes)
@@ -92,7 +92,7 @@ paths:
            - ttl: 55
              type: A
              value: 217.69.139.200
            elapsed_ms: '65.469556'
            elapsedMs: '65.469556'
            question:
              class: IN
              host: mail.ru

@@ -100,7 +100,7 @@ paths:
            reason: DNSFILTER_NOTFILTERED_NOTFOUND
            status: NOERROR
            time: '2018-07-16T22:24:02+03:00'
          - elapsed_ms: '0.15716999999999998'
          - elapsedMs: '0.15716999999999998'
            question:
              class: IN
              host: doubleclick.net

@@ -113,13 +113,14 @@ paths:
            - ttl: 299
              type: A
              value: 176.103.133.78
            elapsed_ms: '132.110929'
            elapsedMs: '132.110929'
            question:
              class: IN
              host: wmconvirus.narod.ru
              type: A
            reason: DNSFILTER_FILTERED_SAFEBROWSING
            rule: adguard-malware-shavar
            filterId: 1
            status: NOERROR
            time: '2018-07-16T22:24:02+03:00'
    /querylog_enable:

@@ -448,9 +449,13 @@ paths:
          examples:
            application/json:
              enabled: false
              urls:
              - 'https://filters.adtidy.org/windows/filters/1.txt'
              - 'https://filters.adtidy.org/windows/filters/2.txt'
              filters:
              - enabled: true
                id: 1
                lastUpdated: "2018-10-30T12:18:57.223101822+03:00"
                name: "AdGuard Simplified Domain Names filter"
                rulesCount: 24896
                url: "https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt"
              rules:
              - '@@||yandex.ru^|'
    /filtering/set_rules:
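Clients of /control/filtering/status see the renamed camelCase fields from the example above. An illustrative decoding sketch (the struct mirrors the json tags added in config.go; it is not code from the commit):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// filterInfo mirrors one element of "filters" in the /control/filtering/status example.
type filterInfo struct {
	ID          int       `json:"id"`
	Enabled     bool      `json:"enabled"`
	URL         string    `json:"url"`
	Name        string    `json:"name"`
	RulesCount  int       `json:"rulesCount"`  // was rules_count
	LastUpdated time.Time `json:"lastUpdated"` // was last_updated
}

func main() {
	raw := `{"id":1,"enabled":true,"url":"https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt","name":"AdGuard Simplified Domain Names filter","rulesCount":24896,"lastUpdated":"2018-10-30T12:18:57.223101822+03:00"}`
	var f filterInfo
	if err := json.Unmarshal([]byte(raw), &f); err != nil {
		panic(err)
	}
	fmt.Println(f.ID, f.Name, f.RulesCount) // 1 AdGuard Simplified Domain Names filter 24896
}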