Added filterId to the querylog

Updated the openapi.yaml accordingly.
Some minor refactoring/renaming.
Fixed other review comments.
parent 760e3596b6
commit 591065aa3a
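For reference, a minimal, self-contained sketch (not part of this commit) of the shape a query log entry has once filterId is included. The keys and most values mirror the openapi.yaml example at the end of this diff; the client address is made up.

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Hypothetical /control/querylog entry: camelCase keys (elapsedMs) plus
    // the new filterId that says which filter list the matched rule came from.
    entry := map[string]interface{}{
        "reason":    "DNSFILTER_FILTERED_SAFEBROWSING",
        "elapsedMs": "132.110929",
        "time":      "2018-07-16T22:24:02+03:00",
        "client":    "127.0.0.1", // made-up value
        "rule":      "adguard-malware-shavar",
        "filterId":  1,
    }
    out, _ := json.MarshalIndent(entry, "", "  ")
    fmt.Println(string(out))
}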
app.go: 10 changed lines

@@ -119,6 +119,8 @@ func main() {
 if err != nil {
 log.Fatal(err)
 }
+
+// override bind host/port from the console
 if bindHost != nil {
 config.BindHost = *bindHost
 }
@@ -156,7 +158,7 @@ func main() {

 address := net.JoinHostPort(config.BindHost, strconv.Itoa(config.BindPort))

-runFilterRefreshers()
+runFiltersUpdatesTimer()

 http.Handle("/", optionalAuthHandler(http.FileServer(box)))
 registerControlHandlers()
@@ -309,6 +311,12 @@ func upgradeConfigSchema(oldVersion int, newVersion int) error {
 if err != nil {
 log.Fatal(err)
 }
+
+// Saving it to the filters dir now
+err = filter.save()
+if err != nil {
+log.Fatal(err)
+}
 }

 // No more "dnsfilter.txt", filters are now loaded from config.ourDataDir/filters/

@@ -64,7 +64,7 @@ export const normalizeFilteringStatus = (filteringStatus) => {
 const { enabled, filters, user_rules: userRules } = filteringStatus;
 const newFilters = filters ? filters.map((filter) => {
 const {
-url, enabled, last_updated: lastUpdated = Date.now(), name = 'Default name', rules_count: rulesCount = 0,
+url, enabled, lastUpdated: lastUpdated = Date.now(), name = 'Default name', rulesCount: rulesCount = 0,
 } = filter;

 return {

config.go: 72 changed lines

@@ -20,10 +20,15 @@ const SchemaVersion = 1
 // Directory where we'll store all downloaded filters contents
 const FiltersDir = "filters"

+// Just a counter that we use for incrementing the filter ID
+var NextFilterId int
+
 // configuration is loaded from YAML
 type configuration struct {
+// Config filename (can be overriden via the command line arguments)
 ourConfigFilename string
-ourBinaryDir string
+// Basically, this is our working directory
+ourBinaryDir string
 // Directory to store data (i.e. filters contents)
 ourDataDir string

@@ -65,13 +70,13 @@ type coreDNSConfig struct {
 }

 type filter struct {
-ID int `json:"ID"` // auto-assigned when filter is added
+ID int `json:"id" yaml:"id"` // auto-assigned when filter is added (see NextFilterId)
 URL string `json:"url"`
 Name string `json:"name" yaml:"name"`
 Enabled bool `json:"enabled"`
-RulesCount int `json:"rules_count" yaml:"-"`
+RulesCount int `json:"rulesCount" yaml:"-"`
 contents []byte
-LastUpdated time.Time `json:"last_updated" yaml:"-"`
+LastUpdated time.Time `json:"lastUpdated" yaml:"last_updated"`
 }

 var defaultDNS = []string{"tls://1.1.1.1", "tls://1.0.0.1"}
@@ -107,7 +112,7 @@ var config = configuration{
 func getUserFilter() filter {

 // TODO: This should be calculated when UserRules are set
-contents := []byte{}
+var contents []byte
 for _, rule := range config.UserRules {
 contents = append(contents, []byte(rule)...)
 contents = append(contents, '\n')
@@ -123,15 +128,16 @@ func getUserFilter() filter {
 return userFilter
 }

+// Loads configuration from the YAML file
 func parseConfig() error {
-configfile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
-log.Printf("Reading YAML file: %s", configfile)
-if _, err := os.Stat(configfile); os.IsNotExist(err) {
+configFile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
+log.Printf("Reading YAML file: %s", configFile)
+if _, err := os.Stat(configFile); os.IsNotExist(err) {
 // do nothing, file doesn't exist
-log.Printf("YAML file doesn't exist, skipping: %s", configfile)
+log.Printf("YAML file doesn't exist, skipping: %s", configFile)
 return nil
 }
-yamlFile, err := ioutil.ReadFile(configfile)
+yamlFile, err := ioutil.ReadFile(configFile)
 if err != nil {
 log.Printf("Couldn't read config file: %s", err)
 return err
@@ -142,18 +148,42 @@ func parseConfig() error {
 return err
 }

+// Deduplicate filters
+{
+i := 0 // output index, used for deletion later
+urls := map[string]bool{}
+for _, filter := range config.Filters {
+if _, ok := urls[filter.URL]; !ok {
+// we didn't see it before, keep it
+urls[filter.URL] = true // remember the URL
+config.Filters[i] = filter
+i++
+}
+}
+// all entries we want to keep are at front, delete the rest
+config.Filters = config.Filters[:i]
+}
+
+// Set the next filter ID to max(filter.ID) + 1
+for i := range config.Filters {
+if NextFilterId < config.Filters[i].ID {
+NextFilterId = config.Filters[i].ID + 1
+}
+}
+
 return nil
 }

+// Saves configuration to the YAML file and also saves the user filter contents to a file
 func writeConfig() error {
-configfile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
-log.Printf("Writing YAML file: %s", configfile)
+configFile := filepath.Join(config.ourBinaryDir, config.ourConfigFilename)
+log.Printf("Writing YAML file: %s", configFile)
 yamlText, err := yaml.Marshal(&config)
 if err != nil {
 log.Printf("Couldn't generate YAML file: %s", err)
 return err
 }
-err = writeFileSafe(configfile, yamlText)
+err = writeFileSafe(configFile, yamlText)
 if err != nil {
 log.Printf("Couldn't save YAML config: %s", err)
 return err
@@ -173,14 +203,14 @@ func writeConfig() error {
 // coredns config
 // --------------
 func writeCoreDNSConfig() error {
-corefile := filepath.Join(config.ourBinaryDir, config.CoreDNS.coreFile)
-log.Printf("Writing DNS config: %s", corefile)
-configtext, err := generateCoreDNSConfigText()
+coreFile := filepath.Join(config.ourBinaryDir, config.CoreDNS.coreFile)
+log.Printf("Writing DNS config: %s", coreFile)
+configText, err := generateCoreDNSConfigText()
 if err != nil {
 log.Printf("Couldn't generate DNS config: %s", err)
 return err
 }
-err = writeFileSafe(corefile, []byte(configtext))
+err = writeFileSafe(coreFile, []byte(configText))
 if err != nil {
 log.Printf("Couldn't save DNS config: %s", err)
 return err
@@ -227,7 +257,7 @@ const coreDNSConfigTemplate = `.:{{.Port}} {

 var removeEmptyLines = regexp.MustCompile("([\t ]*\n)+")

-// generate config text
+// generate CoreDNS config text
 func generateCoreDNSConfigText() (string, error) {
 t, err := template.New("config").Parse(coreDNSConfigTemplate)
 if err != nil {
@@ -264,9 +294,9 @@ func generateCoreDNSConfigText() (string, error) {
 log.Printf("Couldn't generate DNS config: %s", err)
 return "", err
 }
-configtext := configBytes.String()
+configText := configBytes.String()

 // remove empty lines from generated config
-configtext = removeEmptyLines.ReplaceAllString(configtext, "\n")
-return configtext, nil
+configText = removeEmptyLines.ReplaceAllString(configText, "\n")
+return configText, nil
 }
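To make the tag renaming in the filter struct above easier to follow, here is a small, self-contained sketch (not part of the commit) that serializes a filter with the new JSON tags; it shows why the client helper change earlier in this diff and the openapi.yaml example at the end now read lastUpdated/rulesCount instead of last_updated/rules_count. The struct definition is copied from config.go above; the field values come from the openapi.yaml example.

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type filter struct {
    ID          int       `json:"id" yaml:"id"`
    URL         string    `json:"url"`
    Name        string    `json:"name" yaml:"name"`
    Enabled     bool      `json:"enabled"`
    RulesCount  int       `json:"rulesCount" yaml:"-"`
    contents    []byte    // unexported, never serialized
    LastUpdated time.Time `json:"lastUpdated" yaml:"last_updated"`
}

func main() {
    f := filter{
        ID:         1,
        URL:        "https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt",
        Name:       "AdGuard Simplified Domain Names filter",
        Enabled:    true,
        RulesCount: 24896,
    }
    out, _ := json.MarshalIndent(f, "", "  ")
    fmt.Println(string(out))
    // Prints the keys id, url, name, enabled, rulesCount, lastUpdated
    // (lastUpdated is the zero time here because it was not set).
}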

control.go: 235 changed lines

@@ -15,14 +15,14 @@ import (
 "strings"
 "time"

-coredns_plugin "github.com/AdguardTeam/AdGuardHome/coredns_plugin"
+coreDnsPlugin "github.com/AdguardTeam/AdGuardHome/coredns_plugin"
 "github.com/miekg/dns"
 "gopkg.in/asaskevich/govalidator.v4"
 )

 const updatePeriod = time.Minute * 30

-var filterTitle = regexp.MustCompile(`^! Title: +(.*)$`)
+var filterTitleRegexp = regexp.MustCompile(`^! Title: +(.*)$`)

 // cached version.json to avoid hammering github.io for each page reload
 var versionCheckJSON []byte
@@ -39,7 +39,7 @@ var client = &http.Client{
 // coredns run control
 // -------------------
 func tellCoreDNSToReload() {
-coredns_plugin.Reload <- true
+coreDnsPlugin.Reload <- true
 }

 func writeAllConfigsAndReloadCoreDNS() error {
@@ -63,6 +63,7 @@ func httpUpdateConfigReloadDNSReturnOK(w http.ResponseWriter, r *http.Request) {
 returnOK(w, r)
 }

+//noinspection GoUnusedParameter
 func returnOK(w http.ResponseWriter, r *http.Request) {
 _, err := fmt.Fprintf(w, "OK\n")
 if err != nil {
@@ -72,6 +73,7 @@ func returnOK(w http.ResponseWriter, r *http.Request) {
 }
 }

+//noinspection GoUnusedParameter
 func handleStatus(w http.ResponseWriter, r *http.Request) {
 data := map[string]interface{}{
 "dns_address": config.BindHost,
@@ -236,7 +238,7 @@ func checkDNS(input string) error {

 resp, rtt, err := c.Exchange(&req, host)
 if err != nil {
-return fmt.Errorf("Couldn't communicate with DNS server %s: %s", input, err)
+return fmt.Errorf("couldn't communicate with DNS server %s: %s", input, err)
 }
 trace("exchange with %s took %v", input, rtt)
 if len(resp.Answer) != 1 {
@@ -253,7 +255,7 @@ func checkDNS(input string) error {

 func sanitiseDNSServers(input string) ([]string, error) {
 fields := strings.Fields(input)
-hosts := []string{}
+hosts := make([]string, 0)
 for _, field := range fields {
 sanitized, err := sanitizeDNSServer(field)
 if err != nil {
@@ -291,7 +293,7 @@ func sanitizeDNSServer(input string) (string, error) {
 }
 ip := net.ParseIP(h)
 if ip == nil {
-return "", fmt.Errorf("Invalid DNS server field: %s", h)
+return "", fmt.Errorf("invalid DNS server field: %s", h)
 }
 }
 return prefix + host, nil
@@ -310,6 +312,7 @@ func appendPortIfMissing(prefix, input string) string {
 return net.JoinHostPort(input, port)
 }

+//noinspection GoUnusedParameter
 func handleGetVersionJSON(w http.ResponseWriter, r *http.Request) {
 now := time.Now()
 if now.Sub(versionCheckLastTime) <= versionCheckPeriod && len(versionCheckJSON) != 0 {
@@ -365,6 +368,7 @@ func handleFilteringDisable(w http.ResponseWriter, r *http.Request) {
 httpUpdateConfigReloadDNSReturnOK(w, r)
 }

+//noinspection GoUnusedParameter
 func handleFilteringStatus(w http.ResponseWriter, r *http.Request) {
 data := map[string]interface{}{
 "enabled": config.CoreDNS.FilteringEnabled,
@@ -394,6 +398,7 @@ func handleFilteringStatus(w http.ResponseWriter, r *http.Request) {
 }

 func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {
+
 filter := filter{}
 err := json.NewDecoder(r.Body).Decode(&filter)
 if err != nil {
@@ -401,7 +406,6 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {
 return
 }

-filter.Enabled = true
 if len(filter.URL) == 0 {
 http.Error(w, "URL parameter was not specified", 400)
 return
@@ -412,33 +416,48 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {
 return
 }

-// check for duplicates
+// Check for duplicates
 for i := range config.Filters {
 if config.Filters[i].URL == filter.URL {
-errortext := fmt.Sprintf("Filter URL already added -- %s", filter.URL)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusBadRequest)
+errorText := fmt.Sprintf("Filter URL already added -- %s", filter.URL)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusBadRequest)
 return
 }
 }

+// Set necessary properties
+filter.ID = NextFilterId
+filter.Enabled = true
+NextFilterId++
+
+// Download the filter contents
 ok, err := filter.update(true)
 if err != nil {
-errortext := fmt.Sprintf("Couldn't fetch filter from url %s: %s", filter.URL, err)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusBadRequest)
+errorText := fmt.Sprintf("Couldn't fetch filter from url %s: %s", filter.URL, err)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusBadRequest)
 return
 }
 if filter.RulesCount == 0 {
-errortext := fmt.Sprintf("Filter at url %s has no rules (maybe it points to blank page?)", filter.URL)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusBadRequest)
+errorText := fmt.Sprintf("Filter at the url %s has no rules (maybe it points to blank page?)", filter.URL)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusBadRequest)
 return
 }
 if !ok {
-errortext := fmt.Sprintf("Filter at url %s is invalid (maybe it points to blank page?)", filter.URL)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusBadRequest)
+errorText := fmt.Sprintf("Filter at the url %s is invalid (maybe it points to blank page?)", filter.URL)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusBadRequest)
+return
+}
+
+// Save the filter contents
+err = filter.save()
+if err != nil {
+errorText := fmt.Sprintf("Failed to save filter %d due to %s", filter.ID, err)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusBadRequest)
 return
 }

@@ -446,9 +465,9 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {
 config.Filters = append(config.Filters, filter)
 err = writeAllConfigs()
 if err != nil {
-errortext := fmt.Sprintf("Couldn't write config file: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusInternalServerError)
+errorText := fmt.Sprintf("Couldn't write config file: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusInternalServerError)
 return
 }

@@ -456,19 +475,18 @@ func handleFilteringAddURL(w http.ResponseWriter, r *http.Request) {

 _, err = fmt.Fprintf(w, "OK %d rules\n", filter.RulesCount)
 if err != nil {
-errortext := fmt.Sprintf("Couldn't write body: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusInternalServerError)
+errorText := fmt.Sprintf("Couldn't write body: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusInternalServerError)
 }
 }

-// TODO: Start using filter ID
 func handleFilteringRemoveURL(w http.ResponseWriter, r *http.Request) {
 parameters, err := parseParametersFromBody(r.Body)
 if err != nil {
-errortext := fmt.Sprintf("failed to parse parameters from body: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, 400)
+errorText := fmt.Sprintf("failed to parse parameters from body: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, 400)
 return
 }

@@ -492,8 +510,8 @@ func handleFilteringRemoveURL(w http.ResponseWriter, r *http.Request) {
 // Remove the filter file
 err := os.Remove(filter.getFilterFilePath())
 if err != nil {
-errortext := fmt.Sprintf("Couldn't remove the filter file: %s", err)
-http.Error(w, errortext, http.StatusInternalServerError)
+errorText := fmt.Sprintf("Couldn't remove the filter file: %s", err)
+http.Error(w, errorText, http.StatusInternalServerError)
 return
 }
 }
@@ -503,13 +521,12 @@ func handleFilteringRemoveURL(w http.ResponseWriter, r *http.Request) {
 httpUpdateConfigReloadDNSReturnOK(w, r)
 }

-// TODO: Start using filter ID
 func handleFilteringEnableURL(w http.ResponseWriter, r *http.Request) {
 parameters, err := parseParametersFromBody(r.Body)
 if err != nil {
-errortext := fmt.Sprintf("failed to parse parameters from body: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, 400)
+errorText := fmt.Sprintf("failed to parse parameters from body: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, 400)
 return
 }

@@ -539,17 +556,16 @@ func handleFilteringEnableURL(w http.ResponseWriter, r *http.Request) {
 }

 // kick off refresh of rules from new URLs
-refreshFiltersIfNeccessary()
+checkFiltersUpdates(false)
 httpUpdateConfigReloadDNSReturnOK(w, r)
 }

-// TODO: Start using filter ID
 func handleFilteringDisableURL(w http.ResponseWriter, r *http.Request) {
 parameters, err := parseParametersFromBody(r.Body)
 if err != nil {
-errortext := fmt.Sprintf("failed to parse parameters from body: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, 400)
+errorText := fmt.Sprintf("failed to parse parameters from body: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, 400)
 return
 }

@@ -584,9 +600,9 @@ func handleFilteringDisableURL(w http.ResponseWriter, r *http.Request) {
 func handleFilteringSetRules(w http.ResponseWriter, r *http.Request) {
 body, err := ioutil.ReadAll(r.Body)
 if err != nil {
-errortext := fmt.Sprintf("Failed to read request body: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, 400)
+errorText := fmt.Sprintf("Failed to read request body: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, 400)
 return
 }

@@ -596,56 +612,41 @@ func handleFilteringSetRules(w http.ResponseWriter, r *http.Request) {

 func handleFilteringRefresh(w http.ResponseWriter, r *http.Request) {
 force := r.URL.Query().Get("force")
-if force != "" {
-config.Lock()
-for i := range config.Filters {
-filter := &config.Filters[i] // otherwise we will be operating on a copy
-filter.LastUpdated = time.Unix(0, 0)
-}
-config.Unlock() // not defer because refreshFiltersIfNeccessary locks it too
-}
-updated := refreshFiltersIfNeccessary()
+updated := checkFiltersUpdates(force != "")
 fmt.Fprintf(w, "OK %d filters updated\n", updated)
 }

-func runFilterRefreshers() {
+// Sets up a timer that will be checking for filters updates periodically
+func runFiltersUpdatesTimer() {
 go func() {
-for range time.Tick(time.Second) {
-refreshFiltersIfNeccessary()
+for range time.Tick(time.Minute) {
+checkFiltersUpdates(false)
 }
 }()
 }

-func refreshFiltersIfNeccessary() int {
+// Checks filters updates if necessary
+// If force is true, it ignores the filter.LastUpdated field value
+func checkFiltersUpdates(force bool) int {
 config.Lock()

-// deduplicate
-// TODO: move it somewhere else
-{
-i := 0 // output index, used for deletion later
-urls := map[string]bool{}
-for _, filter := range config.Filters {
-if _, ok := urls[filter.URL]; !ok {
-// we didn't see it before, keep it
-urls[filter.URL] = true // remember the URL
-config.Filters[i] = filter
-i++
-}
-}
-// all entries we want to keep are at front, delete the rest
-config.Filters = config.Filters[:i]
-}
-
 // fetch URLs
 updateCount := 0
 for i := range config.Filters {
 filter := &config.Filters[i] // otherwise we will be operating on a copy
-updated, err := filter.update(false)
+updated, err := filter.update(force)
 if err != nil {
 log.Printf("Failed to update filter %s: %s\n", filter.URL, err)
 continue
 }
 if updated {
+// Saving it to the filters dir now
+err = filter.save()
+if err != nil {
+log.Printf("Failed to save the updated filter %d: %s", filter.ID, err)
+continue
+}
+
 updateCount++
 }
 }
@@ -657,8 +658,32 @@ func refreshFiltersIfNeccessary() int {
 return updateCount
 }

+// A helper function that parses filter contents and returns a number of rules and a filter name (if there's any)
+func parseFilterContents(contents []byte) (int, string) {
+lines := strings.Split(string(contents), "\n")
+rulesCount := 0
+name := ""
+seenTitle := false
+
+// Count lines in the filter
+for _, line := range lines {
+line = strings.TrimSpace(line)
+if len(line) > 0 && line[0] == '!' {
+if m := filterTitleRegexp.FindAllStringSubmatch(line, -1); len(m) > 0 && len(m[0]) >= 2 && !seenTitle {
+name = m[0][1]
+seenTitle = true
+}
+} else if len(line) != 0 {
+rulesCount++
+}
+}
+
+return rulesCount, name
+}
+
 // Checks for filters updates
 // If "force" is true -- does not check the filter's LastUpdated field
+// Call "save" to persist the filter contents
 func (filter *filter) update(force bool) (bool, error) {
 if !filter.Enabled {
 return false, nil
@@ -667,9 +692,9 @@ func (filter *filter) update(force bool) (bool, error) {
 return false, nil
 }

-log.Printf("Downloading update for filter %d", filter.ID)
+log.Printf("Downloading update for filter %d from %s", filter.ID, filter.URL)

-// use same update period for failed filter downloads to avoid flooding with requests
+// use the same update period for failed filter downloads to avoid flooding with requests
 filter.LastUpdated = time.Now()

 resp, err := client.Get(filter.URL)
@@ -699,38 +724,22 @@ func (filter *filter) update(force bool) (bool, error) {
 }

 // Extract filter name and count number of rules
-lines := strings.Split(string(body), "\n")
-rulesCount := 0
-seenTitle := false
+rulesCount, filterName := parseFilterContents(body)

-// Count lines in the filter
-for _, line := range lines {
-line = strings.TrimSpace(line)
-if len(line) > 0 && line[0] == '!' {
-if m := filterTitle.FindAllStringSubmatch(line, -1); len(m) > 0 && len(m[0]) >= 2 && !seenTitle {
-filter.Name = m[0][1]
-seenTitle = true
-}
-} else if len(line) != 0 {
-rulesCount++
-}
+if filterName != "" {
+filter.Name = filterName
 }

-// Check if the filter was really changed
+// Check if the filter has been really changed
 if bytes.Equal(filter.contents, body) {
+log.Printf("The filter %d text has not changed", filter.ID)
 return false, nil
 }

-log.Printf("Filter %s updated: %d bytes, %d rules", filter.URL, len(body), rulesCount)
+log.Printf("Filter %d has been updated: %d bytes, %d rules", filter.ID, len(body), rulesCount)
 filter.RulesCount = rulesCount
 filter.contents = body

-// Saving it to the filters dir now
-err = filter.save()
-if err != nil {
-return false, nil
-}
-
 return true, nil
 }

@@ -745,7 +754,7 @@ func (filter *filter) save() error {
 return err
 }

-return nil;
+return nil
 }

 // loads filter contents from the file in config.ourDataDir
@@ -764,19 +773,24 @@ func (filter *filter) load() error {
 return err
 }

-filterFile, err := ioutil.ReadFile(filterFilePath)
+filterFileContents, err := ioutil.ReadFile(filterFilePath)
 if err != nil {
 return err
 }

-log.Printf("Filter %d length is %d", filter.ID, len(filterFile))
-filter.contents = filterFile
+log.Printf("Filter %d length is %d", filter.ID, len(filterFileContents))
+filter.contents = filterFileContents
+
+// Now extract the rules count
+rulesCount, _ := parseFilterContents(filter.contents)
+filter.RulesCount = rulesCount
+
 return nil
 }

 // Path to the filter contents
 func (filter *filter) getFilterFilePath() string {
-return filepath.Join(config.ourBinaryDir, config.ourDataDir, FiltersDir, strconv.Itoa(filter.ID) + ".txt")
+return filepath.Join(config.ourBinaryDir, config.ourDataDir, FiltersDir, strconv.Itoa(filter.ID)+".txt")
 }

 // ------------
@@ -793,6 +807,7 @@ func handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
 httpUpdateConfigReloadDNSReturnOK(w, r)
 }

+//noinspection GoUnusedParameter
 func handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
 data := map[string]interface{}{
 "enabled": config.CoreDNS.SafeBrowsingEnabled,
@@ -868,6 +883,7 @@ func handleParentalDisable(w http.ResponseWriter, r *http.Request) {
 httpUpdateConfigReloadDNSReturnOK(w, r)
 }

+//noinspection GoUnusedParameter
 func handleParentalStatus(w http.ResponseWriter, r *http.Request) {
 data := map[string]interface{}{
 "enabled": config.CoreDNS.ParentalEnabled,
@@ -907,6 +923,7 @@ func handleSafeSearchDisable(w http.ResponseWriter, r *http.Request) {
 httpUpdateConfigReloadDNSReturnOK(w, r)
 }

+//noinspection GoUnusedParameter
 func handleSafeSearchStatus(w http.ResponseWriter, r *http.Request) {
 data := map[string]interface{}{
 "enabled": config.CoreDNS.SafeSearchEnabled,
@@ -933,15 +950,15 @@ func registerControlHandlers() {
 http.HandleFunc("/control/status", optionalAuth(ensureGET(handleStatus)))
 http.HandleFunc("/control/enable_protection", optionalAuth(ensurePOST(handleProtectionEnable)))
 http.HandleFunc("/control/disable_protection", optionalAuth(ensurePOST(handleProtectionDisable)))
-http.HandleFunc("/control/querylog", optionalAuth(ensureGET(coredns_plugin.HandleQueryLog)))
+http.HandleFunc("/control/querylog", optionalAuth(ensureGET(coreDnsPlugin.HandleQueryLog)))
 http.HandleFunc("/control/querylog_enable", optionalAuth(ensurePOST(handleQueryLogEnable)))
 http.HandleFunc("/control/querylog_disable", optionalAuth(ensurePOST(handleQueryLogDisable)))
 http.HandleFunc("/control/set_upstream_dns", optionalAuth(ensurePOST(handleSetUpstreamDNS)))
 http.HandleFunc("/control/test_upstream_dns", optionalAuth(ensurePOST(handleTestUpstreamDNS)))
-http.HandleFunc("/control/stats_top", optionalAuth(ensureGET(coredns_plugin.HandleStatsTop)))
-http.HandleFunc("/control/stats", optionalAuth(ensureGET(coredns_plugin.HandleStats)))
-http.HandleFunc("/control/stats_history", optionalAuth(ensureGET(coredns_plugin.HandleStatsHistory)))
-http.HandleFunc("/control/stats_reset", optionalAuth(ensurePOST(coredns_plugin.HandleStatsReset)))
+http.HandleFunc("/control/stats_top", optionalAuth(ensureGET(coreDnsPlugin.HandleStatsTop)))
+http.HandleFunc("/control/stats", optionalAuth(ensureGET(coreDnsPlugin.HandleStats)))
+http.HandleFunc("/control/stats_history", optionalAuth(ensureGET(coreDnsPlugin.HandleStatsHistory)))
+http.HandleFunc("/control/stats_reset", optionalAuth(ensurePOST(coreDnsPlugin.HandleStatsReset)))
 http.HandleFunc("/control/version.json", optionalAuth(handleGetVersionJSON))
 http.HandleFunc("/control/filtering/enable", optionalAuth(ensurePOST(handleFilteringEnable)))
 http.HandleFunc("/control/filtering/disable", optionalAuth(ensurePOST(handleFilteringDisable)))
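A small illustration (not part of the commit) of what the new parseFilterContents helper in control.go returns for a hand-written filter list. It assumes the helper lives in the same package as control.go (package main, alongside app.go), so this would sit in a *_test.go file next to it.

package main

import "testing"

// Sketch only: '!' lines are comments, the name comes from "! Title: ...",
// and every remaining non-empty line is counted as a rule.
func TestParseFilterContentsSketch(t *testing.T) {
    contents := []byte("! Title: Example filter\n! just a comment\n||example.org^\n@@||allowed.example^\n\n")
    rulesCount, name := parseFilterContents(contents)
    if rulesCount != 2 || name != "Example filter" {
        t.Fatalf("unexpected result: rules=%d name=%q", rulesCount, name)
    }
}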

@@ -25,7 +25,6 @@ const (
 queryLogFileName = "querylog.json" // .gz added during compression
 queryLogSize = 5000 // maximum API response for /querylog
 queryLogTopSize = 500 // Keep in memory only top N values
-queryLogAPIPort = "8618" // 8618 is sha512sum of "querylog" then each byte summed
 )

 var (
@@ -34,7 +33,6 @@ var (

 queryLogCache []*logEntry
 queryLogLock sync.RWMutex
-queryLogTime time.Time
 )

 type logEntry struct {
@@ -107,6 +105,7 @@ func logRequest(question *dns.Msg, answer *dns.Msg, result dnsfilter.Result, ela
 }
 }

+//noinspection GoUnusedParameter
 func HandleQueryLog(w http.ResponseWriter, r *http.Request) {
 queryLogLock.RLock()
 values := make([]*logEntry, len(queryLogCache))
@@ -140,14 +139,14 @@ func HandleQueryLog(w http.ResponseWriter, r *http.Request) {
 }
 }

-jsonentry := map[string]interface{}{
+jsonEntry := map[string]interface{}{
 "reason": entry.Result.Reason.String(),
-"elapsed_ms": strconv.FormatFloat(entry.Elapsed.Seconds()*1000, 'f', -1, 64),
+"elapsedMs": strconv.FormatFloat(entry.Elapsed.Seconds()*1000, 'f', -1, 64),
 "time": entry.Time.Format(time.RFC3339),
 "client": entry.IP,
 }
 if q != nil {
-jsonentry["question"] = map[string]interface{}{
+jsonEntry["question"] = map[string]interface{}{
 "host": strings.ToLower(strings.TrimSuffix(q.Question[0].Name, ".")),
 "type": dns.Type(q.Question[0].Qtype).String(),
 "class": dns.Class(q.Question[0].Qclass).String(),
@@ -156,10 +155,11 @@ func HandleQueryLog(w http.ResponseWriter, r *http.Request) {

 if a != nil {
 status, _ := response.Typify(a, time.Now().UTC())
-jsonentry["status"] = status.String()
+jsonEntry["status"] = status.String()
 }
 if len(entry.Result.Rule) > 0 {
-jsonentry["rule"] = entry.Result.Rule
+jsonEntry["rule"] = entry.Result.Rule
+jsonEntry["filterId"] = entry.Result.FilterID
 }

 if a != nil && len(a.Answer) > 0 {
@@ -202,26 +202,26 @@ func HandleQueryLog(w http.ResponseWriter, r *http.Request) {
 }
 answers = append(answers, answer)
 }
-jsonentry["answer"] = answers
+jsonEntry["answer"] = answers
 }

-data = append(data, jsonentry)
+data = append(data, jsonEntry)
 }

 jsonVal, err := json.Marshal(data)
 if err != nil {
-errortext := fmt.Sprintf("Couldn't marshal data into json: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusInternalServerError)
+errorText := fmt.Sprintf("Couldn't marshal data into json: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusInternalServerError)
 return
 }

 w.Header().Set("Content-Type", "application/json")
 _, err = w.Write(jsonVal)
 if err != nil {
-errortext := fmt.Sprintf("Unable to write response json: %s", err)
-log.Println(errortext)
-http.Error(w, errortext, http.StatusInternalServerError)
+errorText := fmt.Sprintf("Unable to write response json: %s", err)
+log.Println(errorText)
+http.Error(w, errorText, http.StatusInternalServerError)
 }
 }

@@ -499,6 +499,7 @@ func (rule *rule) match(host string) (Result, error) {
 if matched {
 res.Reason = FilteredBlackList
 res.IsFiltered = true
+res.FilterID = rule.listID
 if rule.isWhitelist {
 res.Reason = NotFilteredWhiteList
 res.IsFiltered = false

openapi.yaml: 17 changed lines

@@ -92,7 +92,7 @@ paths:
 - ttl: 55
 type: A
 value: 217.69.139.200
-elapsed_ms: '65.469556'
+elapsedMs: '65.469556'
 question:
 class: IN
 host: mail.ru
@@ -100,7 +100,7 @@ paths:
 reason: DNSFILTER_NOTFILTERED_NOTFOUND
 status: NOERROR
 time: '2018-07-16T22:24:02+03:00'
-- elapsed_ms: '0.15716999999999998'
+- elapsedMs: '0.15716999999999998'
 question:
 class: IN
 host: doubleclick.net
@@ -113,13 +113,14 @@ paths:
 - ttl: 299
 type: A
 value: 176.103.133.78
-elapsed_ms: '132.110929'
+elapsedMs: '132.110929'
 question:
 class: IN
 host: wmconvirus.narod.ru
 type: A
 reason: DNSFILTER_FILTERED_SAFEBROWSING
 rule: adguard-malware-shavar
+filterId: 1
 status: NOERROR
 time: '2018-07-16T22:24:02+03:00'
 /querylog_enable:
@@ -448,9 +449,13 @@ paths:
 examples:
 application/json:
 enabled: false
-urls:
-- 'https://filters.adtidy.org/windows/filters/1.txt'
-- 'https://filters.adtidy.org/windows/filters/2.txt'
+filters:
+- enabled: true
+id: 1
+lastUpdated: "2018-10-30T12:18:57.223101822+03:00"
+name: "AdGuard Simplified Domain Names filter"
+rulesCount: 24896
+url: "https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt"
 rules:
 - '@@||yandex.ru^|'
 /filtering/set_rules: