From 60eb55bdce883f16d6cff0574f4c999a69ce33be Mon Sep 17 00:00:00 2001
From: Simon Zolin
Date: Thu, 8 Aug 2019 12:56:02 +0300
Subject: [PATCH] * stats: remove old code

---
 dnsforward/dnsforward.go      |  47 +----
 dnsforward/dnsforward_test.go |  59 ------
 dnsforward/querylog.go        |  14 +-
 dnsforward/querylog_file.go   |  96 ---------
 dnsforward/querylog_top.go    | 315 ------------------------------
 dnsforward/stats.go           | 355 ----------------------------------
 home/control.go               | 150 +-------------
 home/dns.go                   |   5 -
 8 files changed, 4 insertions(+), 1037 deletions(-)
 delete mode 100644 dnsforward/querylog_top.go
 delete mode 100644 dnsforward/stats.go

diff --git a/dnsforward/dnsforward.go b/dnsforward/dnsforward.go
index 2eec20a4..fe943b91 100644
--- a/dnsforward/dnsforward.go
+++ b/dnsforward/dnsforward.go
@@ -40,7 +40,6 @@ type Server struct {
 	dnsProxy  *proxy.Proxy         // DNS proxy instance
 	dnsFilter *dnsfilter.Dnsfilter // DNS filter instance
 	queryLog  *queryLog            // Query log instance
-	stats     *stats               // General server statistics
 
 	AllowedClients    map[string]bool // IP addresses of whitelist clients
 	DisallowedClients map[string]bool // IP addresses of clients that should be blocked
@@ -58,19 +57,10 @@
 func NewServer(baseDir string) *Server {
 	s := &Server{
 		queryLog: newQueryLog(baseDir),
-		stats:    newStats(),
-	}
-
-	log.Tracef("Loading stats from querylog")
-	err := s.queryLog.fillStatsFromQueryLog(s.stats)
-	if err != nil {
-		log.Error("failed to load stats from querylog: %s", err)
 	}
 
 	log.Printf("Start DNS server periodic jobs")
 	go s.queryLog.periodicQueryLogRotate()
-	go s.queryLog.runningTop.periodicHourlyTopRotate()
-	go s.stats.statsRotator()
 
 	return s
 }
@@ -357,38 +347,6 @@ func (s *Server) GetQueryLog() []map[string]interface{} {
 	return s.queryLog.getQueryLog()
 }
 
-// GetStatsTop returns the current stop stats
-func (s *Server) GetStatsTop() *StatsTop {
-	s.RLock()
-	defer s.RUnlock()
-	return s.queryLog.runningTop.getStatsTop()
-}
-
-// PurgeStats purges current server stats
-func (s *Server) PurgeStats() {
-	s.Lock()
-	defer s.Unlock()
-	s.stats.purgeStats()
-}
-
-// GetAggregatedStats returns aggregated stats data for the 24 hours
-func (s *Server) GetAggregatedStats() map[string]interface{} {
-	s.RLock()
-	defer s.RUnlock()
-	return s.stats.getAggregatedStats()
-}
-
-// GetStatsHistory gets stats history aggregated by the specified time unit
-// timeUnit is either time.Second, time.Minute, time.Hour, or 24*time.Hour
-// start is start of the time range
-// end is end of the time range
-// returns nil if time unit is not supported
-func (s *Server) GetStatsHistory(timeUnit time.Duration, startTime time.Time, endTime time.Time) (map[string]interface{}, error) {
-	s.RLock()
-	defer s.RUnlock()
-	return s.stats.getStatsHistory(timeUnit, startTime, endTime)
-}
-
 // Return TRUE if this client should be blocked
 func (s *Server) isBlockedIP(ip string) bool {
 	if len(s.AllowedClients) != 0 || len(s.AllowedClientsIPNet) != 0 {
@@ -513,10 +471,7 @@ func (s *Server) handleDNSRequest(p *proxy.Proxy, d *proxy.DNSContext) error {
 		if d.Upstream != nil {
 			upstreamAddr = d.Upstream.Address()
 		}
-		entry := s.queryLog.logRequest(msg, d.Res, res, elapsed, d.Addr, upstreamAddr)
-		if entry != nil {
-			s.stats.incrementCounters(entry)
-		}
+		_ = s.queryLog.logRequest(msg, d.Res, res, elapsed, d.Addr, upstreamAddr)
 	}
 
 	return nil
diff --git a/dnsforward/dnsforward_test.go b/dnsforward/dnsforward_test.go
index 6d41c62a..eb69e3d0 100644
--- a/dnsforward/dnsforward_test.go
+++ b/dnsforward/dnsforward_test.go
@@ -48,10 +48,6 @@ func TestServer(t *testing.T) {
 	// check query log and stats
 	log := s.GetQueryLog()
 	assert.Equal(t, 1, len(log), "Log size")
-	stats := s.GetStatsTop()
-	assert.Equal(t, 1, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 1, len(stats.Clients), "Top clients length")
 
 	// message over TCP
 	req = createGoogleATestMessage()
@@ -66,11 +62,6 @@ func TestServer(t *testing.T) {
 	// check query log and stats again
 	log = s.GetQueryLog()
 	assert.Equal(t, 2, len(log), "Log size")
-	stats = s.GetStatsTop()
-	// Length did not change as we queried the same domain
-	assert.Equal(t, 1, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 1, len(stats.Clients), "Top clients length")
 
 	err = s.Stop()
 	if err != nil {
@@ -100,10 +91,6 @@ func TestServerWithProtectionDisabled(t *testing.T) {
 	// check query log and stats
 	log := s.GetQueryLog()
 	assert.Equal(t, 1, len(log), "Log size")
-	stats := s.GetStatsTop()
-	assert.Equal(t, 1, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 1, len(stats.Clients), "Top clients length")
 
 	err = s.Stop()
 	if err != nil {
@@ -195,11 +182,6 @@ func TestSafeSearch(t *testing.T) {
 		exchangeAndAssertResponse(t, &client, addr, host, "213.180.193.56")
 	}
 
-	// Check aggregated stats
-	assert.Equal(t, s.GetAggregatedStats()["replaced_safesearch"], float64(len(yandexDomains)))
-	assert.Equal(t, s.GetAggregatedStats()["blocked_filtering"], float64(len(yandexDomains)))
-	assert.Equal(t, s.GetAggregatedStats()["dns_queries"], float64(len(yandexDomains)))
-
 	// Let's lookup for google safesearch ip
 	ips, err := net.LookupIP("forcesafesearch.google.com")
 	if err != nil {
@@ -220,27 +202,6 @@ func TestSafeSearch(t *testing.T) {
 		exchangeAndAssertResponse(t, &client, addr, host, ip.String())
 	}
 
-	// Check aggregated stats
-	assert.Equal(t, s.GetAggregatedStats()["replaced_safesearch"], float64(len(yandexDomains)+len(googleDomains)))
-	assert.Equal(t, s.GetAggregatedStats()["blocked_filtering"], float64(len(yandexDomains)+len(googleDomains)))
-	assert.Equal(t, s.GetAggregatedStats()["dns_queries"], float64(len(yandexDomains)+len(googleDomains)))
-
-	// Do one more exchange
-	exchangeAndAssertResponse(t, &client, addr, "google-public-dns-a.google.com.", "8.8.8.8")
-
-	// Check aggregated stats
-	assert.Equal(t, s.GetAggregatedStats()["replaced_safesearch"], float64(len(yandexDomains)+len(googleDomains)))
-	assert.Equal(t, s.GetAggregatedStats()["blocked_filtering"], float64(len(yandexDomains)+len(googleDomains)))
-	assert.Equal(t, s.GetAggregatedStats()["dns_queries"], float64(len(yandexDomains)+len(googleDomains)+1))
-
-	// Count of blocked domains (there is `yandex.com` duplicate in yandexDomains array)
-	blockedCount := len(yandexDomains) - 1 + len(googleDomains)
-	assert.Equal(t, len(s.GetStatsTop().Blocked), blockedCount)
-
-	// Count of domains (blocked domains + `google-public-dns-a.google.com`)
-	domainsCount := blockedCount + 1
-	assert.Equal(t, len(s.GetStatsTop().Domains), domainsCount)
-
 	err = s.Stop()
 	if err != nil {
 		t.Fatalf("Can not stopd server cause: %s", err)
@@ -272,10 +233,6 @@ func TestInvalidRequest(t *testing.T) {
 	// invalid requests aren't written to the query log
 	log := s.GetQueryLog()
 	assert.Equal(t, 0, len(log), "Log size")
-	stats := s.GetStatsTop()
-	assert.Equal(t, 0, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 0, len(stats.Clients), "Top clients length")
 
 	err = s.Stop()
 	if err != nil {
@@ -313,10 +270,6 @@ func TestBlockedRequest(t *testing.T) {
 	// check query log and stats
 	log := s.GetQueryLog()
 	assert.Equal(t, 1, len(log), "Log size")
-	stats := s.GetStatsTop()
-	assert.Equal(t, 1, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 1, len(stats.Clients), "Top clients length")
 
 	err = s.Stop()
 	if err != nil {
@@ -362,10 +315,6 @@ func TestNullBlockedRequest(t *testing.T) {
 	// check query log and stats
 	log := s.GetQueryLog()
 	assert.Equal(t, 1, len(log), "Log size")
-	stats := s.GetStatsTop()
-	assert.Equal(t, 1, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 1, len(stats.Clients), "Top clients length")
 
 	err = s.Stop()
 	if err != nil {
@@ -410,10 +359,6 @@ func TestBlockedByHosts(t *testing.T) {
 	// check query log and stats
 	log := s.GetQueryLog()
 	assert.Equal(t, 1, len(log), "Log size")
-	stats := s.GetStatsTop()
-	assert.Equal(t, 1, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 1, len(stats.Clients), "Top clients length")
 
 	err = s.Stop()
 	if err != nil {
@@ -469,10 +414,6 @@ func TestBlockedBySafeBrowsing(t *testing.T) {
 	// check query log and stats
 	log := s.GetQueryLog()
 	assert.Equal(t, 1, len(log), "Log size")
-	stats := s.GetStatsTop()
-	assert.Equal(t, 1, len(stats.Domains), "Top domains length")
-	assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
-	assert.Equal(t, 1, len(stats.Clients), "Top clients length")
 
 	err = s.Stop()
 	if err != nil {
diff --git a/dnsforward/querylog.go b/dnsforward/querylog.go
index 230cffd2..fbbeb7f2 100644
--- a/dnsforward/querylog.go
+++ b/dnsforward/querylog.go
@@ -25,8 +25,7 @@ const (
 
 // queryLog is a structure that writes and reads the DNS query log
 type queryLog struct {
-	logFile    string  // path to the log file
-	runningTop *dayTop // current top charts
+	logFile string // path to the log file
 
 	logBufferLock sync.RWMutex
 	logBuffer     []*logEntry
@@ -40,10 +39,8 @@ type queryLog struct {
 
 // newQueryLog creates a new instance of the query log
 func newQueryLog(baseDir string) *queryLog {
 	l := &queryLog{
-		logFile:    filepath.Join(baseDir, queryLogFileName),
-		runningTop: &dayTop{},
+		logFile: filepath.Join(baseDir, queryLogFileName),
 	}
-	l.runningTop.init()
 	return l
 }
@@ -112,13 +109,6 @@ func (l *queryLog) logRequest(question *dns.Msg, answer *dns.Msg, result *dnsfil
 	}
 	l.queryLogLock.Unlock()
 
-	// add it to running top
-	err = l.runningTop.addEntry(&entry, question, now)
-	if err != nil {
-		log.Printf("Failed to add entry to running top: %s", err)
-		// don't do failure, just log
-	}
-
 	// if buffer needs to be flushed to disk, do it now
 	if needFlush {
 		// write to file
diff --git a/dnsforward/querylog_file.go b/dnsforward/querylog_file.go
index 2147b042..e990fdec 100644
--- a/dnsforward/querylog_file.go
+++ b/dnsforward/querylog_file.go
@@ -178,99 +178,3 @@ func (l *queryLog) periodicQueryLogRotate() {
 		}
 	}
 }
-
-func (l *queryLog) genericLoader(onEntry func(entry *logEntry) error, needMore func() bool, timeWindow time.Duration) error {
-	now := time.Now()
-	// read from querylog files, try newest file first
-	var files []string
-
-	if enableGzip {
-		files = []string{
-			l.logFile + ".gz",
-			l.logFile + ".gz.1",
-		}
-	} else {
-		files = []string{
-			l.logFile,
-			l.logFile + ".1",
-		}
-	}
-
-	// read from all files
-	for _, file := range files {
-		if !needMore() {
-			break
-		}
-		if _, err := os.Stat(file); os.IsNotExist(err) {
-			// do nothing, file doesn't exist
-			continue
-		}
-
-		f, err := os.Open(file)
-		if err != nil {
-			log.Error("Failed to open file \"%s\": %s", file, err)
-			// try next file
-			continue
-		}
-		defer f.Close()
-
-		var d *json.Decoder
-
-		if enableGzip {
-			zr, err := gzip.NewReader(f)
-			if err != nil {
-				log.Error("Failed to create gzip reader: %s", err)
-				continue
-			}
-			defer zr.Close()
-			d = json.NewDecoder(zr)
-		} else {
-			d = json.NewDecoder(f)
-		}
-
-		i := 0
-		over := 0
-		max := 10000 * time.Second
-		var sum time.Duration
-		// entries on file are in oldest->newest order
-		// we want maxLen newest
-		for d.More() {
-			if !needMore() {
-				break
-			}
-			var entry logEntry
-			err := d.Decode(&entry)
-			if err != nil {
-				log.Error("Failed to decode: %s", err)
-				// next entry can be fine, try more
-				continue
-			}
-
-			if now.Sub(entry.Time) > timeWindow {
-				// log.Tracef("skipping entry") // debug logging
-				continue
-			}
-
-			if entry.Elapsed > max {
-				over++
-			} else {
-				sum += entry.Elapsed
-			}
-
-			i++
-			err = onEntry(&entry)
-			if err != nil {
-				return err
-			}
-		}
-		elapsed := time.Since(now)
-		var perunit time.Duration
-		var avg time.Duration
-		if i > 0 {
-			perunit = elapsed / time.Duration(i)
-			avg = sum / time.Duration(i)
-		}
-		log.Debug("file \"%s\": read %d entries in %v, %v/entry, %v over %v, %v avg", file, i, elapsed, perunit, over, max, avg)
-	}
-	return nil
-}
diff --git a/dnsforward/querylog_top.go b/dnsforward/querylog_top.go
deleted file mode 100644
index 3a81182d..00000000
--- a/dnsforward/querylog_top.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package dnsforward
-
-import (
-	"fmt"
-	"os"
-	"path"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/AdguardTeam/golibs/log"
-	"github.com/bluele/gcache"
-	"github.com/miekg/dns"
-)
-
-type hourTop struct {
-	domains gcache.Cache
-	blocked gcache.Cache
-	clients gcache.Cache
-
-	mutex sync.RWMutex
-}
-
-func (h *hourTop) init() {
-	h.domains = gcache.New(queryLogTopSize).LRU().Build()
-	h.blocked = gcache.New(queryLogTopSize).LRU().Build()
-	h.clients = gcache.New(queryLogTopSize).LRU().Build()
-}
-
-type dayTop struct {
-	hours     []*hourTop
-	hoursLock sync.RWMutex // writelock this lock ONLY WHEN rotating or intializing hours!
-
-	loaded     bool
-	loadedLock sync.Mutex
-}
-
-func (d *dayTop) init() {
-	d.hoursWriteLock()
-	for i := 0; i < 24; i++ {
-		hour := hourTop{}
-		hour.init()
-		d.hours = append(d.hours, &hour)
-	}
-	d.hoursWriteUnlock()
-}
-
-func (d *dayTop) rotateHourlyTop() {
-	log.Printf("Rotating hourly top")
-	hour := &hourTop{}
-	hour.init()
-	d.hoursWriteLock()
-	d.hours = append([]*hourTop{hour}, d.hours...)
-	d.hours = d.hours[:24]
-	d.hoursWriteUnlock()
-}
-
-func (d *dayTop) periodicHourlyTopRotate() {
-	t := time.Hour
-	for range time.Tick(t) {
-		d.rotateHourlyTop()
-	}
-}
-
-func (h *hourTop) incrementValue(key string, cache gcache.Cache) error {
-	h.Lock()
-	defer h.Unlock()
-	ivalue, err := cache.Get(key)
-	if err == gcache.KeyNotFoundError {
-		// we just set it and we're done
-		err = cache.Set(key, 1)
-		if err != nil {
-			log.Printf("Failed to set hourly top value: %s", err)
-			return err
-		}
-		return nil
-	}
-
-	if err != nil {
-		log.Printf("gcache encountered an error during get: %s", err)
-		return err
-	}
-
-	cachedValue, ok := ivalue.(int)
-	if !ok {
-		err = fmt.Errorf("SHOULD NOT HAPPEN: gcache has non-int as value: %v", ivalue)
-		log.Println(err)
-		return err
-	}
-
-	err = cache.Set(key, cachedValue+1)
-	if err != nil {
-		log.Printf("Failed to set hourly top value: %s", err)
-		return err
-	}
-	return nil
-}
-
-func (h *hourTop) incrementDomains(key string) error {
-	return h.incrementValue(key, h.domains)
-}
-
-func (h *hourTop) incrementBlocked(key string) error {
-	return h.incrementValue(key, h.blocked)
-}
-
-func (h *hourTop) incrementClients(key string) error {
-	return h.incrementValue(key, h.clients)
-}
-
-// if does not exist -- return 0
-func (h *hourTop) lockedGetValue(key string, cache gcache.Cache) (int, error) {
-	ivalue, err := cache.Get(key)
-	if err == gcache.KeyNotFoundError {
-		return 0, nil
-	}
-
-	if err != nil {
-		log.Printf("gcache encountered an error during get: %s", err)
-		return 0, err
-	}
-
-	value, ok := ivalue.(int)
-	if !ok {
-		err := fmt.Errorf("SHOULD NOT HAPPEN: gcache has non-int as value: %v", ivalue)
-		log.Println(err)
-		return 0, err
-	}
-
-	return value, nil
-}
-
-func (h *hourTop) lockedGetDomains(key string) (int, error) {
-	return h.lockedGetValue(key, h.domains)
-}
-
-func (h *hourTop) lockedGetBlocked(key string) (int, error) {
-	return h.lockedGetValue(key, h.blocked)
-}
-
-func (h *hourTop) lockedGetClients(key string) (int, error) {
-	return h.lockedGetValue(key, h.clients)
-}
-
-func (d *dayTop) addEntry(entry *logEntry, q *dns.Msg, now time.Time) error {
-	// figure out which hour bucket it belongs to
-	hour := int(now.Sub(entry.Time).Hours())
-	if hour >= 24 {
-		log.Printf("t %v is >24 hours ago, ignoring", entry.Time)
-		return nil
-	}
-
-	// if a DNS query doesn't have questions, do nothing
-	if len(q.Question) == 0 {
-		return nil
-	}
-
-	hostname := strings.ToLower(strings.TrimSuffix(q.Question[0].Name, "."))
-
-	// if question hostname is empty, do nothing
-	if hostname == "" {
-		return nil
-	}
-
-	// get value, if not set, crate one
-	d.hoursReadLock()
-	defer d.hoursReadUnlock()
-	err := d.hours[hour].incrementDomains(hostname)
-	if err != nil {
-		log.Printf("Failed to increment value: %s", err)
-		return err
-	}
-
-	if entry.Result.IsFiltered {
-		err := d.hours[hour].incrementBlocked(hostname)
-		if err != nil {
-			log.Printf("Failed to increment value: %s", err)
-			return err
-		}
-	}
-
-	if len(entry.IP) > 0 {
-		err := d.hours[hour].incrementClients(entry.IP)
-		if err != nil {
-			log.Printf("Failed to increment value: %s", err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (l *queryLog) fillStatsFromQueryLog(s *stats) error {
-	now := time.Now()
-	l.runningTop.loadedWriteLock()
-	defer l.runningTop.loadedWriteUnlock()
-	if l.runningTop.loaded {
-		return nil
-	}
-	onEntry := func(entry *logEntry) error {
-		if len(entry.Question) == 0 {
-			log.Printf("entry question is absent, skipping")
-			return nil
-		}
-
-		if entry.Time.After(now) {
-			log.Printf("t %v vs %v is in the future, ignoring", entry.Time, now)
-			return nil
-		}
-
-		q := new(dns.Msg)
-		if err := q.Unpack(entry.Question); err != nil {
-			log.Printf("failed to unpack dns message question: %s", err)
-			return err
-		}
-
-		if len(q.Question) != 1 {
-			log.Printf("malformed dns message, has no questions, skipping")
-			return nil
-		}
-
-		err := l.runningTop.addEntry(entry, q, now)
-		if err != nil {
-			log.Printf("Failed to add entry to running top: %s", err)
-			return err
-		}
-
-		l.queryLogLock.Lock()
-		l.queryLogCache = append(l.queryLogCache, entry)
-		if len(l.queryLogCache) > queryLogSize {
-			toremove := len(l.queryLogCache) - queryLogSize
-			l.queryLogCache = l.queryLogCache[toremove:]
-		}
-		l.queryLogLock.Unlock()
-
-		s.incrementCounters(entry)
-		return nil
-	}
-
-	needMore := func() bool { return true }
-	err := l.genericLoader(onEntry, needMore, queryLogTimeLimit)
-	if err != nil {
-		log.Printf("Failed to load entries from querylog: %s", err)
-		return err
-	}
-
-	l.runningTop.loaded = true
-	return nil
-}
-
-// StatsTop represents top stat charts
-type StatsTop struct {
-	Domains map[string]int // Domains - top requested domains
-	Blocked map[string]int // Blocked - top blocked domains
-	Clients map[string]int // Clients - top DNS clients
-}
-
-// getStatsTop returns the current top stats
-func (d *dayTop) getStatsTop() *StatsTop {
-	s := &StatsTop{
-		Domains: map[string]int{},
-		Blocked: map[string]int{},
-		Clients: map[string]int{},
-	}
-
-	do := func(keys []interface{}, getter func(key string) (int, error), result map[string]int) {
-		for _, ikey := range keys {
-			key, ok := ikey.(string)
-			if !ok {
-				continue
-			}
-			value, err := getter(key)
-			if err != nil {
-				log.Printf("Failed to get top domains value for %v: %s", key, err)
-				return
-			}
-			result[key] += value
-		}
-	}
-
-	d.hoursReadLock()
-	for hour := 0; hour < 24; hour++ {
-		d.hours[hour].RLock()
-		do(d.hours[hour].domains.Keys(false), d.hours[hour].lockedGetDomains, s.Domains)
-		do(d.hours[hour].blocked.Keys(false), d.hours[hour].lockedGetBlocked, s.Blocked)
-		do(d.hours[hour].clients.Keys(false), d.hours[hour].lockedGetClients, s.Clients)
-		d.hours[hour].RUnlock()
-	}
-	d.hoursReadUnlock()
-
-	return s
-}
-
-func (d *dayTop) hoursWriteLock()    { tracelock(); d.hoursLock.Lock() }
-func (d *dayTop) hoursWriteUnlock()  { tracelock(); d.hoursLock.Unlock() }
-func (d *dayTop) hoursReadLock()     { tracelock(); d.hoursLock.RLock() }
-func (d *dayTop) hoursReadUnlock()   { tracelock(); d.hoursLock.RUnlock() }
-func (d *dayTop) loadedWriteLock()   { tracelock(); d.loadedLock.Lock() }
-func (d *dayTop) loadedWriteUnlock() { tracelock(); d.loadedLock.Unlock() }
-
-func (h *hourTop) Lock()    { tracelock(); h.mutex.Lock() }
-func (h *hourTop) RLock()   { tracelock(); h.mutex.RLock() }
-func (h *hourTop) RUnlock() { tracelock(); h.mutex.RUnlock() }
-func (h *hourTop) Unlock()  { tracelock(); h.mutex.Unlock() }
-
-func tracelock() {
-	if false { // not commented out to make code checked during compilation
-		pc := make([]uintptr, 10) // at least 1 entry needed
-		runtime.Callers(2, pc)
-		f := path.Base(runtime.FuncForPC(pc[1]).Name())
-		lockf := path.Base(runtime.FuncForPC(pc[0]).Name())
-		fmt.Fprintf(os.Stderr, "%s(): %s\n", f, lockf)
-	}
-}
diff --git a/dnsforward/stats.go b/dnsforward/stats.go
deleted file mode 100644
index 62565a98..00000000
--- a/dnsforward/stats.go
+++ /dev/null
@@ -1,355 +0,0 @@
-package dnsforward
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/AdguardTeam/AdGuardHome/dnsfilter"
-)
-
-// how far back to keep the stats
-const statsHistoryElements = 60 + 1 // +1 for calculating delta
-
-// entries for single time period (for example all per-second entries)
-type statsEntries map[string][statsHistoryElements]float64
-
-// each periodic stat is a map of arrays
-type periodicStats struct {
-	entries    statsEntries
-	period     time.Duration // how long one entry lasts
-	lastRotate time.Time     // last time this data was rotated
-
-	sync.RWMutex
-}
-
-// stats is the DNS server historical statistics
-type stats struct {
-	perSecond periodicStats
-	perMinute periodicStats
-	perHour   periodicStats
-	perDay    periodicStats
-
-	requests             *counter   // total number of requests
-	filtered             *counter   // total number of filtered requests
-	filteredLists        *counter   // total number of requests blocked by filter lists
-	filteredSafebrowsing *counter   // total number of requests blocked by safebrowsing
-	filteredParental     *counter   // total number of requests blocked by the parental control
-	whitelisted          *counter   // total number of requests whitelisted by filter lists
-	safesearch           *counter   // total number of requests for which safe search rules were applied
-	errorsTotal          *counter   // total number of errors
-	elapsedTime          *histogram // requests duration histogram
-}
-
-// initializes an empty stats structure
-func newStats() *stats {
-	s := &stats{
-		requests:             newDNSCounter("requests_total"),
-		filtered:             newDNSCounter("filtered_total"),
-		filteredLists:        newDNSCounter("filtered_lists_total"),
-		filteredSafebrowsing: newDNSCounter("filtered_safebrowsing_total"),
-		filteredParental:     newDNSCounter("filtered_parental_total"),
-		whitelisted:          newDNSCounter("whitelisted_total"),
-		safesearch:           newDNSCounter("safesearch_total"),
-		errorsTotal:          newDNSCounter("errors_total"),
-		elapsedTime:          newDNSHistogram("request_duration"),
-	}
-
-	// Initializes empty per-sec/minute/hour/day stats
-	s.purgeStats()
-	return s
-}
-
-func initPeriodicStats(periodic *periodicStats, period time.Duration) {
-	periodic.Lock()
-	periodic.entries = statsEntries{}
-	periodic.lastRotate = time.Now()
-	periodic.period = period
-	periodic.Unlock()
-}
-
-func (s *stats) purgeStats() {
-	initPeriodicStats(&s.perSecond, time.Second)
-	initPeriodicStats(&s.perMinute, time.Minute)
-	initPeriodicStats(&s.perHour, time.Hour)
-	initPeriodicStats(&s.perDay, time.Hour*24)
-}
-
-func (p *periodicStats) Inc(name string, when time.Time) {
-	// calculate how many periods ago this happened
-	elapsed := int64(time.Since(when) / p.period)
-	// log.Tracef("%s: %v as %v -> [%v]", name, time.Since(when), p.period, elapsed)
-	if elapsed >= statsHistoryElements {
-		return // outside of our timeframe
-	}
-	p.Lock()
-	currentValues := p.entries[name]
-	currentValues[elapsed]++
-	p.entries[name] = currentValues
-	p.Unlock()
-}
-
-func (p *periodicStats) Observe(name string, when time.Time, value float64) {
-	// calculate how many periods ago this happened
-	elapsed := int64(time.Since(when) / p.period)
-	// log.Tracef("%s: %v as %v -> [%v]", name, time.Since(when), p.period, elapsed)
-	if elapsed >= statsHistoryElements {
-		return // outside of our timeframe
-	}
-	p.Lock()
-	{
-		countname := name + "_count"
-		currentValues := p.entries[countname]
-		v := currentValues[elapsed]
-		// log.Tracef("Will change p.entries[%s][%d] from %v to %v", countname, elapsed, value, value+1)
-		v++
-		currentValues[elapsed] = v
-		p.entries[countname] = currentValues
-	}
-	{
-		totalname := name + "_sum"
-		currentValues := p.entries[totalname]
-		currentValues[elapsed] += value
-		p.entries[totalname] = currentValues
-	}
-	p.Unlock()
-}
-
-func (p *periodicStats) statsRotate(now time.Time) {
-	p.Lock()
-	rotations := int64(now.Sub(p.lastRotate) / p.period)
-	if rotations > statsHistoryElements {
-		rotations = statsHistoryElements
-	}
-	// calculate how many times we should rotate
-	for r := int64(0); r < rotations; r++ {
-		for key, values := range p.entries {
-			newValues := [statsHistoryElements]float64{}
-			for i := 1; i < len(values); i++ {
-				newValues[i] = values[i-1]
-			}
-			p.entries[key] = newValues
-		}
-	}
-	if rotations > 0 {
-		p.lastRotate = now
-	}
-	p.Unlock()
-}
-
-func (s *stats) statsRotator() {
-	for range time.Tick(time.Second) {
-		now := time.Now()
-		s.perSecond.statsRotate(now)
-		s.perMinute.statsRotate(now)
-		s.perHour.statsRotate(now)
-		s.perDay.statsRotate(now)
-	}
-}
-
-// counter that wraps around prometheus Counter but also adds to periodic stats
-type counter struct {
-	name  string // used as key in periodic stats
-	value int64
-
-	sync.Mutex
-}
-
-func newDNSCounter(name string) *counter {
-	// log.Tracef("called")
-	return &counter{
-		name: name,
-	}
-}
-
-func (s *stats) incWithTime(c *counter, when time.Time) {
-	s.perSecond.Inc(c.name, when)
-	s.perMinute.Inc(c.name, when)
-	s.perHour.Inc(c.name, when)
-	s.perDay.Inc(c.name, when)
-	c.Lock()
-	c.value++
-	c.Unlock()
-}
-
-type histogram struct {
-	name  string // used as key in periodic stats
-	count int64
-	total float64
-
-	sync.Mutex
-}
-
-func newDNSHistogram(name string) *histogram {
-	return &histogram{
-		name: name,
-	}
-}
-
-func (s *stats) observeWithTime(h *histogram, value float64, when time.Time) {
-	s.perSecond.Observe(h.name, when, value)
-	s.perMinute.Observe(h.name, when, value)
-	s.perHour.Observe(h.name, when, value)
-	s.perDay.Observe(h.name, when, value)
-	h.Lock()
-	h.count++
-	h.total += value
-	h.Unlock()
-}
-
-// -----
-// stats
-// -----
-func (s *stats) incrementCounters(entry *logEntry) {
-	s.incWithTime(s.requests, entry.Time)
-	if entry.Result.IsFiltered {
-		s.incWithTime(s.filtered, entry.Time)
-	}
-
-	switch entry.Result.Reason {
-	case dnsfilter.NotFilteredWhiteList:
-		s.incWithTime(s.whitelisted, entry.Time)
-	case dnsfilter.NotFilteredError:
-		s.incWithTime(s.errorsTotal, entry.Time)
-	case dnsfilter.FilteredBlackList:
-		s.incWithTime(s.filteredLists, entry.Time)
-	case dnsfilter.FilteredSafeBrowsing:
-		s.incWithTime(s.filteredSafebrowsing, entry.Time)
-	case dnsfilter.FilteredParental:
-		s.incWithTime(s.filteredParental, entry.Time)
-	case dnsfilter.FilteredInvalid:
-		// do nothing
-	case dnsfilter.FilteredSafeSearch:
-		s.incWithTime(s.safesearch, entry.Time)
-	}
-	s.observeWithTime(s.elapsedTime, entry.Elapsed.Seconds(), entry.Time)
-}
-
-// getAggregatedStats returns aggregated stats data for the 24 hours
-func (s *stats) getAggregatedStats() map[string]interface{} {
-	const numHours = 24
-	historical := s.generateMapFromStats(&s.perHour, 0, numHours)
-	// sum them up
-	summed := map[string]interface{}{}
-	for key, values := range historical {
-		summedValue := 0.0
-		floats, ok := values.([]float64)
-		if !ok {
-			continue
-		}
-		for _, v := range floats {
-			summedValue += v
-		}
-		summed[key] = summedValue
-	}
-	// don't forget to divide by number of elements in returned slice
-	if val, ok := summed["avg_processing_time"]; ok {
-		if flval, flok := val.(float64); flok {
-			flval /= numHours
-			summed["avg_processing_time"] = flval
-		}
-	}
-
-	summed["stats_period"] = "24 hours"
-	return summed
-}
-
-func (s *stats) generateMapFromStats(stats *periodicStats, start int, end int) map[string]interface{} {
-	stats.RLock()
-	defer stats.RUnlock()
-
-	// clamp
-	start = clamp(start, 0, statsHistoryElements)
-	end = clamp(end, 0, statsHistoryElements)
-
-	avgProcessingTime := make([]float64, 0)
-
-	count := getReversedSlice(stats.entries[s.elapsedTime.name+"_count"], start, end)
-	sum := getReversedSlice(stats.entries[s.elapsedTime.name+"_sum"], start, end)
-	for i := 0; i < len(count); i++ {
-		var avg float64
-		if count[i] != 0 {
-			avg = sum[i] / count[i]
-			avg *= 1000
-		}
-		avgProcessingTime = append(avgProcessingTime, avg)
-	}
-
-	result := map[string]interface{}{
-		"dns_queries":           getReversedSlice(stats.entries[s.requests.name], start, end),
-		"blocked_filtering":     getReversedSlice(stats.entries[s.filtered.name], start, end),
-		"replaced_safebrowsing": getReversedSlice(stats.entries[s.filteredSafebrowsing.name], start, end),
-		"replaced_safesearch":   getReversedSlice(stats.entries[s.safesearch.name], start, end),
-		"replaced_parental":     getReversedSlice(stats.entries[s.filteredParental.name], start, end),
-		"avg_processing_time":   avgProcessingTime,
-	}
-	return result
-}
-
-// getStatsHistory gets stats history aggregated by the specified time unit
-// timeUnit is either time.Second, time.Minute, time.Hour, or 24*time.Hour
-// start is start of the time range
-// end is end of the time range
-// returns nil if time unit is not supported
-func (s *stats) getStatsHistory(timeUnit time.Duration, startTime time.Time, endTime time.Time) (map[string]interface{}, error) {
-	var stats *periodicStats
-
-	switch timeUnit {
-	case time.Second:
-		stats = &s.perSecond
-	case time.Minute:
-		stats = &s.perMinute
-	case time.Hour:
-		stats = &s.perHour
-	case 24 * time.Hour:
-		stats = &s.perDay
-	}
-
-	if stats == nil {
-		return nil, fmt.Errorf("unsupported time unit: %v", timeUnit)
-	}
-
-	now := time.Now()
-
-	// check if start and time times are within supported time range
-	timeRange := timeUnit * statsHistoryElements
-	if startTime.Add(timeRange).Before(now) {
-		return nil, fmt.Errorf("start_time parameter is outside of supported range: %s", startTime.String())
-	}
-	if endTime.Add(timeRange).Before(now) {
-		return nil, fmt.Errorf("end_time parameter is outside of supported range: %s", startTime.String())
-	}
-
-	// calculate start and end of our array
-	// basically it's how many hours/minutes/etc have passed since now
-	start := int(now.Sub(endTime) / timeUnit)
-	end := int(now.Sub(startTime) / timeUnit)
-
-	// swap them around if they're inverted
-	if start > end {
-		start, end = end, start
-	}
-
-	return s.generateMapFromStats(stats, start, end), nil
-}
-
-func clamp(value, low, high int) int {
-	if value < low {
-		return low
-	}
-	if value > high {
-		return high
-	}
-	return value
-}
-
-// --------------------------
-// helper functions for stats
-// --------------------------
-func getReversedSlice(input [statsHistoryElements]float64, start int, end int) []float64 {
-	output := make([]float64, 0)
-	for i := start; i <= end; i++ {
-		output = append([]float64{input[i]}, output...)
-	}
-	return output
-}
diff --git a/home/control.go b/home/control.go
index 6601b364..5ac71d6d 100644
--- a/home/control.go
+++ b/home/control.go
@@ -1,12 +1,10 @@
 package home
 
 import (
-	"bytes"
 	"encoding/json"
 	"fmt"
 	"net"
 	"net/http"
-	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -177,149 +175,6 @@ func handleQueryLog(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-func handleStatsTop(w http.ResponseWriter, r *http.Request) {
-	s := config.dnsServer.GetStatsTop()
-
-	// use manual json marshalling because we want maps to be sorted by value
-	statsJSON := bytes.Buffer{}
-	statsJSON.WriteString("{\n")
-
-	gen := func(json *bytes.Buffer, name string, top map[string]int, addComma bool) {
-		json.WriteString("  ")
-		json.WriteString(fmt.Sprintf("%q", name))
-		json.WriteString(": {\n")
-		sorted := sortByValue(top)
-		// no more than 50 entries
-		if len(sorted) > 50 {
-			sorted = sorted[:50]
-		}
-		for i, key := range sorted {
-			json.WriteString("    ")
-			json.WriteString(fmt.Sprintf("%q", key))
-			json.WriteString(": ")
-			json.WriteString(strconv.Itoa(top[key]))
-			if i+1 != len(sorted) {
-				json.WriteByte(',')
-			}
-			json.WriteByte('\n')
-		}
-		json.WriteString("  }")
-		if addComma {
-			json.WriteByte(',')
-		}
-		json.WriteByte('\n')
-	}
-	gen(&statsJSON, "top_queried_domains", s.Domains, true)
-	gen(&statsJSON, "top_blocked_domains", s.Blocked, true)
-	gen(&statsJSON, "top_clients", s.Clients, true)
-	statsJSON.WriteString("  \"stats_period\": \"24 hours\"\n")
-	statsJSON.WriteString("}\n")
-
-	w.Header().Set("Content-Type", "application/json")
-	_, err := w.Write(statsJSON.Bytes())
-	if err != nil {
-		httpError(w, http.StatusInternalServerError, "Couldn't write body: %s", err)
-	}
-}
-
-// handleStatsReset resets the stats caches
-func handleStatsReset(w http.ResponseWriter, r *http.Request) {
-	config.dnsServer.PurgeStats()
-	_, err := fmt.Fprintf(w, "OK\n")
-	if err != nil {
-		httpError(w, http.StatusInternalServerError, "Couldn't write body: %s", err)
-	}
-}
-
-// handleStats returns aggregated stats data for the 24 hours
-func handleStats(w http.ResponseWriter, r *http.Request) {
-	summed := config.dnsServer.GetAggregatedStats()
-
-	statsJSON, err := json.Marshal(summed)
-	if err != nil {
-		httpError(w, http.StatusInternalServerError, "Unable to marshal status json: %s", err)
-		return
-	}
-	w.Header().Set("Content-Type", "application/json")
-	_, err = w.Write(statsJSON)
-	if err != nil {
-		httpError(w, http.StatusInternalServerError, "Unable to write response json: %s", err)
-		return
-	}
-}
-
-// HandleStatsHistory returns historical stats data for the 24 hours
-func handleStatsHistory(w http.ResponseWriter, r *http.Request) {
-	// handle time unit and prepare our time window size
-	timeUnitString := r.URL.Query().Get("time_unit")
-	var timeUnit time.Duration
-	switch timeUnitString {
-	case "seconds":
-		timeUnit = time.Second
-	case "minutes":
-		timeUnit = time.Minute
-	case "hours":
-		timeUnit = time.Hour
-	case "days":
-		timeUnit = time.Hour * 24
-	default:
-		http.Error(w, "Must specify valid time_unit parameter", http.StatusBadRequest)
-		return
-	}
-
-	// parse start and end time
-	startTime, err := time.Parse(time.RFC3339, r.URL.Query().Get("start_time"))
-	if err != nil {
-		httpError(w, http.StatusBadRequest, "Must specify valid start_time parameter: %s", err)
-		return
-	}
-	endTime, err := time.Parse(time.RFC3339, r.URL.Query().Get("end_time"))
-	if err != nil {
-		httpError(w, http.StatusBadRequest, "Must specify valid end_time parameter: %s", err)
-		return
-	}
-
-	data, err := config.dnsServer.GetStatsHistory(timeUnit, startTime, endTime)
-	if err != nil {
-		httpError(w, http.StatusBadRequest, "Cannot get stats history: %s", err)
-		return
-	}
-
-	statsJSON, err := json.Marshal(data)
-	if err != nil {
-		httpError(w, http.StatusInternalServerError, "Unable to marshal status json: %s", err)
-		return
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	_, err = w.Write(statsJSON)
-	if err != nil {
-		httpError(w, http.StatusInternalServerError, "Unable to write response json: %s", err)
-		return
-	}
-}
-
-// sortByValue is a helper function for querylog API
-func sortByValue(m map[string]int) []string {
-	type kv struct {
-		k string
-		v int
-	}
-	var ss []kv
-	for k, v := range m {
-		ss = append(ss, kv{k, v})
-	}
-	sort.Slice(ss, func(l, r int) bool {
-		return ss[l].v > ss[r].v
-	})
-
-	sorted := []string{}
-	for _, v := range ss {
-		sorted = append(sorted, v.k)
-	}
-	return sorted
-}
-
 // -----------------------
 // upstreams configuration
 // -----------------------
@@ -722,10 +577,6 @@ func registerControlHandlers() {
 	httpRegister(http.MethodPost, "/control/test_upstream_dns", handleTestUpstreamDNS)
 	httpRegister(http.MethodPost, "/control/i18n/change_language", handleI18nChangeLanguage)
 	httpRegister(http.MethodGet, "/control/i18n/current_language", handleI18nCurrentLanguage)
-	httpRegister(http.MethodGet, "/control/stats_top", handleStatsTop)
-	httpRegister(http.MethodGet, "/control/stats", handleStats)
-	httpRegister(http.MethodGet, "/control/stats_history", handleStatsHistory)
-	httpRegister(http.MethodPost, "/control/stats_reset", handleStatsReset)
 	http.HandleFunc("/control/version.json", postInstall(optionalAuth(handleGetVersionJSON)))
 	httpRegister(http.MethodPost, "/control/update", handleUpdate)
 	httpRegister(http.MethodPost, "/control/filtering/enable", handleFilteringEnable)
@@ -760,6 +611,7 @@ func registerControlHandlers() {
 	RegisterClientsHandlers()
 	registerRewritesHandlers()
 	RegisterBlockedServicesHandlers()
+	RegisterStatsHandlers()
 
 	http.HandleFunc("/dns-query", postInstall(handleDOH))
 }
diff --git a/home/dns.go b/home/dns.go
index ee25ebc1..cace3288 100644
--- a/home/dns.go
+++ b/home/dns.go
@@ -152,11 +152,6 @@ func startDNSServer() error {
 		return errorx.Decorate(err, "Couldn't start forwarding DNS server")
 	}
 
-	top := config.dnsServer.GetStatsTop()
-	for k := range top.Clients {
-		beginAsyncRDNS(k)
-	}
-
 	return nil
 }
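--
Editor's note, not part of the patch: the deleted stats.go kept each metric as a
fixed [statsHistoryElements]float64 array (61 buckets: 60 periods plus one spare
for delta calculations) and shifted it one bucket per elapsed period. Below is a
minimal stdlib-only sketch of that rotation technique; the names are illustrative,
not identifiers from this repository.

package main

import (
	"fmt"
	"sync"
	"time"
)

const historyElements = 60 + 1 // +1 bucket, as in the deleted code, for delta math

// ring holds one value per elapsed period; index 0 is the current period.
type ring struct {
	mu         sync.Mutex
	values     [historyElements]float64
	period     time.Duration
	lastRotate time.Time
}

// inc attributes an event to the bucket for the period it happened in.
func (r *ring) inc(when time.Time) {
	elapsed := int64(time.Since(when) / r.period)
	if elapsed >= historyElements {
		return // older than the window we keep
	}
	r.mu.Lock()
	r.values[elapsed]++
	r.mu.Unlock()
}

// rotate shifts every bucket one step toward the past for each period that
// elapsed since the previous rotation; the oldest bucket falls off the end.
func (r *ring) rotate(now time.Time) {
	r.mu.Lock()
	defer r.mu.Unlock()
	rotations := int64(now.Sub(r.lastRotate) / r.period)
	if rotations > historyElements {
		rotations = historyElements
	}
	for i := int64(0); i < rotations; i++ {
		var next [historyElements]float64
		copy(next[1:], r.values[:historyElements-1])
		r.values = next
	}
	if rotations > 0 {
		r.lastRotate = now
	}
}

func main() {
	r := &ring{period: time.Second, lastRotate: time.Now()}
	r.inc(time.Now())
	time.Sleep(1100 * time.Millisecond)
	r.rotate(time.Now())
	fmt.Println(r.values[1]) // the event has moved one bucket into the past: 1
}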
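A second note, also outside the patch: the deleted querylog_top.go kept 24 hourly
buckets of LRU caches (github.com/bluele/gcache) and summed them into day totals.
The sketch below reproduces the bucketing and rotation idea with plain maps in
place of gcache so it stays self-contained; it is an approximation under that
substitution, not the original implementation.

package main

import (
	"fmt"
	"sync"
	"time"
)

// hourBucket counts hits per key for one wall-clock hour.
// (The original used a bounded LRU cache here instead of a map.)
type hourBucket struct {
	mu     sync.Mutex
	counts map[string]int
}

func newHourBucket() *hourBucket { return &hourBucket{counts: map[string]int{}} }

// dayChart is a 24-entry ring of hourly buckets; index 0 is the current hour.
type dayChart struct {
	mu    sync.RWMutex
	hours []*hourBucket
}

func newDayChart() *dayChart {
	d := &dayChart{}
	for i := 0; i < 24; i++ {
		d.hours = append(d.hours, newHourBucket())
	}
	return d
}

// add counts one hit for key, bucketed by how many hours ago it happened,
// the same bucketing rule as the deleted addEntry.
func (d *dayChart) add(key string, when time.Time) {
	hour := int(time.Since(when).Hours())
	if hour < 0 || hour >= 24 {
		return // outside the 24-hour window
	}
	d.mu.RLock()
	b := d.hours[hour]
	d.mu.RUnlock()
	b.mu.Lock()
	b.counts[key]++
	b.mu.Unlock()
}

// rotate runs once per hour: a fresh bucket is prepended and the oldest
// one falls off the end, as rotateHourlyTop did.
func (d *dayChart) rotate() {
	d.mu.Lock()
	d.hours = append([]*hourBucket{newHourBucket()}, d.hours...)
	d.hours = d.hours[:24]
	d.mu.Unlock()
}

// top sums all 24 buckets into one map, as getStatsTop did.
func (d *dayChart) top() map[string]int {
	out := map[string]int{}
	d.mu.RLock()
	defer d.mu.RUnlock()
	for _, b := range d.hours {
		b.mu.Lock()
		for k, v := range b.counts {
			out[k] += v
		}
		b.mu.Unlock()
	}
	return out
}

func main() {
	d := newDayChart()
	d.add("example.org", time.Now())
	d.rotate()
	d.add("example.org", time.Now())
	fmt.Println(d.top()["example.org"]) // 2
}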
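Finally, the removed /control/stats_top handler hand-rolled its JSON so that keys
stayed ordered by count, with the ordering supplied by the sortByValue helper that
is also deleted here. A small sketch of that sort-by-descending-value, top-N
selection follows; topKeys is a hypothetical name, and the sketch is stdlib only.

package main

import (
	"fmt"
	"sort"
)

// topKeys returns the keys of m sorted by descending count, truncated to at
// most n entries — the job sortByValue plus the 50-entry cap performed for
// the removed stats_top endpoint.
func topKeys(m map[string]int, n int) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return m[keys[i]] > m[keys[j]] })
	if len(keys) > n {
		keys = keys[:n]
	}
	return keys
}

func main() {
	m := map[string]int{"a.example": 3, "b.example": 7, "c.example": 5}
	fmt.Println(topKeys(m, 2)) // [b.example c.example]
}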