(
- props.data &&
+const Line = ({ data, color }) => (
+ data &&
(props.color)}
+ colorBy={() => (color)}
tooltip={slice => (
{slice.data.map(d => (
diff --git a/client/src/containers/Clients.js b/client/src/containers/Clients.js
index 20f756d1..6651ceea 100644
--- a/client/src/containers/Clients.js
+++ b/client/src/containers/Clients.js
@@ -1,20 +1,22 @@
import { connect } from 'react-redux';
-import { getClients, getTopStats } from '../actions';
+import { getClients } from '../actions';
+import { getStats } from '../actions/stats';
import { addClient, updateClient, deleteClient, toggleClientModal } from '../actions/clients';
import Clients from '../components/Settings/Clients';
const mapStateToProps = (state) => {
- const { dashboard, clients } = state;
+ const { dashboard, clients, stats } = state;
const props = {
dashboard,
clients,
+ stats,
};
return props;
};
const mapDispatchToProps = {
getClients,
- getTopStats,
+ getStats,
addClient,
updateClient,
deleteClient,
diff --git a/client/src/containers/Dashboard.js b/client/src/containers/Dashboard.js
index d5874768..8d40df18 100644
--- a/client/src/containers/Dashboard.js
+++ b/client/src/containers/Dashboard.js
@@ -1,14 +1,23 @@
import { connect } from 'react-redux';
-import * as actionCreators from '../actions';
+import { toggleProtection, getClients } from '../actions';
+import { getStats, getStatsConfig, setStatsConfig } from '../actions/stats';
import Dashboard from '../components/Dashboard';
const mapStateToProps = (state) => {
- const { dashboard } = state;
- const props = { dashboard };
+ const { dashboard, stats } = state;
+ const props = { dashboard, stats };
return props;
};
+const mapDispatchToProps = {
+ toggleProtection,
+ getClients,
+ getStats,
+ getStatsConfig,
+ setStatsConfig,
+};
+
export default connect(
mapStateToProps,
- actionCreators,
+ mapDispatchToProps,
)(Dashboard);
diff --git a/client/src/containers/Settings.js b/client/src/containers/Settings.js
index 054d1d1b..14f08cfc 100644
--- a/client/src/containers/Settings.js
+++ b/client/src/containers/Settings.js
@@ -1,13 +1,15 @@
import { connect } from 'react-redux';
import { initSettings, toggleSetting } from '../actions';
import { getBlockedServices, setBlockedServices } from '../actions/services';
+import { getStatsConfig, setStatsConfig, resetStats } from '../actions/stats';
import Settings from '../components/Settings';
const mapStateToProps = (state) => {
- const { settings, services } = state;
+ const { settings, services, stats } = state;
const props = {
settings,
services,
+ stats,
};
return props;
};
@@ -17,6 +19,9 @@ const mapDispatchToProps = {
toggleSetting,
getBlockedServices,
setBlockedServices,
+ getStatsConfig,
+ setStatsConfig,
+ resetStats,
};
export default connect(
diff --git a/client/src/helpers/constants.js b/client/src/helpers/constants.js
index 74ac19c1..9f536d60 100644
--- a/client/src/helpers/constants.js
+++ b/client/src/helpers/constants.js
@@ -260,3 +260,5 @@ export const FILTERED_STATUS = {
FILTERED_BLOCKED_SERVICE: 'FilteredBlockedService',
REWRITE: 'Rewrite',
};
+
+export const STATS_INTERVALS_DAYS = [1, 7, 30, 90];
diff --git a/client/src/helpers/helpers.js b/client/src/helpers/helpers.js
index def170f0..ad0e94dd 100644
--- a/client/src/helpers/helpers.js
+++ b/client/src/helpers/helpers.js
@@ -2,11 +2,12 @@ import dateParse from 'date-fns/parse';
import dateFormat from 'date-fns/format';
import subHours from 'date-fns/sub_hours';
import addHours from 'date-fns/add_hours';
+import addDays from 'date-fns/add_days';
+import subDays from 'date-fns/sub_days';
import round from 'lodash/round';
import axios from 'axios';
import {
- STATS_NAMES,
STANDARD_DNS_PORT,
STANDARD_WEB_PORT,
STANDARD_HTTPS_PORT,
@@ -49,29 +50,28 @@ export const normalizeLogs = logs => logs.map((log) => {
};
});
-export const normalizeHistory = history => Object.keys(history).map((key) => {
- let id = STATS_NAMES[key];
- if (!id) {
- id = key.replace(/_/g, ' ').replace(/^\w/, c => c.toUpperCase());
+export const normalizeHistory = (history, interval) => {
+ if (interval === 1 || interval === 7) {
+ const hoursAgo = subHours(Date.now(), 24 * interval);
+ return history.map((item, index) => ({
+ x: dateFormat(addHours(hoursAgo, index), 'D MMM HH:00'),
+ y: round(item, 2),
+ }));
}
- const dayAgo = subHours(Date.now(), 24);
+ const daysAgo = subDays(Date.now(), interval - 1);
+ return history.map((item, index) => ({
+ x: dateFormat(addDays(daysAgo, index), 'D MMM YYYY'),
+ y: round(item, 2),
+ }));
+};
- const data = history[key].map((item, index) => {
- const formatHour = dateFormat(addHours(dayAgo, index), 'ddd HH:00');
- const roundValue = round(item, 2);
-
- return {
- x: formatHour,
- y: roundValue,
- };
- });
-
- return {
- id,
- data,
- };
-});
+export const normalizeTopStats = stats => (
+ stats.map(item => ({
+ name: Object.keys(item)[0],
+ count: Object.values(item)[0],
+ }))
+);
export const normalizeFilteringStatus = (filteringStatus) => {
const { enabled, filters, user_rules: userRules } = filteringStatus;
@@ -233,3 +233,11 @@ export const sortClients = (clients) => {
export const toggleAllServices = (services, change, isSelected) => {
services.forEach(service => change(`blocked_services.${service.id}`, isSelected));
};
+
+export const secondsToMilliseconds = (seconds) => {
+ if (seconds) {
+ return seconds * 1000;
+ }
+
+ return seconds;
+};
diff --git a/client/src/reducers/index.js b/client/src/reducers/index.js
index 2913f5cc..2af571b2 100644
--- a/client/src/reducers/index.js
+++ b/client/src/reducers/index.js
@@ -11,6 +11,7 @@ import clients from './clients';
import access from './access';
import rewrites from './rewrites';
import services from './services';
+import stats from './stats';
const settings = handleActions({
[actions.initSettingsRequest]: state => ({ ...state, processing: true }),
@@ -93,27 +94,6 @@ const dashboard = handleActions({
return newState;
},
- [actions.getStatsRequest]: state => ({ ...state, processingStats: true }),
- [actions.getStatsFailure]: state => ({ ...state, processingStats: false }),
- [actions.getStatsSuccess]: (state, { payload }) => {
- const newState = { ...state, stats: payload, processingStats: false };
- return newState;
- },
-
- [actions.getTopStatsRequest]: state => ({ ...state, processingTopStats: true }),
- [actions.getTopStatsFailure]: state => ({ ...state, processingTopStats: false }),
- [actions.getTopStatsSuccess]: (state, { payload }) => {
- const newState = { ...state, topStats: payload, processingTopStats: false };
- return newState;
- },
-
- [actions.getStatsHistoryRequest]: state => ({ ...state, processingStatsHistory: true }),
- [actions.getStatsHistoryFailure]: state => ({ ...state, processingStatsHistory: false }),
- [actions.getStatsHistorySuccess]: (state, { payload }) => {
- const newState = { ...state, statsHistory: payload, processingStatsHistory: false };
- return newState;
- },
-
[actions.toggleLogStatusRequest]: state => ({ ...state, logStatusProcessing: true }),
[actions.toggleLogStatusFailure]: state => ({ ...state, logStatusProcessing: false }),
[actions.toggleLogStatusSuccess]: (state) => {
@@ -199,8 +179,6 @@ const dashboard = handleActions({
}, {
processing: true,
isCoreRunning: false,
- processingTopStats: true,
- processingStats: true,
logStatusProcessing: false,
processingVersion: true,
processingFiltering: true,
@@ -217,7 +195,6 @@ const dashboard = handleActions({
dnsVersion: '',
clients: [],
autoClients: [],
- topStats: [],
});
const queryLogs = handleActions({
@@ -230,7 +207,11 @@ const queryLogs = handleActions({
[actions.downloadQueryLogRequest]: state => ({ ...state, logsDownloading: true }),
[actions.downloadQueryLogFailure]: state => ({ ...state, logsDownloading: false }),
[actions.downloadQueryLogSuccess]: state => ({ ...state, logsDownloading: false }),
-}, { getLogsProcessing: false, logsDownloading: false });
+}, {
+ getLogsProcessing: false,
+ logsDownloading: false,
+ logs: [],
+});
const filtering = handleActions({
[actions.setRulesRequest]: state => ({ ...state, processingRules: true }),
@@ -426,6 +407,7 @@ export default combineReducers({
access,
rewrites,
services,
+ stats,
loadingBar: loadingBarReducer,
form: formReducer,
});
diff --git a/client/src/reducers/stats.js b/client/src/reducers/stats.js
new file mode 100644
index 00000000..88c33a12
--- /dev/null
+++ b/client/src/reducers/stats.js
@@ -0,0 +1,97 @@
+import { handleActions } from 'redux-actions';
+
+import * as actions from '../actions/stats';
+
+const defaultStats = {
+ dnsQueries: [],
+ blockedFiltering: [],
+ replacedParental: [],
+ replacedSafebrowsing: [],
+ topBlockedDomains: [],
+ topClients: [],
+ topQueriedDomains: [],
+ numBlockedFiltering: 0,
+ numDnsQueries: 0,
+ numReplacedParental: 0,
+ numReplacedSafebrowsing: 0,
+ numReplacedSafesearch: 0,
+ avgProcessingTime: 0,
+};
+
+const stats = handleActions(
+ {
+ [actions.getStatsConfigRequest]: state => ({ ...state, processingGetConfig: true }),
+ [actions.getStatsConfigFailure]: state => ({ ...state, processingGetConfig: false }),
+ [actions.getStatsConfigSuccess]: (state, { payload }) => ({
+ ...state,
+ interval: payload.interval,
+ processingGetConfig: false,
+ }),
+
+ [actions.setStatsConfigRequest]: state => ({ ...state, processingSetConfig: true }),
+ [actions.setStatsConfigFailure]: state => ({ ...state, processingSetConfig: false }),
+ [actions.setStatsConfigSuccess]: (state, { payload }) => ({
+ ...state,
+ interval: payload.interval,
+ processingSetConfig: false,
+ }),
+
+ [actions.getStatsRequest]: state => ({ ...state, processingStats: true }),
+ [actions.getStatsFailure]: state => ({ ...state, processingStats: false }),
+ [actions.getStatsSuccess]: (state, { payload }) => {
+ const {
+ dns_queries: dnsQueries,
+ blocked_filtering: blockedFiltering,
+ replaced_parental: replacedParental,
+ replaced_safebrowsing: replacedSafebrowsing,
+ top_blocked_domains: topBlockedDomains,
+ top_clients: topClients,
+ top_queried_domains: topQueriedDomains,
+ num_blocked_filtering: numBlockedFiltering,
+ num_dns_queries: numDnsQueries,
+ num_replaced_parental: numReplacedParental,
+ num_replaced_safebrowsing: numReplacedSafebrowsing,
+ num_replaced_safesearch: numReplacedSafesearch,
+ avg_processing_time: avgProcessingTime,
+ } = payload;
+
+ const newState = {
+ ...state,
+ processingStats: false,
+ dnsQueries,
+ blockedFiltering,
+ replacedParental,
+ replacedSafebrowsing,
+ topBlockedDomains,
+ topClients,
+ topQueriedDomains,
+ numBlockedFiltering,
+ numDnsQueries,
+ numReplacedParental,
+ numReplacedSafebrowsing,
+ numReplacedSafesearch,
+ avgProcessingTime,
+ };
+
+ return newState;
+ },
+
+ [actions.resetStatsRequest]: state => ({ ...state, processingReset: true }),
+ [actions.resetStatsFailure]: state => ({ ...state, processingReset: false }),
+ [actions.resetStatsSuccess]: state => ({
+ ...state,
+ ...defaultStats,
+ processingReset: false,
+ }),
+ },
+ {
+ processingGetConfig: false,
+ processingSetConfig: false,
+ processingStats: true,
+ processingReset: false,
+ interval: 1,
+ ...defaultStats,
+ },
+);
+
+export default stats;
diff --git a/dnsforward/dnsforward.go b/dnsforward/dnsforward.go
index 2eec20a4..29146843 100644
--- a/dnsforward/dnsforward.go
+++ b/dnsforward/dnsforward.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/AdguardTeam/AdGuardHome/dnsfilter"
+ "github.com/AdguardTeam/AdGuardHome/stats"
"github.com/AdguardTeam/dnsproxy/proxy"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/log"
@@ -40,7 +41,7 @@ type Server struct {
dnsProxy *proxy.Proxy // DNS proxy instance
dnsFilter *dnsfilter.Dnsfilter // DNS filter instance
queryLog *queryLog // Query log instance
- stats *stats // General server statistics
+ stats stats.Stats
AllowedClients map[string]bool // IP addresses of whitelist clients
DisallowedClients map[string]bool // IP addresses of clients that should be blocked
@@ -55,22 +56,14 @@ type Server struct {
// NewServer creates a new instance of the dnsforward.Server
// baseDir is the base directory for query logs
// Note: this function must be called only once
-func NewServer(baseDir string) *Server {
+func NewServer(baseDir string, stats stats.Stats) *Server {
s := &Server{
queryLog: newQueryLog(baseDir),
- stats: newStats(),
- }
-
- log.Tracef("Loading stats from querylog")
- err := s.queryLog.fillStatsFromQueryLog(s.stats)
- if err != nil {
- log.Error("failed to load stats from querylog: %s", err)
}
+ s.stats = stats
log.Printf("Start DNS server periodic jobs")
go s.queryLog.periodicQueryLogRotate()
- go s.queryLog.runningTop.periodicHourlyTopRotate()
- go s.stats.statsRotator()
return s
}
@@ -357,38 +350,6 @@ func (s *Server) GetQueryLog() []map[string]interface{} {
return s.queryLog.getQueryLog()
}
-// GetStatsTop returns the current stop stats
-func (s *Server) GetStatsTop() *StatsTop {
- s.RLock()
- defer s.RUnlock()
- return s.queryLog.runningTop.getStatsTop()
-}
-
-// PurgeStats purges current server stats
-func (s *Server) PurgeStats() {
- s.Lock()
- defer s.Unlock()
- s.stats.purgeStats()
-}
-
-// GetAggregatedStats returns aggregated stats data for the 24 hours
-func (s *Server) GetAggregatedStats() map[string]interface{} {
- s.RLock()
- defer s.RUnlock()
- return s.stats.getAggregatedStats()
-}
-
-// GetStatsHistory gets stats history aggregated by the specified time unit
-// timeUnit is either time.Second, time.Minute, time.Hour, or 24*time.Hour
-// start is start of the time range
-// end is end of the time range
-// returns nil if time unit is not supported
-func (s *Server) GetStatsHistory(timeUnit time.Duration, startTime time.Time, endTime time.Time) (map[string]interface{}, error) {
- s.RLock()
- defer s.RUnlock()
- return s.stats.getStatsHistory(timeUnit, startTime, endTime)
-}
-
// Return TRUE if this client should be blocked
func (s *Server) isBlockedIP(ip string) bool {
if len(s.AllowedClients) != 0 || len(s.AllowedClientsIPNet) != 0 {
@@ -507,21 +468,61 @@ func (s *Server) handleDNSRequest(p *proxy.Proxy, d *proxy.DNSContext) error {
shouldLog = false
}
+ elapsed := time.Since(start)
if s.conf.QueryLogEnabled && shouldLog {
- elapsed := time.Since(start)
upstreamAddr := ""
if d.Upstream != nil {
upstreamAddr = d.Upstream.Address()
}
- entry := s.queryLog.logRequest(msg, d.Res, res, elapsed, d.Addr, upstreamAddr)
- if entry != nil {
- s.stats.incrementCounters(entry)
- }
+ _ = s.queryLog.logRequest(msg, d.Res, res, elapsed, d.Addr, upstreamAddr)
}
+ s.updateStats(d, elapsed, *res)
+
return nil
}
+func (s *Server) updateStats(d *proxy.DNSContext, elapsed time.Duration, res dnsfilter.Result) {
+ if s.stats == nil {
+ return
+ }
+
+ e := stats.Entry{}
+ e.Domain = strings.ToLower(d.Req.Question[0].Name)
+ e.Domain = e.Domain[:len(e.Domain)-1] // remove last "."
+ switch addr := d.Addr.(type) {
+ case *net.UDPAddr:
+ e.Client = addr.IP
+ case *net.TCPAddr:
+ e.Client = addr.IP
+ }
+ e.Time = uint(elapsed / 1000) // elapsed is in nanoseconds; the stored value is microseconds
+ switch res.Reason {
+
+ case dnsfilter.NotFilteredNotFound:
+ fallthrough
+ case dnsfilter.NotFilteredWhiteList:
+ fallthrough
+ case dnsfilter.NotFilteredError:
+ e.Result = stats.RNotFiltered
+
+ case dnsfilter.FilteredSafeBrowsing:
+ e.Result = stats.RSafeBrowsing
+ case dnsfilter.FilteredParental:
+ e.Result = stats.RParental
+ case dnsfilter.FilteredSafeSearch:
+ e.Result = stats.RSafeSearch
+
+ case dnsfilter.FilteredBlackList:
+ fallthrough
+ case dnsfilter.FilteredInvalid:
+ fallthrough
+ case dnsfilter.FilteredBlockedService:
+ e.Result = stats.RFiltered
+ }
+ s.stats.Update(e)
+}
+
// filterDNSRequest applies the dnsFilter and sets d.Res if the request was filtered
func (s *Server) filterDNSRequest(d *proxy.DNSContext) (*dnsfilter.Result, error) {
var res dnsfilter.Result
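
Note: the new stats package itself is added elsewhere in this change and is not part of this diff. From the call sites above (stats.Entry, s.stats.Update, the R* result codes) its surface can be sketched roughly as follows; every name and type here is inferred from usage, not copied from the real package:

    package stats

    import "net"

    // Time units accepted by GetData (see handleStats in home/control_stats.go below).
    const (
        Hours = iota
        Days
    )

    // Result codes assigned by Server.updateStats above.
    const (
        RNotFiltered = iota + 1
        RFiltered
        RSafeBrowsing
        RSafeSearch
        RParental
    )

    // Entry is one recorded DNS request.
    type Entry struct {
        Domain string // query name, lowercased, trailing dot removed
        Client net.IP // client address taken from the UDP/TCP peer
        Result int    // one of the R* codes
        Time   uint   // processing time in microseconds (elapsed / 1000)
    }

    // Stats is the interface dnsforward.Server and the HTTP handlers depend on.
    type Stats interface {
        Update(e Entry)                           // record one request
        GetData(units int) map[string]interface{} // aggregated data for /control/stats
        Configure(limitDays int)                  // change the retention interval
        Clear()                                   // wipe all collected statistics
        Close()                                   // flush and close the on-disk store
    }

    // New opens or creates the store; the first two arguments match the call
    // in home/dns.go, while the third one's type is unknown here (nil at the
    // call site), so it is left as interface{} in this sketch.
    func New(filename string, limitDays int, conf interface{}) Stats {
        return nil // the real implementation lives in the stats package
    }

Injecting an interface instead of the old concrete *stats struct is what lets the tests below hand NewServer a nil Stats and skip statistics entirely; updateStats guards against that with its s.stats == nil check.
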
diff --git a/dnsforward/dnsforward_test.go b/dnsforward/dnsforward_test.go
index 6d41c62a..740a43b8 100644
--- a/dnsforward/dnsforward_test.go
+++ b/dnsforward/dnsforward_test.go
@@ -48,10 +48,6 @@ func TestServer(t *testing.T) {
// check query log and stats
log := s.GetQueryLog()
assert.Equal(t, 1, len(log), "Log size")
- stats := s.GetStatsTop()
- assert.Equal(t, 1, len(stats.Domains), "Top domains length")
- assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 1, len(stats.Clients), "Top clients length")
// message over TCP
req = createGoogleATestMessage()
@@ -66,11 +62,6 @@ func TestServer(t *testing.T) {
// check query log and stats again
log = s.GetQueryLog()
assert.Equal(t, 2, len(log), "Log size")
- stats = s.GetStatsTop()
- // Length did not change as we queried the same domain
- assert.Equal(t, 1, len(stats.Domains), "Top domains length")
- assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 1, len(stats.Clients), "Top clients length")
err = s.Stop()
if err != nil {
@@ -100,10 +91,6 @@ func TestServerWithProtectionDisabled(t *testing.T) {
// check query log and stats
log := s.GetQueryLog()
assert.Equal(t, 1, len(log), "Log size")
- stats := s.GetStatsTop()
- assert.Equal(t, 1, len(stats.Domains), "Top domains length")
- assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 1, len(stats.Clients), "Top clients length")
err = s.Stop()
if err != nil {
@@ -195,11 +182,6 @@ func TestSafeSearch(t *testing.T) {
exchangeAndAssertResponse(t, &client, addr, host, "213.180.193.56")
}
- // Check aggregated stats
- assert.Equal(t, s.GetAggregatedStats()["replaced_safesearch"], float64(len(yandexDomains)))
- assert.Equal(t, s.GetAggregatedStats()["blocked_filtering"], float64(len(yandexDomains)))
- assert.Equal(t, s.GetAggregatedStats()["dns_queries"], float64(len(yandexDomains)))
-
// Let's lookup for google safesearch ip
ips, err := net.LookupIP("forcesafesearch.google.com")
if err != nil {
@@ -220,27 +202,6 @@ func TestSafeSearch(t *testing.T) {
exchangeAndAssertResponse(t, &client, addr, host, ip.String())
}
- // Check aggregated stats
- assert.Equal(t, s.GetAggregatedStats()["replaced_safesearch"], float64(len(yandexDomains)+len(googleDomains)))
- assert.Equal(t, s.GetAggregatedStats()["blocked_filtering"], float64(len(yandexDomains)+len(googleDomains)))
- assert.Equal(t, s.GetAggregatedStats()["dns_queries"], float64(len(yandexDomains)+len(googleDomains)))
-
- // Do one more exchange
- exchangeAndAssertResponse(t, &client, addr, "google-public-dns-a.google.com.", "8.8.8.8")
-
- // Check aggregated stats
- assert.Equal(t, s.GetAggregatedStats()["replaced_safesearch"], float64(len(yandexDomains)+len(googleDomains)))
- assert.Equal(t, s.GetAggregatedStats()["blocked_filtering"], float64(len(yandexDomains)+len(googleDomains)))
- assert.Equal(t, s.GetAggregatedStats()["dns_queries"], float64(len(yandexDomains)+len(googleDomains)+1))
-
- // Count of blocked domains (there is `yandex.com` duplicate in yandexDomains array)
- blockedCount := len(yandexDomains) - 1 + len(googleDomains)
- assert.Equal(t, len(s.GetStatsTop().Blocked), blockedCount)
-
- // Count of domains (blocked domains + `google-public-dns-a.google.com`)
- domainsCount := blockedCount + 1
- assert.Equal(t, len(s.GetStatsTop().Domains), domainsCount)
-
err = s.Stop()
if err != nil {
t.Fatalf("Can not stopd server cause: %s", err)
@@ -272,10 +233,6 @@ func TestInvalidRequest(t *testing.T) {
// invalid requests aren't written to the query log
log := s.GetQueryLog()
assert.Equal(t, 0, len(log), "Log size")
- stats := s.GetStatsTop()
- assert.Equal(t, 0, len(stats.Domains), "Top domains length")
- assert.Equal(t, 0, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 0, len(stats.Clients), "Top clients length")
err = s.Stop()
if err != nil {
@@ -313,10 +270,6 @@ func TestBlockedRequest(t *testing.T) {
// check query log and stats
log := s.GetQueryLog()
assert.Equal(t, 1, len(log), "Log size")
- stats := s.GetStatsTop()
- assert.Equal(t, 1, len(stats.Domains), "Top domains length")
- assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 1, len(stats.Clients), "Top clients length")
err = s.Stop()
if err != nil {
@@ -362,10 +315,6 @@ func TestNullBlockedRequest(t *testing.T) {
// check query log and stats
log := s.GetQueryLog()
assert.Equal(t, 1, len(log), "Log size")
- stats := s.GetStatsTop()
- assert.Equal(t, 1, len(stats.Domains), "Top domains length")
- assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 1, len(stats.Clients), "Top clients length")
err = s.Stop()
if err != nil {
@@ -410,10 +359,6 @@ func TestBlockedByHosts(t *testing.T) {
// check query log and stats
log := s.GetQueryLog()
assert.Equal(t, 1, len(log), "Log size")
- stats := s.GetStatsTop()
- assert.Equal(t, 1, len(stats.Domains), "Top domains length")
- assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 1, len(stats.Clients), "Top clients length")
err = s.Stop()
if err != nil {
@@ -469,10 +414,6 @@ func TestBlockedBySafeBrowsing(t *testing.T) {
// check query log and stats
log := s.GetQueryLog()
assert.Equal(t, 1, len(log), "Log size")
- stats := s.GetStatsTop()
- assert.Equal(t, 1, len(stats.Domains), "Top domains length")
- assert.Equal(t, 1, len(stats.Blocked), "Top blocked length")
- assert.Equal(t, 1, len(stats.Clients), "Top clients length")
err = s.Stop()
if err != nil {
@@ -481,7 +422,7 @@ func TestBlockedBySafeBrowsing(t *testing.T) {
}
func createTestServer(t *testing.T) *Server {
- s := NewServer(createDataDir(t))
+ s := NewServer(createDataDir(t), nil)
s.conf.UDPListenAddr = &net.UDPAddr{Port: 0}
s.conf.TCPListenAddr = &net.TCPAddr{Port: 0}
diff --git a/dnsforward/querylog.go b/dnsforward/querylog.go
index 230cffd2..fbbeb7f2 100644
--- a/dnsforward/querylog.go
+++ b/dnsforward/querylog.go
@@ -25,8 +25,7 @@ const (
// queryLog is a structure that writes and reads the DNS query log
type queryLog struct {
- logFile string // path to the log file
- runningTop *dayTop // current top charts
+ logFile string // path to the log file
logBufferLock sync.RWMutex
logBuffer []*logEntry
@@ -40,10 +39,8 @@ type queryLog struct {
// newQueryLog creates a new instance of the query log
func newQueryLog(baseDir string) *queryLog {
l := &queryLog{
- logFile: filepath.Join(baseDir, queryLogFileName),
- runningTop: &dayTop{},
+ logFile: filepath.Join(baseDir, queryLogFileName),
}
- l.runningTop.init()
return l
}
@@ -112,13 +109,6 @@ func (l *queryLog) logRequest(question *dns.Msg, answer *dns.Msg, result *dnsfil
}
l.queryLogLock.Unlock()
- // add it to running top
- err = l.runningTop.addEntry(&entry, question, now)
- if err != nil {
- log.Printf("Failed to add entry to running top: %s", err)
- // don't do failure, just log
- }
-
// if buffer needs to be flushed to disk, do it now
if needFlush {
// write to file
diff --git a/dnsforward/querylog_file.go b/dnsforward/querylog_file.go
index 2147b042..e990fdec 100644
--- a/dnsforward/querylog_file.go
+++ b/dnsforward/querylog_file.go
@@ -178,99 +178,3 @@ func (l *queryLog) periodicQueryLogRotate() {
}
}
}
-
-func (l *queryLog) genericLoader(onEntry func(entry *logEntry) error, needMore func() bool, timeWindow time.Duration) error {
- now := time.Now()
- // read from querylog files, try newest file first
- var files []string
-
- if enableGzip {
- files = []string{
- l.logFile + ".gz",
- l.logFile + ".gz.1",
- }
- } else {
- files = []string{
- l.logFile,
- l.logFile + ".1",
- }
- }
-
- // read from all files
- for _, file := range files {
- if !needMore() {
- break
- }
- if _, err := os.Stat(file); os.IsNotExist(err) {
- // do nothing, file doesn't exist
- continue
- }
-
- f, err := os.Open(file)
- if err != nil {
- log.Error("Failed to open file \"%s\": %s", file, err)
- // try next file
- continue
- }
- defer f.Close()
-
- var d *json.Decoder
-
- if enableGzip {
- zr, err := gzip.NewReader(f)
- if err != nil {
- log.Error("Failed to create gzip reader: %s", err)
- continue
- }
- defer zr.Close()
- d = json.NewDecoder(zr)
- } else {
- d = json.NewDecoder(f)
- }
-
- i := 0
- over := 0
- max := 10000 * time.Second
- var sum time.Duration
- // entries on file are in oldest->newest order
- // we want maxLen newest
- for d.More() {
- if !needMore() {
- break
- }
- var entry logEntry
- err := d.Decode(&entry)
- if err != nil {
- log.Error("Failed to decode: %s", err)
- // next entry can be fine, try more
- continue
- }
-
- if now.Sub(entry.Time) > timeWindow {
- // log.Tracef("skipping entry") // debug logging
- continue
- }
-
- if entry.Elapsed > max {
- over++
- } else {
- sum += entry.Elapsed
- }
-
- i++
- err = onEntry(&entry)
- if err != nil {
- return err
- }
- }
- elapsed := time.Since(now)
- var perunit time.Duration
- var avg time.Duration
- if i > 0 {
- perunit = elapsed / time.Duration(i)
- avg = sum / time.Duration(i)
- }
- log.Debug("file \"%s\": read %d entries in %v, %v/entry, %v over %v, %v avg", file, i, elapsed, perunit, over, max, avg)
- }
- return nil
-}
diff --git a/dnsforward/querylog_top.go b/dnsforward/querylog_top.go
deleted file mode 100644
index 3a81182d..00000000
--- a/dnsforward/querylog_top.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package dnsforward
-
-import (
- "fmt"
- "os"
- "path"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "github.com/AdguardTeam/golibs/log"
- "github.com/bluele/gcache"
- "github.com/miekg/dns"
-)
-
-type hourTop struct {
- domains gcache.Cache
- blocked gcache.Cache
- clients gcache.Cache
-
- mutex sync.RWMutex
-}
-
-func (h *hourTop) init() {
- h.domains = gcache.New(queryLogTopSize).LRU().Build()
- h.blocked = gcache.New(queryLogTopSize).LRU().Build()
- h.clients = gcache.New(queryLogTopSize).LRU().Build()
-}
-
-type dayTop struct {
- hours []*hourTop
- hoursLock sync.RWMutex // writelock this lock ONLY WHEN rotating or intializing hours!
-
- loaded bool
- loadedLock sync.Mutex
-}
-
-func (d *dayTop) init() {
- d.hoursWriteLock()
- for i := 0; i < 24; i++ {
- hour := hourTop{}
- hour.init()
- d.hours = append(d.hours, &hour)
- }
- d.hoursWriteUnlock()
-}
-
-func (d *dayTop) rotateHourlyTop() {
- log.Printf("Rotating hourly top")
- hour := &hourTop{}
- hour.init()
- d.hoursWriteLock()
- d.hours = append([]*hourTop{hour}, d.hours...)
- d.hours = d.hours[:24]
- d.hoursWriteUnlock()
-}
-
-func (d *dayTop) periodicHourlyTopRotate() {
- t := time.Hour
- for range time.Tick(t) {
- d.rotateHourlyTop()
- }
-}
-
-func (h *hourTop) incrementValue(key string, cache gcache.Cache) error {
- h.Lock()
- defer h.Unlock()
- ivalue, err := cache.Get(key)
- if err == gcache.KeyNotFoundError {
- // we just set it and we're done
- err = cache.Set(key, 1)
- if err != nil {
- log.Printf("Failed to set hourly top value: %s", err)
- return err
- }
- return nil
- }
-
- if err != nil {
- log.Printf("gcache encountered an error during get: %s", err)
- return err
- }
-
- cachedValue, ok := ivalue.(int)
- if !ok {
- err = fmt.Errorf("SHOULD NOT HAPPEN: gcache has non-int as value: %v", ivalue)
- log.Println(err)
- return err
- }
-
- err = cache.Set(key, cachedValue+1)
- if err != nil {
- log.Printf("Failed to set hourly top value: %s", err)
- return err
- }
- return nil
-}
-
-func (h *hourTop) incrementDomains(key string) error {
- return h.incrementValue(key, h.domains)
-}
-
-func (h *hourTop) incrementBlocked(key string) error {
- return h.incrementValue(key, h.blocked)
-}
-
-func (h *hourTop) incrementClients(key string) error {
- return h.incrementValue(key, h.clients)
-}
-
-// if does not exist -- return 0
-func (h *hourTop) lockedGetValue(key string, cache gcache.Cache) (int, error) {
- ivalue, err := cache.Get(key)
- if err == gcache.KeyNotFoundError {
- return 0, nil
- }
-
- if err != nil {
- log.Printf("gcache encountered an error during get: %s", err)
- return 0, err
- }
-
- value, ok := ivalue.(int)
- if !ok {
- err := fmt.Errorf("SHOULD NOT HAPPEN: gcache has non-int as value: %v", ivalue)
- log.Println(err)
- return 0, err
- }
-
- return value, nil
-}
-
-func (h *hourTop) lockedGetDomains(key string) (int, error) {
- return h.lockedGetValue(key, h.domains)
-}
-
-func (h *hourTop) lockedGetBlocked(key string) (int, error) {
- return h.lockedGetValue(key, h.blocked)
-}
-
-func (h *hourTop) lockedGetClients(key string) (int, error) {
- return h.lockedGetValue(key, h.clients)
-}
-
-func (d *dayTop) addEntry(entry *logEntry, q *dns.Msg, now time.Time) error {
- // figure out which hour bucket it belongs to
- hour := int(now.Sub(entry.Time).Hours())
- if hour >= 24 {
- log.Printf("t %v is >24 hours ago, ignoring", entry.Time)
- return nil
- }
-
- // if a DNS query doesn't have questions, do nothing
- if len(q.Question) == 0 {
- return nil
- }
-
- hostname := strings.ToLower(strings.TrimSuffix(q.Question[0].Name, "."))
-
- // if question hostname is empty, do nothing
- if hostname == "" {
- return nil
- }
-
- // get value, if not set, crate one
- d.hoursReadLock()
- defer d.hoursReadUnlock()
- err := d.hours[hour].incrementDomains(hostname)
- if err != nil {
- log.Printf("Failed to increment value: %s", err)
- return err
- }
-
- if entry.Result.IsFiltered {
- err := d.hours[hour].incrementBlocked(hostname)
- if err != nil {
- log.Printf("Failed to increment value: %s", err)
- return err
- }
- }
-
- if len(entry.IP) > 0 {
- err := d.hours[hour].incrementClients(entry.IP)
- if err != nil {
- log.Printf("Failed to increment value: %s", err)
- return err
- }
- }
-
- return nil
-}
-
-func (l *queryLog) fillStatsFromQueryLog(s *stats) error {
- now := time.Now()
- l.runningTop.loadedWriteLock()
- defer l.runningTop.loadedWriteUnlock()
- if l.runningTop.loaded {
- return nil
- }
- onEntry := func(entry *logEntry) error {
- if len(entry.Question) == 0 {
- log.Printf("entry question is absent, skipping")
- return nil
- }
-
- if entry.Time.After(now) {
- log.Printf("t %v vs %v is in the future, ignoring", entry.Time, now)
- return nil
- }
-
- q := new(dns.Msg)
- if err := q.Unpack(entry.Question); err != nil {
- log.Printf("failed to unpack dns message question: %s", err)
- return err
- }
-
- if len(q.Question) != 1 {
- log.Printf("malformed dns message, has no questions, skipping")
- return nil
- }
-
- err := l.runningTop.addEntry(entry, q, now)
- if err != nil {
- log.Printf("Failed to add entry to running top: %s", err)
- return err
- }
-
- l.queryLogLock.Lock()
- l.queryLogCache = append(l.queryLogCache, entry)
- if len(l.queryLogCache) > queryLogSize {
- toremove := len(l.queryLogCache) - queryLogSize
- l.queryLogCache = l.queryLogCache[toremove:]
- }
- l.queryLogLock.Unlock()
-
- s.incrementCounters(entry)
- return nil
- }
-
- needMore := func() bool { return true }
- err := l.genericLoader(onEntry, needMore, queryLogTimeLimit)
- if err != nil {
- log.Printf("Failed to load entries from querylog: %s", err)
- return err
- }
-
- l.runningTop.loaded = true
- return nil
-}
-
-// StatsTop represents top stat charts
-type StatsTop struct {
- Domains map[string]int // Domains - top requested domains
- Blocked map[string]int // Blocked - top blocked domains
- Clients map[string]int // Clients - top DNS clients
-}
-
-// getStatsTop returns the current top stats
-func (d *dayTop) getStatsTop() *StatsTop {
- s := &StatsTop{
- Domains: map[string]int{},
- Blocked: map[string]int{},
- Clients: map[string]int{},
- }
-
- do := func(keys []interface{}, getter func(key string) (int, error), result map[string]int) {
- for _, ikey := range keys {
- key, ok := ikey.(string)
- if !ok {
- continue
- }
- value, err := getter(key)
- if err != nil {
- log.Printf("Failed to get top domains value for %v: %s", key, err)
- return
- }
- result[key] += value
- }
- }
-
- d.hoursReadLock()
- for hour := 0; hour < 24; hour++ {
- d.hours[hour].RLock()
- do(d.hours[hour].domains.Keys(false), d.hours[hour].lockedGetDomains, s.Domains)
- do(d.hours[hour].blocked.Keys(false), d.hours[hour].lockedGetBlocked, s.Blocked)
- do(d.hours[hour].clients.Keys(false), d.hours[hour].lockedGetClients, s.Clients)
- d.hours[hour].RUnlock()
- }
- d.hoursReadUnlock()
-
- return s
-}
-
-func (d *dayTop) hoursWriteLock() { tracelock(); d.hoursLock.Lock() }
-func (d *dayTop) hoursWriteUnlock() { tracelock(); d.hoursLock.Unlock() }
-func (d *dayTop) hoursReadLock() { tracelock(); d.hoursLock.RLock() }
-func (d *dayTop) hoursReadUnlock() { tracelock(); d.hoursLock.RUnlock() }
-func (d *dayTop) loadedWriteLock() { tracelock(); d.loadedLock.Lock() }
-func (d *dayTop) loadedWriteUnlock() { tracelock(); d.loadedLock.Unlock() }
-
-func (h *hourTop) Lock() { tracelock(); h.mutex.Lock() }
-func (h *hourTop) RLock() { tracelock(); h.mutex.RLock() }
-func (h *hourTop) RUnlock() { tracelock(); h.mutex.RUnlock() }
-func (h *hourTop) Unlock() { tracelock(); h.mutex.Unlock() }
-
-func tracelock() {
- if false { // not commented out to make code checked during compilation
- pc := make([]uintptr, 10) // at least 1 entry needed
- runtime.Callers(2, pc)
- f := path.Base(runtime.FuncForPC(pc[1]).Name())
- lockf := path.Base(runtime.FuncForPC(pc[0]).Name())
- fmt.Fprintf(os.Stderr, "%s(): %s\n", f, lockf)
- }
-}
diff --git a/dnsforward/stats.go b/dnsforward/stats.go
deleted file mode 100644
index 62565a98..00000000
--- a/dnsforward/stats.go
+++ /dev/null
@@ -1,355 +0,0 @@
-package dnsforward
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/AdguardTeam/AdGuardHome/dnsfilter"
-)
-
-// how far back to keep the stats
-const statsHistoryElements = 60 + 1 // +1 for calculating delta
-
-// entries for single time period (for example all per-second entries)
-type statsEntries map[string][statsHistoryElements]float64
-
-// each periodic stat is a map of arrays
-type periodicStats struct {
- entries statsEntries
- period time.Duration // how long one entry lasts
- lastRotate time.Time // last time this data was rotated
-
- sync.RWMutex
-}
-
-// stats is the DNS server historical statistics
-type stats struct {
- perSecond periodicStats
- perMinute periodicStats
- perHour periodicStats
- perDay periodicStats
-
- requests *counter // total number of requests
- filtered *counter // total number of filtered requests
- filteredLists *counter // total number of requests blocked by filter lists
- filteredSafebrowsing *counter // total number of requests blocked by safebrowsing
- filteredParental *counter // total number of requests blocked by the parental control
- whitelisted *counter // total number of requests whitelisted by filter lists
- safesearch *counter // total number of requests for which safe search rules were applied
- errorsTotal *counter // total number of errors
- elapsedTime *histogram // requests duration histogram
-}
-
-// initializes an empty stats structure
-func newStats() *stats {
- s := &stats{
- requests: newDNSCounter("requests_total"),
- filtered: newDNSCounter("filtered_total"),
- filteredLists: newDNSCounter("filtered_lists_total"),
- filteredSafebrowsing: newDNSCounter("filtered_safebrowsing_total"),
- filteredParental: newDNSCounter("filtered_parental_total"),
- whitelisted: newDNSCounter("whitelisted_total"),
- safesearch: newDNSCounter("safesearch_total"),
- errorsTotal: newDNSCounter("errors_total"),
- elapsedTime: newDNSHistogram("request_duration"),
- }
-
- // Initializes empty per-sec/minute/hour/day stats
- s.purgeStats()
- return s
-}
-
-func initPeriodicStats(periodic *periodicStats, period time.Duration) {
- periodic.Lock()
- periodic.entries = statsEntries{}
- periodic.lastRotate = time.Now()
- periodic.period = period
- periodic.Unlock()
-}
-
-func (s *stats) purgeStats() {
- initPeriodicStats(&s.perSecond, time.Second)
- initPeriodicStats(&s.perMinute, time.Minute)
- initPeriodicStats(&s.perHour, time.Hour)
- initPeriodicStats(&s.perDay, time.Hour*24)
-}
-
-func (p *periodicStats) Inc(name string, when time.Time) {
- // calculate how many periods ago this happened
- elapsed := int64(time.Since(when) / p.period)
- // log.Tracef("%s: %v as %v -> [%v]", name, time.Since(when), p.period, elapsed)
- if elapsed >= statsHistoryElements {
- return // outside of our timeframe
- }
- p.Lock()
- currentValues := p.entries[name]
- currentValues[elapsed]++
- p.entries[name] = currentValues
- p.Unlock()
-}
-
-func (p *periodicStats) Observe(name string, when time.Time, value float64) {
- // calculate how many periods ago this happened
- elapsed := int64(time.Since(when) / p.period)
- // log.Tracef("%s: %v as %v -> [%v]", name, time.Since(when), p.period, elapsed)
- if elapsed >= statsHistoryElements {
- return // outside of our timeframe
- }
- p.Lock()
- {
- countname := name + "_count"
- currentValues := p.entries[countname]
- v := currentValues[elapsed]
- // log.Tracef("Will change p.entries[%s][%d] from %v to %v", countname, elapsed, value, value+1)
- v++
- currentValues[elapsed] = v
- p.entries[countname] = currentValues
- }
- {
- totalname := name + "_sum"
- currentValues := p.entries[totalname]
- currentValues[elapsed] += value
- p.entries[totalname] = currentValues
- }
- p.Unlock()
-}
-
-func (p *periodicStats) statsRotate(now time.Time) {
- p.Lock()
- rotations := int64(now.Sub(p.lastRotate) / p.period)
- if rotations > statsHistoryElements {
- rotations = statsHistoryElements
- }
- // calculate how many times we should rotate
- for r := int64(0); r < rotations; r++ {
- for key, values := range p.entries {
- newValues := [statsHistoryElements]float64{}
- for i := 1; i < len(values); i++ {
- newValues[i] = values[i-1]
- }
- p.entries[key] = newValues
- }
- }
- if rotations > 0 {
- p.lastRotate = now
- }
- p.Unlock()
-}
-
-func (s *stats) statsRotator() {
- for range time.Tick(time.Second) {
- now := time.Now()
- s.perSecond.statsRotate(now)
- s.perMinute.statsRotate(now)
- s.perHour.statsRotate(now)
- s.perDay.statsRotate(now)
- }
-}
-
-// counter that wraps around prometheus Counter but also adds to periodic stats
-type counter struct {
- name string // used as key in periodic stats
- value int64
-
- sync.Mutex
-}
-
-func newDNSCounter(name string) *counter {
- // log.Tracef("called")
- return &counter{
- name: name,
- }
-}
-
-func (s *stats) incWithTime(c *counter, when time.Time) {
- s.perSecond.Inc(c.name, when)
- s.perMinute.Inc(c.name, when)
- s.perHour.Inc(c.name, when)
- s.perDay.Inc(c.name, when)
- c.Lock()
- c.value++
- c.Unlock()
-}
-
-type histogram struct {
- name string // used as key in periodic stats
- count int64
- total float64
-
- sync.Mutex
-}
-
-func newDNSHistogram(name string) *histogram {
- return &histogram{
- name: name,
- }
-}
-
-func (s *stats) observeWithTime(h *histogram, value float64, when time.Time) {
- s.perSecond.Observe(h.name, when, value)
- s.perMinute.Observe(h.name, when, value)
- s.perHour.Observe(h.name, when, value)
- s.perDay.Observe(h.name, when, value)
- h.Lock()
- h.count++
- h.total += value
- h.Unlock()
-}
-
-// -----
-// stats
-// -----
-func (s *stats) incrementCounters(entry *logEntry) {
- s.incWithTime(s.requests, entry.Time)
- if entry.Result.IsFiltered {
- s.incWithTime(s.filtered, entry.Time)
- }
-
- switch entry.Result.Reason {
- case dnsfilter.NotFilteredWhiteList:
- s.incWithTime(s.whitelisted, entry.Time)
- case dnsfilter.NotFilteredError:
- s.incWithTime(s.errorsTotal, entry.Time)
- case dnsfilter.FilteredBlackList:
- s.incWithTime(s.filteredLists, entry.Time)
- case dnsfilter.FilteredSafeBrowsing:
- s.incWithTime(s.filteredSafebrowsing, entry.Time)
- case dnsfilter.FilteredParental:
- s.incWithTime(s.filteredParental, entry.Time)
- case dnsfilter.FilteredInvalid:
- // do nothing
- case dnsfilter.FilteredSafeSearch:
- s.incWithTime(s.safesearch, entry.Time)
- }
- s.observeWithTime(s.elapsedTime, entry.Elapsed.Seconds(), entry.Time)
-}
-
-// getAggregatedStats returns aggregated stats data for the 24 hours
-func (s *stats) getAggregatedStats() map[string]interface{} {
- const numHours = 24
- historical := s.generateMapFromStats(&s.perHour, 0, numHours)
- // sum them up
- summed := map[string]interface{}{}
- for key, values := range historical {
- summedValue := 0.0
- floats, ok := values.([]float64)
- if !ok {
- continue
- }
- for _, v := range floats {
- summedValue += v
- }
- summed[key] = summedValue
- }
- // don't forget to divide by number of elements in returned slice
- if val, ok := summed["avg_processing_time"]; ok {
- if flval, flok := val.(float64); flok {
- flval /= numHours
- summed["avg_processing_time"] = flval
- }
- }
-
- summed["stats_period"] = "24 hours"
- return summed
-}
-
-func (s *stats) generateMapFromStats(stats *periodicStats, start int, end int) map[string]interface{} {
- stats.RLock()
- defer stats.RUnlock()
-
- // clamp
- start = clamp(start, 0, statsHistoryElements)
- end = clamp(end, 0, statsHistoryElements)
-
- avgProcessingTime := make([]float64, 0)
-
- count := getReversedSlice(stats.entries[s.elapsedTime.name+"_count"], start, end)
- sum := getReversedSlice(stats.entries[s.elapsedTime.name+"_sum"], start, end)
- for i := 0; i < len(count); i++ {
- var avg float64
- if count[i] != 0 {
- avg = sum[i] / count[i]
- avg *= 1000
- }
- avgProcessingTime = append(avgProcessingTime, avg)
- }
-
- result := map[string]interface{}{
- "dns_queries": getReversedSlice(stats.entries[s.requests.name], start, end),
- "blocked_filtering": getReversedSlice(stats.entries[s.filtered.name], start, end),
- "replaced_safebrowsing": getReversedSlice(stats.entries[s.filteredSafebrowsing.name], start, end),
- "replaced_safesearch": getReversedSlice(stats.entries[s.safesearch.name], start, end),
- "replaced_parental": getReversedSlice(stats.entries[s.filteredParental.name], start, end),
- "avg_processing_time": avgProcessingTime,
- }
- return result
-}
-
-// getStatsHistory gets stats history aggregated by the specified time unit
-// timeUnit is either time.Second, time.Minute, time.Hour, or 24*time.Hour
-// start is start of the time range
-// end is end of the time range
-// returns nil if time unit is not supported
-func (s *stats) getStatsHistory(timeUnit time.Duration, startTime time.Time, endTime time.Time) (map[string]interface{}, error) {
- var stats *periodicStats
-
- switch timeUnit {
- case time.Second:
- stats = &s.perSecond
- case time.Minute:
- stats = &s.perMinute
- case time.Hour:
- stats = &s.perHour
- case 24 * time.Hour:
- stats = &s.perDay
- }
-
- if stats == nil {
- return nil, fmt.Errorf("unsupported time unit: %v", timeUnit)
- }
-
- now := time.Now()
-
- // check if start and time times are within supported time range
- timeRange := timeUnit * statsHistoryElements
- if startTime.Add(timeRange).Before(now) {
- return nil, fmt.Errorf("start_time parameter is outside of supported range: %s", startTime.String())
- }
- if endTime.Add(timeRange).Before(now) {
- return nil, fmt.Errorf("end_time parameter is outside of supported range: %s", startTime.String())
- }
-
- // calculate start and end of our array
- // basically it's how many hours/minutes/etc have passed since now
- start := int(now.Sub(endTime) / timeUnit)
- end := int(now.Sub(startTime) / timeUnit)
-
- // swap them around if they're inverted
- if start > end {
- start, end = end, start
- }
-
- return s.generateMapFromStats(stats, start, end), nil
-}
-
-func clamp(value, low, high int) int {
- if value < low {
- return low
- }
- if value > high {
- return high
- }
- return value
-}
-
-// --------------------------
-// helper functions for stats
-// --------------------------
-func getReversedSlice(input [statsHistoryElements]float64, start int, end int) []float64 {
- output := make([]float64, 0)
- for i := start; i <= end; i++ {
- output = append([]float64{input[i]}, output...)
- }
- return output
-}
diff --git a/go.mod b/go.mod
index 783486bd..8949d9b8 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,9 @@ require (
github.com/AdguardTeam/golibs v0.2.1
github.com/AdguardTeam/urlfilter v0.5.0
github.com/NYTimes/gziphandler v1.1.1
+ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf
github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833
+ github.com/etcd-io/bbolt v1.3.3
github.com/go-test/deep v1.0.1
github.com/gobuffalo/packr v1.19.0
github.com/joomcode/errorx v0.8.0
@@ -17,8 +19,8 @@ require (
github.com/miekg/dns v1.1.8
github.com/sparrc/go-ping v0.0.0-20181106165434-ef3ab45e41b0
github.com/stretchr/testify v1.4.0
+ go.etcd.io/bbolt v1.3.3 // indirect
golang.org/x/net v0.0.0-20190620200207-3b0461eec859
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0
- gopkg.in/asaskevich/govalidator.v4 v4.0.0-20160518190739-766470278477
gopkg.in/yaml.v2 v2.2.2
)
diff --git a/go.sum b/go.sum
index 433dac2d..c26688a1 100644
--- a/go.sum
+++ b/go.sum
@@ -28,6 +28,7 @@ github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833/go.mod h1:8c4/i2Vlov
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
@@ -80,6 +81,7 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
@@ -112,8 +114,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-gopkg.in/asaskevich/govalidator.v4 v4.0.0-20160518190739-766470278477 h1:5xUJw+lg4zao9W4HIDzlFbMYgSgtvNVHh00MEHvbGpQ=
-gopkg.in/asaskevich/govalidator.v4 v4.0.0-20160518190739-766470278477/go.mod h1:QDV1vrFSrowdoOba0UM8VJPUZONT7dnfdLsM+GG53Z8=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/home/config.go b/home/config.go
index b4c943dc..b3a62efe 100644
--- a/home/config.go
+++ b/home/config.go
@@ -12,6 +12,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/dhcpd"
"github.com/AdguardTeam/AdGuardHome/dnsfilter"
"github.com/AdguardTeam/AdGuardHome/dnsforward"
+ "github.com/AdguardTeam/AdGuardHome/stats"
"github.com/AdguardTeam/golibs/file"
"github.com/AdguardTeam/golibs/log"
yaml "gopkg.in/yaml.v2"
@@ -68,6 +69,7 @@ type configuration struct {
controlLock sync.Mutex
transport *http.Transport
client *http.Client
+ stats stats.Stats
// cached version.json to avoid hammering github.io for each page reload
versionCheckJSON []byte
@@ -107,6 +109,9 @@ type dnsConfig struct {
BindHost string `yaml:"bind_host"`
Port int `yaml:"port"`
+ // time interval for statistics (in days)
+ StatsInterval uint `yaml:"statistics_interval"`
+
dnsforward.FilteringConfig `yaml:",inline"`
UpstreamDNS []string `yaml:"upstream_dns"`
@@ -161,8 +166,9 @@ var config = configuration{
BindPort: 3000,
BindHost: "0.0.0.0",
DNS: dnsConfig{
- BindHost: "0.0.0.0",
- Port: 53,
+ BindHost: "0.0.0.0",
+ Port: 53,
+ StatsInterval: 1,
FilteringConfig: dnsforward.FilteringConfig{
ProtectionEnabled: true, // whether or not use any of dnsfilter features
FilteringEnabled: true, // whether or not use filter lists
@@ -264,6 +270,10 @@ func parseConfig() error {
return err
}
+ if !checkStatsInterval(config.DNS.StatsInterval) {
+ config.DNS.StatsInterval = 1
+ }
+
for _, cy := range config.Clients {
cli := Client{
Name: cy.Name,
diff --git a/home/control.go b/home/control.go
index 6601b364..5ac71d6d 100644
--- a/home/control.go
+++ b/home/control.go
@@ -1,12 +1,10 @@
package home
import (
- "bytes"
"encoding/json"
"fmt"
"net"
"net/http"
- "sort"
"strconv"
"strings"
"time"
@@ -177,149 +175,6 @@ func handleQueryLog(w http.ResponseWriter, r *http.Request) {
}
}
-func handleStatsTop(w http.ResponseWriter, r *http.Request) {
- s := config.dnsServer.GetStatsTop()
-
- // use manual json marshalling because we want maps to be sorted by value
- statsJSON := bytes.Buffer{}
- statsJSON.WriteString("{\n")
-
- gen := func(json *bytes.Buffer, name string, top map[string]int, addComma bool) {
- json.WriteString(" ")
- json.WriteString(fmt.Sprintf("%q", name))
- json.WriteString(": {\n")
- sorted := sortByValue(top)
- // no more than 50 entries
- if len(sorted) > 50 {
- sorted = sorted[:50]
- }
- for i, key := range sorted {
- json.WriteString(" ")
- json.WriteString(fmt.Sprintf("%q", key))
- json.WriteString(": ")
- json.WriteString(strconv.Itoa(top[key]))
- if i+1 != len(sorted) {
- json.WriteByte(',')
- }
- json.WriteByte('\n')
- }
- json.WriteString(" }")
- if addComma {
- json.WriteByte(',')
- }
- json.WriteByte('\n')
- }
- gen(&statsJSON, "top_queried_domains", s.Domains, true)
- gen(&statsJSON, "top_blocked_domains", s.Blocked, true)
- gen(&statsJSON, "top_clients", s.Clients, true)
- statsJSON.WriteString(" \"stats_period\": \"24 hours\"\n")
- statsJSON.WriteString("}\n")
-
- w.Header().Set("Content-Type", "application/json")
- _, err := w.Write(statsJSON.Bytes())
- if err != nil {
- httpError(w, http.StatusInternalServerError, "Couldn't write body: %s", err)
- }
-}
-
-// handleStatsReset resets the stats caches
-func handleStatsReset(w http.ResponseWriter, r *http.Request) {
- config.dnsServer.PurgeStats()
- _, err := fmt.Fprintf(w, "OK\n")
- if err != nil {
- httpError(w, http.StatusInternalServerError, "Couldn't write body: %s", err)
- }
-}
-
-// handleStats returns aggregated stats data for the 24 hours
-func handleStats(w http.ResponseWriter, r *http.Request) {
- summed := config.dnsServer.GetAggregatedStats()
-
- statsJSON, err := json.Marshal(summed)
- if err != nil {
- httpError(w, http.StatusInternalServerError, "Unable to marshal status json: %s", err)
- return
- }
- w.Header().Set("Content-Type", "application/json")
- _, err = w.Write(statsJSON)
- if err != nil {
- httpError(w, http.StatusInternalServerError, "Unable to write response json: %s", err)
- return
- }
-}
-
-// HandleStatsHistory returns historical stats data for the 24 hours
-func handleStatsHistory(w http.ResponseWriter, r *http.Request) {
- // handle time unit and prepare our time window size
- timeUnitString := r.URL.Query().Get("time_unit")
- var timeUnit time.Duration
- switch timeUnitString {
- case "seconds":
- timeUnit = time.Second
- case "minutes":
- timeUnit = time.Minute
- case "hours":
- timeUnit = time.Hour
- case "days":
- timeUnit = time.Hour * 24
- default:
- http.Error(w, "Must specify valid time_unit parameter", http.StatusBadRequest)
- return
- }
-
- // parse start and end time
- startTime, err := time.Parse(time.RFC3339, r.URL.Query().Get("start_time"))
- if err != nil {
- httpError(w, http.StatusBadRequest, "Must specify valid start_time parameter: %s", err)
- return
- }
- endTime, err := time.Parse(time.RFC3339, r.URL.Query().Get("end_time"))
- if err != nil {
- httpError(w, http.StatusBadRequest, "Must specify valid end_time parameter: %s", err)
- return
- }
-
- data, err := config.dnsServer.GetStatsHistory(timeUnit, startTime, endTime)
- if err != nil {
- httpError(w, http.StatusBadRequest, "Cannot get stats history: %s", err)
- return
- }
-
- statsJSON, err := json.Marshal(data)
- if err != nil {
- httpError(w, http.StatusInternalServerError, "Unable to marshal status json: %s", err)
- return
- }
-
- w.Header().Set("Content-Type", "application/json")
- _, err = w.Write(statsJSON)
- if err != nil {
- httpError(w, http.StatusInternalServerError, "Unable to write response json: %s", err)
- return
- }
-}
-
-// sortByValue is a helper function for querylog API
-func sortByValue(m map[string]int) []string {
- type kv struct {
- k string
- v int
- }
- var ss []kv
- for k, v := range m {
- ss = append(ss, kv{k, v})
- }
- sort.Slice(ss, func(l, r int) bool {
- return ss[l].v > ss[r].v
- })
-
- sorted := []string{}
- for _, v := range ss {
- sorted = append(sorted, v.k)
- }
- return sorted
-}
-
// -----------------------
// upstreams configuration
// -----------------------
@@ -722,10 +577,6 @@ func registerControlHandlers() {
httpRegister(http.MethodPost, "/control/test_upstream_dns", handleTestUpstreamDNS)
httpRegister(http.MethodPost, "/control/i18n/change_language", handleI18nChangeLanguage)
httpRegister(http.MethodGet, "/control/i18n/current_language", handleI18nCurrentLanguage)
- httpRegister(http.MethodGet, "/control/stats_top", handleStatsTop)
- httpRegister(http.MethodGet, "/control/stats", handleStats)
- httpRegister(http.MethodGet, "/control/stats_history", handleStatsHistory)
- httpRegister(http.MethodPost, "/control/stats_reset", handleStatsReset)
http.HandleFunc("/control/version.json", postInstall(optionalAuth(handleGetVersionJSON)))
httpRegister(http.MethodPost, "/control/update", handleUpdate)
httpRegister(http.MethodPost, "/control/filtering/enable", handleFilteringEnable)
@@ -760,6 +611,7 @@ func registerControlHandlers() {
RegisterClientsHandlers()
registerRewritesHandlers()
RegisterBlockedServicesHandlers()
+ RegisterStatsHandlers()
http.HandleFunc("/dns-query", postInstall(handleDOH))
}
diff --git a/home/control_stats.go b/home/control_stats.go
new file mode 100644
index 00000000..710eb577
--- /dev/null
+++ b/home/control_stats.go
@@ -0,0 +1,93 @@
+package home
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/AdguardTeam/AdGuardHome/stats"
+ "github.com/AdguardTeam/golibs/log"
+)
+
+type statsConfig struct {
+ Interval uint `json:"interval"`
+}
+
+// Get stats configuration
+func handleStatsInfo(w http.ResponseWriter, r *http.Request) {
+ resp := statsConfig{}
+ resp.Interval = config.DNS.StatsInterval
+
+ jsonVal, err := json.Marshal(resp)
+ if err != nil {
+ httpError(w, http.StatusInternalServerError, "json encode: %s", err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ _, err = w.Write(jsonVal)
+ if err != nil {
+ httpError(w, http.StatusInternalServerError, "http write: %s", err)
+ }
+}
+
+// Set stats configuration
+func handleStatsConfig(w http.ResponseWriter, r *http.Request) {
+ reqData := statsConfig{}
+ err := json.NewDecoder(r.Body).Decode(&reqData)
+ if err != nil {
+ httpError(w, http.StatusBadRequest, "json decode: %s", err)
+ return
+ }
+
+ if !checkStatsInterval(reqData.Interval) {
+ httpError(w, http.StatusBadRequest, "Unsupported interval")
+ return
+ }
+
+ config.DNS.StatsInterval = reqData.Interval
+ config.stats.Configure(int(config.DNS.StatsInterval))
+
+ returnOK(w)
+}
+
+// handleStats returns aggregated stats data
+func handleStats(w http.ResponseWriter, r *http.Request) {
+ units := stats.Hours
+ if config.DNS.StatsInterval > 7 {
+ units = stats.Days
+ }
+ counter := log.StartTimer()
+ d := config.stats.GetData(units)
+ counter.LogElapsed("Stats: prepared data")
+
+ if d == nil {
+ httpError(w, http.StatusInternalServerError, "Couldn't get statistics data")
+ return
+ }
+
+ data, err := json.Marshal(d)
+ if err != nil {
+ httpError(w, http.StatusInternalServerError, "json encode: %s", err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(data)
+}
+
+// handleStatsReset resets the stats
+func handleStatsReset(w http.ResponseWriter, r *http.Request) {
+ config.stats.Clear()
+ returnOK(w)
+}
+
+// RegisterStatsHandlers - register HTTP handlers for the statistics API
+func RegisterStatsHandlers() {
+ httpRegister(http.MethodGet, "/control/stats", handleStats)
+ httpRegister(http.MethodPost, "/control/stats_reset", handleStatsReset)
+ httpRegister(http.MethodPost, "/control/stats_config", handleStatsConfig)
+ httpRegister(http.MethodGet, "/control/stats_info", handleStatsInfo)
+}
+
+func checkStatsInterval(i uint) bool {
+ return i == 1 || i == 7 || i == 30 || i == 90
+}
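
Taken together, this new file replaces the removed /control/stats_top and /control/stats_history endpoints with /control/stats, /control/stats_reset, /control/stats_config and /control/stats_info. A minimal client exercising the new configuration round trip might look like the sketch below; the base URL is an assumption, while the paths and the interval field come straight from the handlers above:

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        base := "http://127.0.0.1:3000" // assumed local AdGuard Home instance

        // Switch statistics retention to 7 days (must be 1, 7, 30 or 90,
        // per checkStatsInterval above).
        body := bytes.NewBufferString(`{"interval": 7}`)
        resp, err := http.Post(base+"/control/stats_config", "application/json", body)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()

        // Read the aggregated statistics back.
        resp, err = http.Get(base + "/control/stats")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        data, _ := ioutil.ReadAll(resp.Body)
        fmt.Println(string(data))
    }
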
diff --git a/home/dns.go b/home/dns.go
index ee25ebc1..c00ddf51 100644
--- a/home/dns.go
+++ b/home/dns.go
@@ -8,6 +8,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/dnsfilter"
"github.com/AdguardTeam/AdGuardHome/dnsforward"
+ "github.com/AdguardTeam/AdGuardHome/stats"
"github.com/AdguardTeam/dnsproxy/proxy"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/log"
@@ -33,7 +34,11 @@ func initDNSServer(baseDir string) {
log.Fatalf("Cannot create DNS data dir at %s: %s", baseDir, err)
}
- config.dnsServer = dnsforward.NewServer(baseDir)
+ config.stats = stats.New("./data/stats.db", int(config.DNS.StatsInterval), nil)
+ if config.stats == nil {
+ log.Fatal("Couldn't initialize statistics module")
+ }
+ config.dnsServer = dnsforward.NewServer(baseDir, config.stats)
initRDNS()
}
@@ -152,11 +157,6 @@ func startDNSServer() error {
return errorx.Decorate(err, "Couldn't start forwarding DNS server")
}
- top := config.dnsServer.GetStatsTop()
- for k := range top.Clients {
- beginAsyncRDNS(k)
- }
-
return nil
}
@@ -183,5 +183,7 @@ func stopDNSServer() error {
return errorx.Decorate(err, "Couldn't stop forwarding DNS server")
}
+ config.stats.Close()
+
return nil
}
diff --git a/openapi/openapi.yaml b/openapi/openapi.yaml
index a730678f..b3b9e42c 100644
--- a/openapi/openapi.yaml
+++ b/openapi/openapi.yaml
@@ -214,18 +214,6 @@ paths:
# General statistics methods
# --------------------------------------------------
- /stats_top:
- get:
- tags:
- - stats
- operationId: statusTop
- summary: 'Get DNS server top client, domain and blocked statistics'
- responses:
- 200:
- description: OK
- schema:
- $ref: "#/definitions/StatsTop"
-
/stats:
get:
tags:
@@ -234,46 +222,10 @@ paths:
summary: 'Get DNS server statistics'
responses:
200:
- description: 'Returns general statistics for the last 24 hours'
+ description: 'Returns statistics data'
schema:
$ref: "#/definitions/Stats"
- /stats_history:
- get:
- tags:
- - stats
- operationId: stats_history
- summary: 'Get historical DNS server statistics for the last 24 hours'
- parameters:
- -
- name: start_time
- in: query
- type: string
- description: 'Start time in ISO8601 (example: `2018-05-04T17:55:33+00:00`)'
- required: true
- -
- name: end_time
- in: query
- type: string
- description: 'End time in ISO8601 (example: `2018-05-04T17:55:33+00:00`)'
- required: true
- -
- name: time_unit
- in: query
- type: string
- description: 'Time unit (`minutes` or `hours`)'
- required: true
- enum:
- - minutes
- - hours
- responses:
- 501:
- description: 'Requested time window is outside of supported range. It will be supported later, but not now.'
- 200:
- description: 'Returns historical stats for the specified time interval.'
- schema:
- $ref: '#/definitions/StatsHistory'
-
/stats_reset:
post:
tags:
@@ -284,6 +236,34 @@ paths:
200:
description: OK
+ /stats_info:
+ get:
+ tags:
+ - stats
+ operationId: statsInfo
+ summary: 'Get statistics parameters'
+ responses:
+ 200:
+ description: OK
+ schema:
+ $ref: "#/definitions/StatsConfig"
+
+ /stats_config:
+ post:
+ tags:
+ - stats
+ operationId: statsConfig
+ summary: "Set statistics parameters"
+ consumes:
+ - application/json
+ parameters:
+ - in: "body"
+ name: "body"
+ schema:
+ $ref: "#/definitions/StatsConfig"
+ responses:
+ 200:
+ description: OK
+
# --------------------------------------------------
# TLS server methods
# --------------------------------------------------
@@ -1103,28 +1083,29 @@ definitions:
type: "boolean"
Stats:
type: "object"
- description: "General server stats for the last 24 hours"
- required:
- - "dns_queries"
- - "blocked_filtering"
- - "replaced_safebrowsing"
- - "replaced_parental"
- - "replaced_safesearch"
- - "avg_processing_time"
+ description: "Server statistics data"
properties:
- dns_queries:
+ time_units:
+ type: "string"
+ description: "Time units (hours | days)"
+ example: "hours"
+ num_dns_queries:
type: "integer"
description: "Total number of DNS queries"
example: 123
- blocked_filtering:
+ num_blocked_filtering:
type: "integer"
description: "Number of requests blocked by filtering rules"
example: 50
- replaced_safebrowsing:
+ num_replaced_safebrowsing:
type: "integer"
- description: "Number of requests blocked by the safebrowsing module"
+ description: "Number of requests blocked by safebrowsing module"
example: 5
- replaced_parental:
+ num_replaced_safesearch:
+ type: "integer"
+ description: "Number of requests blocked by safesearch module"
+ example: 5
+ num_replaced_parental:
type: "integer"
description: "Number of blocked adult websites"
example: 15
@@ -1133,110 +1114,43 @@ definitions:
format: "float"
description: "Average time in milliseconds on processing a DNS"
example: 0.34
- StatsTop:
- type: "object"
- description: "Server stats top charts"
- required:
- - "top_queried_domains"
- - "top_clients"
- - "top_blocked_domains"
- properties:
top_queried_domains:
type: "array"
items:
type: "object"
- example:
- example.org: 12312
- example.com: 321
- example.net: 5555
top_clients:
type: "array"
items:
type: "object"
- example:
- 127.0.0.1: 12312
- 192.168.0.1: 13211
- 192.168.0.3: 13211
top_blocked_domains:
type: "array"
items:
type: "object"
- example:
- example.org: 12312
- example.com: 321
- example.net: 5555
- StatsHistory:
- type: "object"
- description: "Historical stats of the DNS server. Example below is for 5 minutes. Values are from oldest to newest."
- required:
- - "dns_queries"
- - "blocked_filtering"
- - "replaced_safebrowsing"
- - "replaced_parental"
- - "replaced_safesearch"
- - "avg_processing_time"
- properties:
dns_queries:
type: "array"
items:
type: "integer"
- example:
- - 1201
- - 1501
- - 1251
- - 1231
- - 120
blocked_filtering:
type: "array"
items:
type: "integer"
- example:
- - 421
- - 124
- - 5
- - 12
- - 43
replaced_safebrowsing:
type: "array"
items:
type: "integer"
- example:
- - 1
- - 0
- - 5
- - 0
- - 0
replaced_parental:
type: "array"
items:
type: "integer"
- example:
- - 120
- - 10
- - 5
- - 12
- - 1
- replaced_safesearch:
- type: "array"
- items:
- type: "integer"
- example:
- - 1
- - 0
- - 0
- - 0
- - 5
- avg_processing_time:
- type: "array"
- items:
- type: "number"
- format: "float"
- example:
- - 1.25
- - 5.12
- - 4.12
- - 123.12
- - 0.12
+
+ StatsConfig:
+ type: "object"
+ description: "Statistics configuration"
+ properties:
+ interval:
+ type: "integer"
+ description: "Time period to keep data (1 | 7 | 30 | 90)"
+
DhcpConfig:
type: "object"
description: "Built-in DHCP server configuration"
diff --git a/stats/stats.go b/stats/stats.go
new file mode 100644
index 00000000..2542b16d
--- /dev/null
+++ b/stats/stats.go
@@ -0,0 +1,68 @@
+// Module for collecting and storing statistics for the DNS filtering server
+
+package stats
+
+import (
+ "net"
+)
+
+type unitIDCallback func() int
+
+// New - create object
+// filename: DB file name
+// limit: time limit (in days)
+// unitID: user function to get the current unit ID. If nil, the current hour since the Unix epoch is used.
+func New(filename string, limit int, unitID unitIDCallback) Stats {
+ return createObject(filename, limit, unitID)
+}
+
+// Stats - main interface
+type Stats interface {
+ // Close object.
+ // This function is not thread safe
+ // (can't be called in parallel with any other function of this interface).
+ Close()
+
+ // Set new configuration at runtime.
+ // limit: time limit (in days)
+ Configure(limit int)
+
+ // Reset counters and clear database
+ Clear()
+
+ // Update counters
+ Update(e Entry)
+
+ // Get data
+ GetData(timeUnit TimeUnit) map[string]interface{}
+}
+
+// TimeUnit - time unit
+type TimeUnit int
+
+// Supported time units
+const (
+ Hours TimeUnit = iota
+ Days
+)
+
+// Result - the result of processing a DNS request
+type Result int
+
+// Supported result values
+const (
+ RNotFiltered Result = iota + 1
+ RFiltered
+ RSafeBrowsing
+ RSafeSearch
+ RParental
+ rLast
+)
+
+// Entry - data to add
+type Entry struct {
+ Domain string
+ Client net.IP
+ Result Result
+ Time uint // processing time (usec)
+}
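
The interface above implies a simple create/update/query/close lifecycle. A minimal sketch under the same assumptions as the tests below (the file path is illustrative):

    package main

    import (
    	"fmt"
    	"net"

    	"github.com/AdguardTeam/AdGuardHome/stats"
    )

    func main() {
    	// open (or create) the database with a 1-day limit;
    	// a nil callback selects the default hourly unit ID
    	s := stats.New("./stats.db", 1, nil)
    	if s == nil {
    		panic("couldn't initialize stats")
    	}

    	// record one processed request
    	s.Update(stats.Entry{
    		Domain: "example.org",
    		Client: net.ParseIP("127.0.0.1"),
    		Result: stats.RNotFiltered,
    		Time:   1500, // processing time (usec)
    	})

    	// fetch hourly aggregated data and a few totals
    	d := s.GetData(stats.Hours)
    	fmt.Println(d["num_dns_queries"], d["avg_processing_time"])

    	s.Close()
    }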
diff --git a/stats/stats_test.go b/stats/stats_test.go
new file mode 100644
index 00000000..45b06520
--- /dev/null
+++ b/stats/stats_test.go
@@ -0,0 +1,115 @@
+package stats
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func UIntArrayEquals(a []uint, b []uint) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func TestStats(t *testing.T) {
+ s := New("./stats.db", 1, nil)
+
+ e := Entry{}
+
+ e.Domain = "domain"
+ e.Client = net.ParseIP("127.0.0.1")
+ e.Result = RFiltered
+ e.Time = 123456
+ s.Update(e)
+
+ e.Domain = "domain"
+ e.Client = net.ParseIP("127.0.0.1")
+ e.Result = RNotFiltered
+ e.Time = 123456
+ s.Update(e)
+
+ d := s.GetData(Hours)
+ a := []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}
+ assert.True(t, UIntArrayEquals(d["dns_queries"].([]uint), a))
+
+ a = []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
+ assert.True(t, UIntArrayEquals(d["blocked_filtering"].([]uint), a))
+
+ a = []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ assert.True(t, UIntArrayEquals(d["replaced_safebrowsing"].([]uint), a))
+
+ a = []uint{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ assert.True(t, UIntArrayEquals(d["replaced_parental"].([]uint), a))
+
+ m := d["top_queried_domains"].([]map[string]uint)
+ assert.True(t, m[0]["domain"] == 1)
+
+ m = d["top_blocked_domains"].([]map[string]uint)
+ assert.True(t, m[0]["domain"] == 1)
+
+ m = d["top_clients"].([]map[string]uint)
+ assert.True(t, m[0]["127.0.0.1"] == 2)
+
+ assert.True(t, d["num_dns_queries"].(uint) == 2)
+ assert.True(t, d["num_blocked_filtering"].(uint) == 1)
+ assert.True(t, d["num_replaced_safebrowsing"].(uint) == 0)
+ assert.True(t, d["num_replaced_safesearch"].(uint) == 0)
+ assert.True(t, d["num_replaced_parental"].(uint) == 0)
+ assert.True(t, d["avg_processing_time"].(float64) == 0.123456)
+
+ s.Clear()
+ s.Close()
+ os.Remove("./stats.db")
+}
+
+func TestLargeNumbers(t *testing.T) {
+ hour := int32(1)
+ newID := func() int {
+ // use "atomic" to make Go race detector happy
+ return int(atomic.LoadInt32(&hour))
+ }
+
+ // log.SetLevel(log.DEBUG)
+ fn := "./stats.db"
+ os.Remove(fn)
+ s := New(fn, 1, newID)
+ e := Entry{}
+
+ n := 1000 // number of distinct clients and domains every hour
+ for h := 0; h != 12; h++ {
+ if h != 0 {
+ atomic.AddInt32(&hour, 1)
+ time.Sleep(1500 * time.Millisecond)
+ }
+ for i := 0; i != n; i++ {
+ e.Domain = fmt.Sprintf("domain%d", i)
+ e.Client = net.ParseIP("127.0.0.1")
+ e.Client[2] = byte((i & 0xff00) >> 8)
+ e.Client[3] = byte(i & 0xff)
+ e.Result = RNotFiltered
+ e.Time = 123456
+ s.Update(e)
+ }
+ }
+
+ d := s.GetData(Hours)
+ assert.True(t, d["num_dns_queries"].(uint) == uint(int(hour)*n))
+
+ s.Close()
+ os.Remove(fn)
+}
diff --git a/stats/stats_unit.go b/stats/stats_unit.go
new file mode 100644
index 00000000..1cc69324
--- /dev/null
+++ b/stats/stats_unit.go
@@ -0,0 +1,672 @@
+package stats
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/gob"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/AdguardTeam/golibs/log"
+ bolt "github.com/etcd-io/bbolt"
+)
+
+const (
+ maxDomains = 100 // max number of top domains to store in file or return via GetData()
+ maxClients = 100 // max number of top clients to store in file or return via GetData()
+)
+
+// statsCtx - global context
+type statsCtx struct {
+ limit int // maximum time we need to keep data for (in hours)
+ filename string // database file name
+ unitID unitIDCallback // user function which returns the current unit ID
+ db *bolt.DB
+
+ unit *unit // the current unit
+ unitLock sync.Mutex // protect 'unit'
+}
+
+// data for 1 time unit
+type unit struct {
+ id int // unit ID. Default: absolute hour since Jan 1, 1970
+
+ nTotal int // total requests
+ nResult []int // number of requests per one result
+ timeSum int // sum of processing time of all requests (usec)
+
+ // top:
+ domains map[string]int // number of requests per domain
+ blockedDomains map[string]int // number of blocked requests per domain
+ clients map[string]int // number of requests per client
+}
+
+// name-count pair
+type countPair struct {
+ Name string
+ Count uint
+}
+
+// structure for storing data in file
+type unitDB struct {
+ NTotal uint
+ NResult []uint
+
+ Domains []countPair
+ BlockedDomains []countPair
+ Clients []countPair
+
+ TimeAvg uint // usec
+}
+
+func createObject(filename string, limitDays int, unitID unitIDCallback) *statsCtx {
+ s := statsCtx{}
+ s.limit = limitDays * 24
+ s.filename = filename
+ s.unitID = newUnitID
+ if unitID != nil {
+ s.unitID = unitID
+ }
+
+ if !s.dbOpen() {
+ return nil
+ }
+
+ id := s.unitID()
+ tx := s.beginTxn(true)
+ var udb *unitDB
+ if tx != nil {
+ log.Tracef("Deleting old units...")
+ firstID := id - s.limit - 1
+ unitDel := 0
+ forEachBkt := func(name []byte, b *bolt.Bucket) error {
+ id := btoi(name)
+ if id < firstID {
+ err := tx.DeleteBucket(name)
+ if err != nil {
+ log.Debug("tx.DeleteBucket: %s", err)
+ }
+ log.Debug("Stats: deleted unit %d", id)
+ unitDel++
+ return nil
+ }
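+ // bucket names are stored in byte order, so the first unit that is
+ // new enough ends the scan; the non-nil error just stops ForEach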
+ return fmt.Errorf("")
+ }
+ _ = tx.ForEach(forEachBkt)
+
+ udb = s.loadUnitFromDB(tx, id)
+
+ if unitDel != 0 {
+ s.commitTxn(tx)
+ } else {
+ _ = tx.Rollback()
+ }
+ }
+
+ u := unit{}
+ s.initUnit(&u, id)
+ if udb != nil {
+ deserialize(&u, udb)
+ }
+ s.unit = &u
+
+ go s.periodicFlush()
+
+ log.Debug("Stats: initialized")
+ return &s
+}
+
+func (s *statsCtx) dbOpen() bool {
+ var err error
+ log.Tracef("db.Open...")
+ s.db, err = bolt.Open(s.filename, 0644, nil)
+ if err != nil {
+ log.Error("Stats: open DB: %s: %s", s.filename, err)
+ return false
+ }
+ log.Tracef("db.Open")
+ return true
+}
+
+// Atomically swap the currently active unit with a new value
+// Return old value
+func (s *statsCtx) swapUnit(new *unit) *unit {
+ s.unitLock.Lock()
+ u := s.unit
+ s.unit = new
+ s.unitLock.Unlock()
+ return u
+}
+
+// Get unit ID for the current hour
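+// (e.g. Unix time 3600 maps to unit ID 1)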
+func newUnitID() int {
+ return int(time.Now().Unix() / (60 * 60))
+}
+
+// Initialize a unit
+func (s *statsCtx) initUnit(u *unit, id int) {
+ u.id = id
+ u.nResult = make([]int, rLast)
+ u.domains = make(map[string]int)
+ u.blockedDomains = make(map[string]int)
+ u.clients = make(map[string]int)
+}
+
+// Open a DB transaction
+func (s *statsCtx) beginTxn(wr bool) *bolt.Tx {
+ db := s.db
+ if db == nil {
+ return nil
+ }
+
+ log.Tracef("db.Begin...")
+ tx, err := db.Begin(wr)
+ if err != nil {
+ log.Error("db.Begin: %s", err)
+ return nil
+ }
+ log.Tracef("db.Begin")
+ return tx
+}
+
+func (s *statsCtx) commitTxn(tx *bolt.Tx) {
+ err := tx.Commit()
+ if err != nil {
+ log.Debug("tx.Commit: %s", err)
+ return
+ }
+ log.Tracef("tx.Commit")
+}
+
+// Get unit name
+func unitName(id int) []byte {
+ return itob(id)
+}
+
+// Convert integer to 8-byte array (big endian)
+func itob(v int) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(v))
+ return b
+}
+
+// Convert 8-byte array (big endian) to integer
+func btoi(b []byte) int {
+ return int(binary.BigEndian.Uint64(b))
+}
+
+// Flush the current unit to DB and delete an old unit when a new hour is started
+func (s *statsCtx) periodicFlush() {
+ for {
+ s.unitLock.Lock()
+ ptr := s.unit
+ s.unitLock.Unlock()
+ if ptr == nil {
+ break
+ }
+
+ id := s.unitID()
+ if ptr.id == id {
+ time.Sleep(time.Second)
+ continue
+ }
+
+ nu := unit{}
+ s.initUnit(&nu, id)
+ u := s.swapUnit(&nu)
+ udb := serialize(u)
+
+ tx := s.beginTxn(true)
+ if tx == nil {
+ continue
+ }
+ ok1 := s.flushUnitToDB(tx, u.id, udb)
+ ok2 := s.deleteUnit(tx, id-s.limit)
+ if ok1 || ok2 {
+ s.commitTxn(tx)
+ } else {
+ _ = tx.Rollback()
+ }
+ }
+ log.Tracef("periodicFlush() exited")
+}
+
+// Delete unit's data from file
+func (s *statsCtx) deleteUnit(tx *bolt.Tx, id int) bool {
+ err := tx.DeleteBucket(unitName(id))
+ if err != nil {
+ log.Tracef("bolt DeleteBucket: %s", err)
+ return false
+ }
+ log.Debug("Stats: deleted unit %d", id)
+ return true
+}
+
+func convertMapToArray(m map[string]int, max int) []countPair {
+ a := []countPair{}
+ for k, v := range m {
+ pair := countPair{}
+ pair.Name = k
+ pair.Count = uint(v)
+ a = append(a, pair)
+ }
+ // sort by count in descending order; a strict ">" (rather than ">=")
+ // keeps this a valid "less" function for sort.Slice
+ less := func(i, j int) bool {
+ return a[i].Count > a[j].Count
+ }
+ sort.Slice(a, less)
+ if max > len(a) {
+ max = len(a)
+ }
+ return a[:max]
+}
+
+func convertArrayToMap(a []countPair) map[string]int {
+ m := map[string]int{}
+ for _, it := range a {
+ m[it.Name] = int(it.Count)
+ }
+ return m
+}
+
+func serialize(u *unit) *unitDB {
+ udb := unitDB{}
+ udb.NTotal = uint(u.nTotal)
+ for _, it := range u.nResult {
+ udb.NResult = append(udb.NResult, uint(it))
+ }
+ if u.nTotal != 0 {
+ udb.TimeAvg = uint(u.timeSum / u.nTotal)
+ }
+ udb.Domains = convertMapToArray(u.domains, maxDomains)
+ udb.BlockedDomains = convertMapToArray(u.blockedDomains, maxDomains)
+ udb.Clients = convertMapToArray(u.clients, maxClients)
+ return &udb
+}
+
+func deserialize(u *unit, udb *unitDB) {
+ u.nTotal = int(udb.NTotal)
+ for _, it := range udb.NResult {
+ u.nResult = append(u.nResult, int(it))
+ }
+ u.domains = convertArrayToMap(udb.Domains)
+ u.blockedDomains = convertArrayToMap(udb.BlockedDomains)
+ u.clients = convertArrayToMap(udb.Clients)
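+ // timeSum is reconstructed from the stored per-unit average,
+ // so precision lost to integer division is not recovered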
+ u.timeSum = int(udb.TimeAvg) * u.nTotal
+}
+
+func (s *statsCtx) flushUnitToDB(tx *bolt.Tx, id int, udb *unitDB) bool {
+ log.Tracef("Flushing unit %d", id)
+
+ bkt, err := tx.CreateBucketIfNotExists(unitName(id))
+ if err != nil {
+ log.Error("tx.CreateBucketIfNotExists: %s", err)
+ return false
+ }
+
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ err = enc.Encode(udb)
+ if err != nil {
+ log.Error("gob.Encode: %s", err)
+ return false
+ }
+
+ err = bkt.Put([]byte{0}, buf.Bytes())
+ if err != nil {
+ log.Error("bkt.Put: %s", err)
+ return false
+ }
+
+ return true
+}
+
+func (s *statsCtx) loadUnitFromDB(tx *bolt.Tx, id int) *unitDB {
+ bkt := tx.Bucket(unitName(id))
+ if bkt == nil {
+ return nil
+ }
+
+ log.Tracef("Loading unit %d", id)
+
+ var buf bytes.Buffer
+ buf.Write(bkt.Get([]byte{0}))
+ dec := gob.NewDecoder(&buf)
+ udb := unitDB{}
+ err := dec.Decode(&udb)
+ if err != nil {
+ log.Error("gob Decode: %s", err)
+ return nil
+ }
+
+ return &udb
+}
+
+func convertTopArray(a []countPair) []map[string]uint {
+ m := []map[string]uint{}
+ for _, it := range a {
+ ent := map[string]uint{}
+ ent[it.Name] = it.Count
+ m = append(m, ent)
+ }
+ return m
+}
+
+func (s *statsCtx) Configure(limit int) {
+ if limit < 0 {
+ return
+ }
+ s.limit = limit * 24
+ log.Debug("Stats: set limit: %d", limit)
+}
+
+func (s *statsCtx) Close() {
+ u := s.swapUnit(nil)
+ udb := serialize(u)
+ tx := s.beginTxn(true)
+ if tx != nil {
+ if s.flushUnitToDB(tx, u.id, udb) {
+ s.commitTxn(tx)
+ } else {
+ _ = tx.Rollback()
+ }
+ }
+
+ if s.db != nil {
+ log.Tracef("db.Close...")
+ _ = s.db.Close()
+ log.Tracef("db.Close")
+ }
+
+ log.Debug("Stats: closed")
+}
+
+func (s *statsCtx) Clear() {
+ tx := s.beginTxn(true)
+ if tx != nil {
+ db := s.db
+ s.db = nil
+ _ = tx.Rollback()
+ // the active transactions can continue using the database,
+ // but no new transactions will be opened
+ _ = db.Close()
+ log.Tracef("db.Close")
+ // all active transactions are now closed
+ }
+
+ u := unit{}
+ s.initUnit(&u, s.unitID())
+ _ = s.swapUnit(&u)
+
+ err := os.Remove(s.filename)
+ if err != nil {
+ log.Error("os.Remove: %s", err)
+ }
+
+ _ = s.dbOpen()
+
+ log.Debug("Stats: cleared")
+}
+
+func (s *statsCtx) Update(e Entry) {
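+ // skip entries without a result or domain, or with a malformed client IP (must be 4 or 16 bytes)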
+ if e.Result == 0 ||
+ len(e.Domain) == 0 ||
+ !(len(e.Client) == 4 || len(e.Client) == 16) {
+ return
+ }
+ client := e.Client.String()
+
+ s.unitLock.Lock()
+ u := s.unit
+
+ u.nResult[e.Result]++
+
+ if e.Result == RNotFiltered {
+ u.domains[e.Domain]++
+ } else {
+ u.blockedDomains[e.Domain]++
+ }
+
+ u.clients[client]++
+ u.timeSum += int(e.Time)
+ u.nTotal++
+ s.unitLock.Unlock()
+}
+
+/* Algorithm:
+. Prepare array of N units, where N is the value of "limit" configuration setting
+ . Load data for the most recent units from file
+ If a unit with required ID doesn't exist, just add an empty unit
+ . Get data for the current unit
+. Process data from the units and prepare an output map object:
+ * per time unit counters:
+ * DNS-queries/time-unit
+ * blocked/time-unit
+ * safebrowsing-blocked/time-unit
+ * parental-blocked/time-unit
+ If time-unit is an hour, just add values from each unit to an array.
+ If time-unit is a day, aggregate per-hour data into days.
+ * top counters:
+ * queries/domain
+ * queries/blocked-domain
+ * queries/client
+ To get these values we first sum up data for all units into a single map.
+ Then we get the pairs with the highest numbers (the values are sorted in descending order)
+ * total counters:
+ * DNS-queries
+ * blocked
+ * safebrowsing-blocked
+ * safesearch-blocked
+ * parental-blocked
+ These values are just the sum of data for all units.
+*/
+// nolint:gocyclo
+func (s *statsCtx) GetData(timeUnit TimeUnit) map[string]interface{} {
+ d := map[string]interface{}{}
+
+ tx := s.beginTxn(false)
+ if tx == nil {
+ return nil
+ }
+
+ units := []*unitDB{} // per-hour units
+ lastID := s.unitID()
+ firstID := lastID - s.limit + 1
+ for i := firstID; i != lastID; i++ {
+ u := s.loadUnitFromDB(tx, i)
+ if u == nil {
+ u = &unitDB{}
+ u.NResult = make([]uint, rLast)
+ }
+ units = append(units, u)
+ }
+
+ _ = tx.Rollback()
+
+ s.unitLock.Lock()
+ cu := serialize(s.unit)
+ cuID := s.unit.id
+ s.unitLock.Unlock()
+ if cuID != lastID {
+ units = units[1:]
+ }
+ units = append(units, cu)
+
+ if len(units) != s.limit {
+ log.Fatalf("len(units) != s.limit: %d %d", len(units), s.limit)
+ }
+
+ // per time unit counters:
+
+ // 720 hours may span 31 days, so we skip data for the first day in this case
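+ // e.g. firstID = 100 is aligned up to firstDayID = 120, the start of the next full day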
+ firstDayID := (firstID + 24 - 1) / 24 * 24 // align_ceil(24)
+
+ a := []uint{}
+ if timeUnit == Hours {
+ for _, u := range units {
+ a = append(a, u.NTotal)
+ }
+ } else {
+ var sum uint
+ id := firstDayID
+ nextDayID := firstDayID + 24
+ for i := firstDayID - firstID; i != len(units); i++ {
+ sum += units[i].NTotal
+ if id == nextDayID {
+ a = append(a, sum)
+ sum = 0
+ nextDayID += 24
+ }
+ id++
+ }
+ if id < nextDayID {
+ a = append(a, sum)
+ }
+ if len(a) != s.limit/24 {
+ log.Fatalf("len(a) != s.limit: %d %d", len(a), s.limit)
+ }
+ }
+ d["dns_queries"] = a
+
+ a = []uint{}
+ if timeUnit == Hours {
+ for _, u := range units {
+ a = append(a, u.NResult[RFiltered])
+ }
+ } else {
+ var sum uint
+ id := firstDayID
+ nextDayID := firstDayID + 24
+ for i := firstDayID - firstID; i != len(units); i++ {
+ sum += units[i].NResult[RFiltered]
+ if id == nextDayID {
+ a = append(a, sum)
+ sum = 0
+ nextDayID += 24
+ }
+ id++
+ }
+ if id < nextDayID {
+ a = append(a, sum)
+ }
+ }
+ d["blocked_filtering"] = a
+
+ a = []uint{}
+ if timeUnit == Hours {
+ for _, u := range units {
+ a = append(a, u.NResult[RSafeBrowsing])
+ }
+ } else {
+ var sum uint
+ id := firstDayID
+ nextDayID := firstDayID + 24
+ for i := firstDayID - firstID; i != len(units); i++ {
+ sum += units[i].NResult[RSafeBrowsing]
+ if id == nextDayID {
+ a = append(a, sum)
+ sum = 0
+ nextDayID += 24
+ }
+ id++
+ }
+ if id < nextDayID {
+ a = append(a, sum)
+ }
+ }
+ d["replaced_safebrowsing"] = a
+
+ a = []uint{}
+ if timeUnit == Hours {
+ for _, u := range units {
+ a = append(a, u.NResult[RParental])
+ }
+ } else {
+ var sum uint
+ id := firstDayID
+ nextDayID := firstDayID + 24
+ for i := firstDayID - firstID; i != len(units); i++ {
+ sum += units[i].NResult[RParental]
+ if id == nextDayID {
+ a = append(a, sum)
+ sum = 0
+ nextDayID += 24
+ }
+ id++
+ }
+ if id < nextDayID {
+ a = append(a, sum)
+ }
+ }
+ d["replaced_parental"] = a
+
+ // top counters:
+
+ m := map[string]int{}
+ for _, u := range units {
+ for _, it := range u.Domains {
+ m[it.Name] += int(it.Count)
+ }
+ }
+ a2 := convertMapToArray(m, maxDomains)
+ d["top_queried_domains"] = convertTopArray(a2)
+
+ m = map[string]int{}
+ for _, u := range units {
+ for _, it := range u.BlockedDomains {
+ m[it.Name] += int(it.Count)
+ }
+ }
+ a2 = convertMapToArray(m, maxDomains)
+ d["top_blocked_domains"] = convertTopArray(a2)
+
+ m = map[string]int{}
+ for _, u := range units {
+ for _, it := range u.Clients {
+ m[it.Name] += int(it.Count)
+ }
+ }
+ a2 = convertMapToArray(m, maxClients)
+ d["top_clients"] = convertTopArray(a2)
+
+ // total counters:
+
+ sum := unitDB{}
+ timeN := 0
+ sum.NResult = make([]uint, rLast)
+ for _, u := range units {
+ sum.NTotal += u.NTotal
+ sum.TimeAvg += u.TimeAvg
+ if u.TimeAvg != 0 {
+ timeN++
+ }
+ sum.NResult[RFiltered] += u.NResult[RFiltered]
+ sum.NResult[RSafeBrowsing] += u.NResult[RSafeBrowsing]
+ sum.NResult[RSafeSearch] += u.NResult[RSafeSearch]
+ sum.NResult[RParental] += u.NResult[RParental]
+ }
+
+ d["num_dns_queries"] = sum.NTotal
+ d["num_blocked_filtering"] = sum.NResult[RFiltered]
+ d["num_replaced_safebrowsing"] = sum.NResult[RSafeBrowsing]
+ d["num_replaced_safesearch"] = sum.NResult[RSafeSearch]
+ d["num_replaced_parental"] = sum.NResult[RParental]
+
+ avgTime := float64(0)
+ if timeN != 0 {
+ avgTime = float64(sum.TimeAvg/uint(timeN)) / 1000000
+ }
+ d["avg_processing_time"] = avgTime
+
+ d["time_units"] = "hours"
+ if timeUnit == Days {
+ d["time_units"] = "days"
+ }
+
+ return d
+}
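
The four per-time-unit loops in GetData() differ only in which counter they read from a unit. A hedged refactoring sketch that folds them into one helper (the helper name is illustrative; the behavior is meant to be identical):

    // statsPerTimeUnit collects one counter across units: one value per
    // hour, or hours summed into days; get extracts the counter from a unit.
    func statsPerTimeUnit(units []*unitDB, timeUnit TimeUnit, firstID, firstDayID int, get func(*unitDB) uint) []uint {
    	a := []uint{}
    	if timeUnit == Hours {
    		for _, u := range units {
    			a = append(a, get(u))
    		}
    		return a
    	}
    	var sum uint
    	id := firstDayID
    	nextDayID := firstDayID + 24
    	for i := firstDayID - firstID; i != len(units); i++ {
    		sum += get(units[i])
    		if id == nextDayID {
    			a = append(a, sum)
    			sum = 0
    			nextDayID += 24
    		}
    		id++
    	}
    	if id < nextDayID {
    		a = append(a, sum)
    	}
    	return a
    }

Each of the four assignments then becomes a one-liner, e.g.:

    d["dns_queries"] = statsPerTimeUnit(units, timeUnit, firstID, firstDayID,
    	func(u *unitDB) uint { return u.NTotal })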