@@ -479,6 +407,7 @@ Logs.propTypes = {
getLogsConfig: PropTypes.func.isRequired,
setLogsPagination: PropTypes.func.isRequired,
setLogsFilter: PropTypes.func.isRequired,
+ setLogsPage: PropTypes.func.isRequired,
t: PropTypes.func.isRequired,
};
diff --git a/client/src/components/ui/Card.css b/client/src/components/ui/Card.css
index 6794a791..176b0160 100644
--- a/client/src/components/ui/Card.css
+++ b/client/src/components/ui/Card.css
@@ -33,6 +33,36 @@
text-align: center;
}
+.card-body--loading {
+ position: relative;
+}
+
+.card-body--loading:before {
+ content: "";
+ position: absolute;
+ top: 0;
+ left: 0;
+ z-index: 100;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(255, 255, 255, 0.6);
+}
+
+.card-body--loading:after {
+ content: "";
+ position: absolute;
+ z-index: 101;
+ left: 50%;
+ top: 50%;
+ width: 40px;
+ height: 40px;
+ margin-top: -20px;
+ margin-left: -20px;
+ background-image: url("data:image/svg+xml;charset=utf-8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20viewBox%3D%220%200%2047.6%2047.6%22%20height%3D%22100%25%22%20width%3D%22100%25%22%3E%3Cpath%20opacity%3D%22.235%22%20fill%3D%22%23979797%22%20d%3D%22M44.4%2011.9l-5.2%203c1.5%202.6%202.4%205.6%202.4%208.9%200%209.8-8%2017.8-17.8%2017.8-6.6%200-12.3-3.6-15.4-8.9l-5.2%203C7.3%2042.8%2015%2047.6%2023.8%2047.6c13.1%200%2023.8-10.7%2023.8-23.8%200-4.3-1.2-8.4-3.2-11.9z%22%2F%3E%3Cpath%20fill%3D%22%2366b574%22%20d%3D%22M3.2%2035.7C0%2030.2-.8%2023.8.8%2017.6%202.5%2011.5%206.4%206.4%2011.9%203.2%2017.4%200%2023.8-.8%2030%20.8c6.1%201.6%2011.3%205.6%2014.4%2011.1l-5.2%203c-2.4-4.1-6.2-7.1-10.8-8.3C23.8%205.4%2019%206%2014.9%208.4s-7.1%206.2-8.3%2010.8c-1.2%204.6-.6%209.4%201.8%2013.5l-5.2%203z%22%2F%3E%3C%2Fsvg%3E");
+ will-change: transform;
+ animation: clockwise 2s linear infinite;
+}
+
.card-title-stats {
font-size: 13px;
color: #9aa0ac;
diff --git a/client/src/components/ui/Tooltip.css b/client/src/components/ui/Tooltip.css
index 27505a68..9ad8af3b 100644
--- a/client/src/components/ui/Tooltip.css
+++ b/client/src/components/ui/Tooltip.css
@@ -64,6 +64,7 @@
top: calc(100% + 10px);
right: -10px;
left: initial;
+ width: 255px;
transform: none;
}
diff --git a/client/src/containers/Logs.js b/client/src/containers/Logs.js
index 3b93ec94..be328fbd 100644
--- a/client/src/containers/Logs.js
+++ b/client/src/containers/Logs.js
@@ -1,7 +1,7 @@
import { connect } from 'react-redux';
import { addSuccessToast, getClients } from '../actions';
import { getFilteringStatus, setRules } from '../actions/filtering';
-import { getLogs, getLogsConfig, setLogsPagination, setLogsFilter } from '../actions/queryLogs';
+import { getLogs, getLogsConfig, setLogsPagination, setLogsFilter, setLogsPage } from '../actions/queryLogs';
import Logs from '../components/Logs';
const mapStateToProps = (state) => {
@@ -19,6 +19,7 @@ const mapDispatchToProps = {
getLogsConfig,
setLogsPagination,
setLogsFilter,
+ setLogsPage,
};
export default connect(
diff --git a/client/src/helpers/constants.js b/client/src/helpers/constants.js
index fb82c1c9..ca19818e 100644
--- a/client/src/helpers/constants.js
+++ b/client/src/helpers/constants.js
@@ -141,6 +141,7 @@ export const STANDARD_HTTPS_PORT = 443;
export const EMPTY_DATE = '0001-01-01T00:00:00Z';
export const DEBOUNCE_TIMEOUT = 300;
+export const DEBOUNCE_FILTER_TIMEOUT = 500;
export const CHECK_TIMEOUT = 1000;
export const STOP_TIMEOUT = 10000;
@@ -379,3 +380,9 @@ export const DEFAULT_LOGS_FILTER = {
};
export const DEFAULT_LANGUAGE = 'en';
+
+export const TABLE_DEFAULT_PAGE_SIZE = 100;
+export const RESPONSE_FILTER = {
+ ALL: 'all',
+ FILTERED: 'filtered',
+};
diff --git a/client/src/reducers/queryLogs.js b/client/src/reducers/queryLogs.js
index 2f04099b..ee1fc91b 100644
--- a/client/src/reducers/queryLogs.js
+++ b/client/src/reducers/queryLogs.js
@@ -20,25 +20,50 @@ const queryLogs = handleActions(
};
},
- [actions.setLogsFilter]: (state, { payload }) => (
- { ...state, filter: payload }
- ),
+ [actions.setLogsPage]: (state, { payload }) => ({
+ ...state,
+ page: payload,
+ }),
+
+ [actions.setLogsFilterRequest]: state => ({ ...state, processingGetLogs: true }),
+ [actions.setLogsFilterFailure]: state => ({ ...state, processingGetLogs: false }),
+ [actions.setLogsFilterSuccess]: (state, { payload }) => {
+ const { logs, oldest, filter } = payload;
+ const pageSize = 100;
+ const page = 0;
+
+ const pages = Math.ceil(logs.length / pageSize);
+ const total = logs.length;
+ const rowsStart = pageSize * page;
+ const rowsEnd = (pageSize * page) + pageSize;
+ const logsSlice = logs.slice(rowsStart, rowsEnd);
+ const isFiltered = Object.keys(filter).some(key => filter[key]);
+
+ return {
+ ...state,
+ oldest,
+ filter,
+ isFiltered,
+ pages,
+ total,
+ logs: logsSlice,
+ allLogs: logs,
+ processingGetLogs: false,
+ };
+ },
[actions.getLogsRequest]: state => ({ ...state, processingGetLogs: true }),
[actions.getLogsFailure]: state => ({ ...state, processingGetLogs: false }),
[actions.getLogsSuccess]: (state, { payload }) => {
const {
- logs, lastRowTime, page, pageSize, filtered,
+ logs, oldest, older_than, page, pageSize,
} = payload;
let logsWithOffset = state.allLogs.length > 0 ? state.allLogs : logs;
let allLogs = logs;
- if (lastRowTime) {
+ if (older_than) {
logsWithOffset = [...state.allLogs, ...logs];
allLogs = [...state.allLogs, ...logs];
- } else if (filtered) {
- logsWithOffset = logs;
- allLogs = logs;
}
const pages = Math.ceil(logsWithOffset.length / pageSize);
@@ -49,6 +74,7 @@ const queryLogs = handleActions(
return {
...state,
+ oldest,
pages,
total,
allLogs,
@@ -81,20 +107,33 @@ const queryLogs = handleActions(
...payload,
processingSetConfig: false,
}),
+
+ [actions.getAdditionalLogsRequest]: state => ({
+ ...state, processingAdditionalLogs: true, processingGetLogs: true,
+ }),
+ [actions.getAdditionalLogsFailure]: state => ({
+ ...state, processingAdditionalLogs: false, processingGetLogs: false,
+ }),
+ [actions.getAdditionalLogsSuccess]: state => ({
+ ...state, processingAdditionalLogs: false, processingGetLogs: false,
+ }),
},
{
processingGetLogs: true,
processingClear: false,
processingGetConfig: false,
processingSetConfig: false,
+ processingAdditionalLogs: false,
logs: [],
interval: 1,
allLogs: [],
+ page: 0,
pages: 0,
total: 0,
enabled: true,
- older_than: '',
+ oldest: '',
filter: DEFAULT_LOGS_FILTER,
+ isFiltered: false,
},
);
diff --git a/dnsforward/dnsforward.go b/dnsforward/dnsforward.go
index 3ecd80d5..cb86c235 100644
--- a/dnsforward/dnsforward.go
+++ b/dnsforward/dnsforward.go
@@ -462,7 +462,7 @@ func (s *Server) handleDNSRequest(p *proxy.Proxy, d *proxy.DNSContext) error {
if d.Upstream != nil {
upstreamAddr = d.Upstream.Address()
}
- s.queryLog.Add(msg, d.Res, res, elapsed, d.Addr, upstreamAddr)
+ s.queryLog.Add(msg, d.Res, res, elapsed, getIP(d.Addr), upstreamAddr)
}
s.updateStats(d, elapsed, *res)
@@ -471,6 +471,17 @@ func (s *Server) handleDNSRequest(p *proxy.Proxy, d *proxy.DNSContext) error {
return nil
}
+// Get IP address from net.Addr
+func getIP(addr net.Addr) net.IP {
+ switch addr := addr.(type) {
+ case *net.UDPAddr:
+ return addr.IP
+ case *net.TCPAddr:
+ return addr.IP
+ }
+ return nil
+}
+
func (s *Server) updateStats(d *proxy.DNSContext, elapsed time.Duration, res dnsfilter.Result) {
if s.stats == nil {
return
diff --git a/openapi/CHANGELOG.md b/openapi/CHANGELOG.md
index 281d58e5..3172be07 100644
--- a/openapi/CHANGELOG.md
+++ b/openapi/CHANGELOG.md
@@ -1,6 +1,46 @@
# AdGuard Home API Change Log
+## v0.99.3: API changes
+
+### API: Get query log: GET /control/querylog
+
+The response data is now a JSON object, not an array.
+
+Response:
+
+ 200 OK
+
+ {
+        "oldest":"2006-01-02T15:04:05.999999999Z07:00",
+ "data":[
+ {
+ "answer":[
+ {
+ "ttl":10,
+ "type":"AAAA",
+ "value":"::"
+ }
+ ...
+ ],
+ "client":"127.0.0.1",
+ "elapsedMs":"0.098403",
+ "filterId":1,
+ "question":{
+ "class":"IN",
+ "host":"doubleclick.net",
+ "type":"AAAA"
+ },
+ "reason":"FilteredBlackList",
+ "rule":"||doubleclick.net^",
+ "status":"NOERROR",
+ "time":"2006-01-02T15:04:05.999999999Z07:00"
+ }
+ ...
+ ]
+ }
+
+
## v0.99.1: API changes
### API: Get current user info: GET /control/profile
diff --git a/openapi/openapi.yaml b/openapi/openapi.yaml
index eca972b3..d871fef3 100644
--- a/openapi/openapi.yaml
+++ b/openapi/openapi.yaml
@@ -1417,10 +1417,16 @@ definitions:
example: "2018-11-26T00:02:41+03:00"
QueryLog:
- type: "array"
+ type: "object"
description: "Query log"
- items:
- $ref: "#/definitions/QueryLogItem"
+ properties:
+ oldest:
+ type: "string"
+ example: "2018-11-26T00:02:41+03:00"
+ data:
+ type: "array"
+ items:
+ $ref: "#/definitions/QueryLogItem"
QueryLogConfig:
type: "object"
diff --git a/querylog/qlog.go b/querylog/qlog.go
index 436216ae..713a2849 100644
--- a/querylog/qlog.go
+++ b/querylog/qlog.go
@@ -20,8 +20,8 @@ const (
queryLogFileName = "querylog.json" // .gz added during compression
getDataLimit = 500 // GetData(): maximum log entries to return
- // maximum data chunks to parse when filtering entries
- maxFilteringChunks = 10
+ // maximum entries to parse when searching
+ maxSearchEntries = 50000
)
// queryLog is a structure that writes and reads the DNS query log
@@ -94,45 +94,31 @@ func (l *queryLog) clear() {
}
type logEntry struct {
- Question []byte
+ IP string `json:"IP"`
+ Time time.Time `json:"T"`
+
+ QHost string `json:"QH"`
+ QType string `json:"QT"`
+ QClass string `json:"QC"`
+
Answer []byte `json:",omitempty"` // sometimes empty answers happen like binerdunt.top or rev2.globalrootservers.net
Result dnsfilter.Result
- Time time.Time
Elapsed time.Duration
- IP string
Upstream string `json:",omitempty"` // if empty, means it was cached
}
-// getIPString is a helper function that extracts IP address from net.Addr
-func getIPString(addr net.Addr) string {
- switch addr := addr.(type) {
- case *net.UDPAddr:
- return addr.IP.String()
- case *net.TCPAddr:
- return addr.IP.String()
- }
- return ""
-}
-
-func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, addr net.Addr, upstream string) {
+func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, ip net.IP, upstream string) {
if !l.conf.Enabled {
return
}
- var q []byte
+ if question == nil || len(question.Question) != 1 || len(question.Question[0].Name) == 0 ||
+ ip == nil {
+ return
+ }
+
var a []byte
var err error
- ip := getIPString(addr)
-
- if question == nil {
- return
- }
-
- q, err = question.Pack()
- if err != nil {
- log.Printf("failed to pack question for querylog: %s", err)
- return
- }
if answer != nil {
a, err = answer.Pack()
@@ -148,14 +134,18 @@ func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Res
now := time.Now()
entry := logEntry{
- Question: q,
+ IP: ip.String(),
+ Time: now,
+
Answer: a,
Result: *result,
- Time: now,
Elapsed: elapsed,
- IP: ip,
Upstream: upstream,
}
+ q := question.Question[0]
+ entry.QHost = strings.ToLower(q.Name[:len(q.Name)-1]) // remove the last dot
+ entry.QType = dns.Type(q.Qtype).String()
+ entry.QClass = dns.Class(q.Qclass).String()
l.bufferLock.Lock()
l.buffer = append(l.buffer, &entry)
@@ -182,33 +172,22 @@ func isNeeded(entry *logEntry, params getDataParams) bool {
return false
}
- if len(params.Domain) != 0 || params.QuestionType != 0 {
- m := dns.Msg{}
- _ = m.Unpack(entry.Question)
-
- if params.QuestionType != 0 {
- if m.Question[0].Qtype != params.QuestionType {
- return false
- }
- }
-
- if len(params.Domain) != 0 && params.StrictMatchDomain {
- if m.Question[0].Name != params.Domain {
- return false
- }
- } else if len(params.Domain) != 0 {
- if strings.Index(m.Question[0].Name, params.Domain) == -1 {
- return false
- }
+ if len(params.QuestionType) != 0 {
+ if entry.QType != params.QuestionType {
+ return false
}
}
- if len(params.Client) != 0 && params.StrictMatchClient {
- if entry.IP != params.Client {
+ if len(params.Domain) != 0 {
+ if (params.StrictMatchDomain && entry.QHost != params.Domain) ||
+ (!params.StrictMatchDomain && strings.Index(entry.QHost, params.Domain) == -1) {
return false
}
- } else if len(params.Client) != 0 {
- if strings.Index(entry.IP, params.Client) == -1 {
+ }
+
+ if len(params.Client) != 0 {
+ if (params.StrictMatchClient && entry.IP != params.Client) ||
+ (!params.StrictMatchClient && strings.Index(entry.IP, params.Client) == -1) {
return false
}
}
@@ -216,31 +195,23 @@ func isNeeded(entry *logEntry, params getDataParams) bool {
return true
}
-func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, int) {
+func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, time.Time, int) {
entries := []*logEntry{}
- olderThan := params.OlderThan
- totalChunks := 0
- total := 0
+ oldest := time.Time{}
r := l.OpenReader()
if r == nil {
- return entries, 0
+ return entries, time.Time{}, 0
}
- r.BeginRead(olderThan, getDataLimit)
- for totalChunks < maxFilteringChunks {
- first := true
+	r.BeginRead(params.OlderThan, getDataLimit, &params)
+ total := uint64(0)
+ for total <= maxSearchEntries {
newEntries := []*logEntry{}
for {
entry := r.Next()
if entry == nil {
break
}
- total++
-
- if first {
- first = false
- olderThan = entry.Time
- }
if !isNeeded(entry, params) {
continue
@@ -251,7 +222,7 @@ func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, int) {
newEntries = append(newEntries, entry)
}
- log.Debug("entries: +%d (%d) older-than:%s", len(newEntries), len(entries), olderThan)
+ log.Debug("entries: +%d (%d) [%d]", len(newEntries), len(entries), r.Total())
entries = append(newEntries, entries...)
if len(entries) > getDataLimit {
@@ -259,15 +230,16 @@ func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, int) {
entries = entries[toremove:]
break
}
- if first || len(entries) == getDataLimit {
+ if r.Total() == 0 || len(entries) == getDataLimit {
break
}
- totalChunks++
- r.BeginReadPrev(olderThan, getDataLimit)
+ total += r.Total()
+ oldest = r.Oldest()
+ r.BeginReadPrev(getDataLimit)
}
r.Close()
- return entries, total
+ return entries, oldest, int(total)
}
// Parameters for getData()
@@ -275,7 +247,7 @@ type getDataParams struct {
OlderThan time.Time // return entries that are older than this value
Domain string // filter by domain name in question
Client string // filter by client IP
- QuestionType uint16 // filter by question type
+ QuestionType string // filter by question type
ResponseStatus responseStatusType // filter by response status
StrictMatchDomain bool // if Domain value must be matched strictly
StrictMatchClient bool // if Client value must be matched strictly
@@ -291,19 +263,16 @@ const (
)
// Get log entries
-func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
+func (l *queryLog) getData(params getDataParams) map[string]interface{} {
var data = []map[string]interface{}{}
- if len(params.Domain) != 0 && params.StrictMatchDomain {
- params.Domain = params.Domain + "."
- }
-
+ var oldest time.Time
now := time.Now()
entries := []*logEntry{}
total := 0
// add from file
- entries, total = l.readFromFile(params)
+ entries, oldest, total = l.readFromFile(params)
if params.OlderThan.IsZero() {
params.OlderThan = now
@@ -332,26 +301,12 @@ func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
// process the elements from latest to oldest
for i := len(entries) - 1; i >= 0; i-- {
entry := entries[i]
- var q *dns.Msg
var a *dns.Msg
- if len(entry.Question) == 0 {
- continue
- }
- q = new(dns.Msg)
- if err := q.Unpack(entry.Question); err != nil {
- log.Tracef("q.Unpack(): %s", err)
- continue
- }
- if len(q.Question) != 1 {
- log.Tracef("len(q.Question) != 1")
- continue
- }
-
if len(entry.Answer) > 0 {
a = new(dns.Msg)
if err := a.Unpack(entry.Answer); err != nil {
- log.Debug("Failed to unpack dns message answer: %s", err)
+ log.Debug("Failed to unpack dns message answer: %s: %s", err, string(entry.Answer))
a = nil
}
}
@@ -363,9 +318,9 @@ func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
"client": entry.IP,
}
jsonEntry["question"] = map[string]interface{}{
- "host": strings.ToLower(strings.TrimSuffix(q.Question[0].Name, ".")),
- "type": dns.Type(q.Question[0].Qtype).String(),
- "class": dns.Class(q.Question[0].Qclass).String(),
+ "host": entry.QHost,
+ "type": entry.QType,
+ "class": entry.QClass,
}
if a != nil {
@@ -390,7 +345,17 @@ func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
log.Debug("QueryLog: prepared data (%d/%d) older than %s in %s",
len(entries), total, params.OlderThan, time.Since(now))
- return data
+
+ var result = map[string]interface{}{}
+ if len(entries) == getDataLimit {
+ oldest = entries[0].Time
+ }
+ result["oldest"] = ""
+ if !oldest.IsZero() {
+ result["oldest"] = oldest.Format(time.RFC3339Nano)
+ }
+ result["data"] = data
+ return result
}
func answerToMap(a *dns.Msg) []map[string]interface{} {
@@ -408,9 +373,9 @@ func answerToMap(a *dns.Msg) []map[string]interface{} {
// try most common record types
switch v := k.(type) {
case *dns.A:
- answer["value"] = v.A
+ answer["value"] = v.A.String()
case *dns.AAAA:
- answer["value"] = v.AAAA
+ answer["value"] = v.AAAA.String()
case *dns.MX:
answer["value"] = fmt.Sprintf("%v %v", v.Preference, v.Mx)
case *dns.CNAME:
diff --git a/querylog/qlog_http.go b/querylog/qlog_http.go
index d48cd053..d9feb9b6 100644
--- a/querylog/qlog_http.go
+++ b/querylog/qlog_http.go
@@ -67,12 +67,12 @@ func (l *queryLog) handleQueryLog(w http.ResponseWriter, r *http.Request) {
}
if len(req.filterQuestionType) != 0 {
- qtype, ok := dns.StringToType[req.filterQuestionType]
+ _, ok := dns.StringToType[req.filterQuestionType]
if !ok {
httpError(r, w, http.StatusBadRequest, "invalid question_type")
return
}
- params.QuestionType = qtype
+ params.QuestionType = req.filterQuestionType
}
if len(req.filterResponseStatus) != 0 {
diff --git a/querylog/querylog.go b/querylog/querylog.go
index 26bd55a0..5158a211 100644
--- a/querylog/querylog.go
+++ b/querylog/querylog.go
@@ -21,7 +21,7 @@ type QueryLog interface {
Close()
// Add a log entry
- Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, addr net.Addr, upstream string)
+ Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, ip net.IP, upstream string)
// WriteDiskConfig - write configuration
WriteDiskConfig(dc *DiskConfig)
diff --git a/querylog/querylog_file.go b/querylog/querylog_file.go
index 3b8f2663..1a466813 100644
--- a/querylog/querylog_file.go
+++ b/querylog/querylog_file.go
@@ -4,13 +4,17 @@ import (
"bufio"
"bytes"
"compress/gzip"
+ "encoding/base64"
"encoding/json"
"io"
"os"
+ "strconv"
"strings"
"time"
+ "github.com/AdguardTeam/AdGuardHome/dnsfilter"
"github.com/AdguardTeam/golibs/log"
+ "github.com/miekg/dns"
)
const enableGzip = false
@@ -145,13 +149,15 @@ func (l *queryLog) periodicRotate() {
// Reader is the DB reader context
type Reader struct {
- ql *queryLog
+ ql *queryLog
+ search *getDataParams
f *os.File
reader *bufio.Reader // reads file line by line
now time.Time
validFrom int64 // UNIX time (ns)
olderThan int64 // UNIX time (ns)
+ oldest time.Time
files []string
ifile int
@@ -161,10 +167,12 @@ type Reader struct {
latest bool // return the latest entries
filePrepared bool
- searching bool // we're seaching for an entry with exact time stamp
+	seeking       bool // we're searching for an entry with exact time stamp
fseeker fileSeeker // file seeker object
fpos uint64 // current file offset
nSeekRequests uint32 // number of Seek() requests made (finding a new line doesn't count)
+
+ timecnt uint64
}
type fileSeeker struct {
@@ -197,8 +205,8 @@ func (r *Reader) Close() {
if r.count > 0 {
perunit = elapsed / time.Duration(r.count)
}
- log.Debug("querylog: read %d entries in %v, %v/entry, seek-reqs:%d",
- r.count, elapsed, perunit, r.nSeekRequests)
+ log.Debug("querylog: read %d entries in %v, %v/entry, seek-reqs:%d time:%dus (%d%%)",
+ r.count, elapsed, perunit, r.nSeekRequests, r.timecnt/1000, r.timecnt*100/uint64(elapsed.Nanoseconds()))
if r.f != nil {
r.f.Close()
@@ -208,25 +216,26 @@ func (r *Reader) Close() {
// BeginRead - start reading
// olderThan: stop returning entries when an entry with this time is reached
// count: minimum number of entries to return
-func (r *Reader) BeginRead(olderThan time.Time, count uint64) {
+func (r *Reader) BeginRead(olderThan time.Time, count uint64, search *getDataParams) {
r.olderThan = olderThan.UnixNano()
r.latest = olderThan.IsZero()
+ r.oldest = time.Time{}
+ r.search = search
r.limit = count
if r.latest {
r.olderThan = r.now.UnixNano()
}
r.filePrepared = false
- r.searching = false
+ r.seeking = false
}
// BeginReadPrev - start reading the previous data chunk
-func (r *Reader) BeginReadPrev(olderThan time.Time, count uint64) {
- r.olderThan = olderThan.UnixNano()
- r.latest = olderThan.IsZero()
+func (r *Reader) BeginReadPrev(count uint64) {
+ r.olderThan = r.oldest.UnixNano()
+ r.oldest = time.Time{}
+ r.latest = false
r.limit = count
- if r.latest {
- r.olderThan = r.now.UnixNano()
- }
+ r.count = 0
off := r.fpos - maxEntrySize*(r.limit+1)
if int64(off) < maxEntrySize {
@@ -245,7 +254,7 @@ func (r *Reader) BeginReadPrev(olderThan time.Time, count uint64) {
r.fseeker.pos = r.fpos
r.filePrepared = true
- r.searching = false
+ r.seeking = false
}
// Perform binary seek
@@ -335,7 +344,7 @@ func (r *Reader) prepareRead() bool {
}
} else {
// start searching in file: we'll read the first chunk of data from the middle of file
- r.searching = true
+ r.seeking = true
r.fseeker = fileSeeker{}
r.fseeker.target = uint64(r.olderThan)
r.fseeker.hi = fsize
@@ -358,6 +367,226 @@ func (r *Reader) prepareRead() bool {
return true
}
+// Get bool value from "key":bool
+func readJSONBool(s, name string) (bool, bool) {
+ i := strings.Index(s, "\""+name+"\":")
+ if i == -1 {
+ return false, false
+ }
+ start := i + 1 + len(name) + 2
+ b := false
+ if strings.HasPrefix(s[start:], "true") {
+ b = true
+ } else if !strings.HasPrefix(s[start:], "false") {
+ return false, false
+ }
+ return b, true
+}
+
+// Get value from "key":"value"
+func readJSONValue(s, name string) string {
+ i := strings.Index(s, "\""+name+"\":\"")
+ if i == -1 {
+ return ""
+ }
+ start := i + 1 + len(name) + 3
+ i = strings.IndexByte(s[start:], '"')
+ if i == -1 {
+ return ""
+ }
+ end := start + i
+ return s[start:end]
+}
+
+func (r *Reader) applySearch(str string) bool {
+ if r.search.ResponseStatus == responseStatusFiltered {
+ boolVal, ok := readJSONBool(str, "IsFiltered")
+ if !ok || !boolVal {
+ return false
+ }
+ }
+
+ if len(r.search.Domain) != 0 {
+ val := readJSONValue(str, "QH")
+ if len(val) == 0 {
+ return false
+ }
+
+ if (r.search.StrictMatchDomain && val != r.search.Domain) ||
+ (!r.search.StrictMatchDomain && strings.Index(val, r.search.Domain) == -1) {
+ return false
+ }
+ }
+
+ if len(r.search.QuestionType) != 0 {
+ val := readJSONValue(str, "QT")
+ if len(val) == 0 {
+ return false
+ }
+ if val != r.search.QuestionType {
+ return false
+ }
+ }
+
+ if len(r.search.Client) != 0 {
+ val := readJSONValue(str, "IP")
+ if len(val) == 0 {
+ log.Debug("QueryLog: failed to decode")
+ return false
+ }
+
+ if (r.search.StrictMatchClient && val != r.search.Client) ||
+ (!r.search.StrictMatchClient && strings.Index(val, r.search.Client) == -1) {
+ return false
+ }
+ }
+
+ return true
+}
+
+const (
+ jsonTErr = iota
+ jsonTObj
+ jsonTStr
+ jsonTNum
+ jsonTBool
+)
+
+// Parse JSON key-value pair
+// e.g.: "key":VALUE where VALUE is "string", true|false (boolean), or 123.456 (number)
+// Note the limitations:
+// . doesn't support whitespace
+// . doesn't support "null"
+// . doesn't validate boolean or number
+// . no proper handling of {} braces
+// . no handling of [] brackets
+// Return (key, value, type)
+func readJSON(ps *string) (string, string, int32) {
+ s := *ps
+ k := ""
+ v := ""
+ t := int32(jsonTErr)
+
+ q1 := strings.IndexByte(s, '"')
+ if q1 == -1 {
+ return k, v, t
+ }
+ q2 := strings.IndexByte(s[q1+1:], '"')
+ if q2 == -1 {
+ return k, v, t
+ }
+ k = s[q1+1 : q1+1+q2]
+ s = s[q1+1+q2+1:]
+
+ if len(s) < 2 || s[0] != ':' {
+ return k, v, t
+ }
+
+ if s[1] == '"' {
+ q2 = strings.IndexByte(s[2:], '"')
+ if q2 == -1 {
+ return k, v, t
+ }
+ v = s[2 : 2+q2]
+ t = jsonTStr
+ s = s[2+q2+1:]
+
+ } else if s[1] == '{' {
+ t = jsonTObj
+ s = s[1+1:]
+
+ } else {
+ sep := strings.IndexAny(s[1:], ",}")
+ if sep == -1 {
+ return k, v, t
+ }
+ v = s[1 : 1+sep]
+ if s[1] == 't' || s[1] == 'f' {
+ t = jsonTBool
+ } else if s[1] == '.' || (s[1] >= '0' && s[1] <= '9') {
+ t = jsonTNum
+ }
+ s = s[1+sep+1:]
+ }
+
+ *ps = s
+ return k, v, t
+}
+
+// nolint (gocyclo)
+func decode(ent *logEntry, str string) {
+ var b bool
+ var i int
+ var err error
+ for {
+ k, v, t := readJSON(&str)
+ if t == jsonTErr {
+ break
+ }
+ switch k {
+ case "IP":
+ ent.IP = v
+ case "T":
+ ent.Time, err = time.Parse(time.RFC3339, v)
+
+ case "QH":
+ ent.QHost = v
+ case "QT":
+ ent.QType = v
+ case "QC":
+ ent.QClass = v
+
+ case "Answer":
+ ent.Answer, err = base64.StdEncoding.DecodeString(v)
+
+ case "IsFiltered":
+ b, err = strconv.ParseBool(v)
+ ent.Result.IsFiltered = b
+ case "Rule":
+ ent.Result.Rule = v
+ case "FilterID":
+ i, err = strconv.Atoi(v)
+ ent.Result.FilterID = int64(i)
+ case "Reason":
+ i, err = strconv.Atoi(v)
+ ent.Result.Reason = dnsfilter.Reason(i)
+
+ case "Upstream":
+ ent.Upstream = v
+ case "Elapsed":
+ i, err = strconv.Atoi(v)
+ ent.Elapsed = time.Duration(i)
+
+ // pre-v0.99.3 compatibility:
+ case "Question":
+ var qstr []byte
+ qstr, err = base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ break
+ }
+ q := new(dns.Msg)
+ err = q.Unpack(qstr)
+ if err != nil {
+ break
+ }
+ ent.QHost = q.Question[0].Name
+ if len(ent.QHost) == 0 {
+ break
+ }
+ ent.QHost = ent.QHost[:len(ent.QHost)-1]
+ ent.QType = dns.TypeToString[q.Question[0].Qtype]
+ ent.QClass = dns.ClassToString[q.Question[0].Qclass]
+ case "Time":
+ ent.Time, err = time.Parse(time.RFC3339, v)
+ }
+
+ if err != nil {
+ log.Debug("decode err: %s", err)
+ break
+ }
+ }
+}
+
// Next - return the next entry or nil if reading is finished
func (r *Reader) Next() *logEntry { // nolint
for {
@@ -379,24 +608,28 @@ func (r *Reader) Next() *logEntry { // nolint
r.filePrepared = true
}
- // open decoder
b, err := r.reader.ReadBytes('\n')
if err != nil {
return nil
}
- strReader := strings.NewReader(string(b))
- jd := json.NewDecoder(strReader)
+ str := string(b)
- // read data
- var entry logEntry
- err = jd.Decode(&entry)
- if err != nil {
- log.Debug("QueryLog: Failed to decode: %s", err)
+ val := readJSONValue(str, "T")
+ if len(val) == 0 {
+ val = readJSONValue(str, "Time")
+ }
+ if len(val) == 0 {
+ log.Debug("QueryLog: failed to decode")
continue
}
+ tm, err := time.Parse(time.RFC3339, val)
+ if err != nil {
+ log.Debug("QueryLog: failed to decode")
+ continue
+ }
+ t := tm.UnixNano()
- t := entry.Time.UnixNano()
- if r.searching {
+ if r.seeking {
r.reader = nil
rr := r.fseeker.seekBinary(uint64(t))
@@ -407,7 +640,7 @@ func (r *Reader) Next() *logEntry { // nolint
} else if rr == 0 {
// We found the target entry.
// We'll start reading the previous chunk of data.
- r.searching = false
+ r.seeking = false
off := r.fpos - (maxEntrySize * (r.limit + 1))
if int64(off) < maxEntrySize {
@@ -430,19 +663,37 @@ func (r *Reader) Next() *logEntry { // nolint
continue
}
+ if r.oldest.IsZero() {
+ r.oldest = tm
+ }
+
if t < r.validFrom {
continue
}
if t >= r.olderThan {
return nil
}
-
r.count++
- return &entry
+
+ if !r.applySearch(str) {
+ continue
+ }
+
+ st := time.Now()
+ var ent logEntry
+ decode(&ent, str)
+ r.timecnt += uint64(time.Now().Sub(st).Nanoseconds())
+
+ return &ent
}
}
-// Total returns the total number of items
-func (r *Reader) Total() int {
- return 0
+// Total returns the total number of processed items
+func (r *Reader) Total() uint64 {
+ return r.count
+}
+
+// Oldest returns the time of the oldest processed entry
+func (r *Reader) Oldest() time.Time {
+ return r.oldest
}
diff --git a/querylog/querylog_test.go b/querylog/querylog_test.go
index e300cba1..63bcbfd8 100644
--- a/querylog/querylog_test.go
+++ b/querylog/querylog_test.go
@@ -2,6 +2,7 @@ package querylog
import (
"net"
+ "os"
"testing"
"time"
@@ -10,16 +11,94 @@ import (
"github.com/stretchr/testify/assert"
)
+func prepareTestDir() string {
+ const dir = "./agh-test"
+ _ = os.RemoveAll(dir)
+ _ = os.MkdirAll(dir, 0755)
+ return dir
+}
+
+// Check adding and loading (with filtering) entries from disk and memory
func TestQueryLog(t *testing.T) {
conf := Config{
Enabled: true,
Interval: 1,
}
+ conf.BaseDir = prepareTestDir()
+ defer func() { _ = os.RemoveAll(conf.BaseDir) }()
l := newQueryLog(conf)
+ // add disk entries
+ addEntry(l, "example.org", "1.2.3.4", "0.1.2.3")
+ addEntry(l, "example.org", "1.2.3.4", "0.1.2.3")
+
+ // write to disk
+ l.flushLogBuffer(true)
+
+ // add memory entries
+ addEntry(l, "test.example.org", "2.2.3.4", "0.1.2.4")
+
+ // get all entries
+ params := getDataParams{
+ OlderThan: time.Time{},
+ }
+ d := l.getData(params)
+ mdata := d["data"].([]map[string]interface{})
+ assert.True(t, len(mdata) == 2)
+ assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
+ assert.True(t, checkEntry(t, mdata[1], "example.org", "1.2.3.4", "0.1.2.3"))
+
+ // search by domain (strict)
+ params = getDataParams{
+ OlderThan: time.Time{},
+ Domain: "test.example.org",
+ StrictMatchDomain: true,
+ }
+ d = l.getData(params)
+ mdata = d["data"].([]map[string]interface{})
+ assert.True(t, len(mdata) == 1)
+ assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
+
+ // search by domain
+ params = getDataParams{
+ OlderThan: time.Time{},
+ Domain: "example.org",
+ StrictMatchDomain: false,
+ }
+ d = l.getData(params)
+ mdata = d["data"].([]map[string]interface{})
+ assert.True(t, len(mdata) == 2)
+ assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
+ assert.True(t, checkEntry(t, mdata[1], "example.org", "1.2.3.4", "0.1.2.3"))
+
+ // search by client IP (strict)
+ params = getDataParams{
+ OlderThan: time.Time{},
+ Client: "0.1.2.3",
+ StrictMatchClient: true,
+ }
+ d = l.getData(params)
+ mdata = d["data"].([]map[string]interface{})
+ assert.True(t, len(mdata) == 1)
+ assert.True(t, checkEntry(t, mdata[0], "example.org", "1.2.3.4", "0.1.2.3"))
+
+ // search by client IP
+ params = getDataParams{
+ OlderThan: time.Time{},
+ Client: "0.1.2",
+ StrictMatchClient: false,
+ }
+ d = l.getData(params)
+ mdata = d["data"].([]map[string]interface{})
+ assert.True(t, len(mdata) == 2)
+ assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
+ assert.True(t, checkEntry(t, mdata[1], "example.org", "1.2.3.4", "0.1.2.3"))
+}
+
+func addEntry(l *queryLog, host, answerStr, client string) {
q := dns.Msg{}
q.Question = append(q.Question, dns.Question{
- Name: "example.org.",
+ Name: host + ".",
Qtype: dns.TypeA,
Qclass: dns.ClassINET,
})
@@ -32,17 +111,49 @@ func TestQueryLog(t *testing.T) {
Rrtype: dns.TypeA,
Class: dns.ClassINET,
}
- answer.A = net.IP{1, 2, 3, 4}
+ answer.A = net.ParseIP(answerStr)
a.Answer = append(a.Answer, answer)
-
res := dnsfilter.Result{}
- l.Add(&q, &a, &res, 0, nil, "upstream")
-
- params := getDataParams{
- OlderThan: time.Now(),
- }
- d := l.getData(params)
- m := d[0]
- mq := m["question"].(map[string]interface{})
- assert.True(t, mq["host"].(string) == "example.org")
+ l.Add(&q, &a, &res, 0, net.ParseIP(client), "upstream")
+}
+
+func checkEntry(t *testing.T, m map[string]interface{}, host, answer, client string) bool {
+ mq := m["question"].(map[string]interface{})
+ ma := m["answer"].([]map[string]interface{})
+ ma0 := ma[0]
+ if !assert.True(t, mq["host"].(string) == host) ||
+ !assert.True(t, mq["class"].(string) == "IN") ||
+ !assert.True(t, mq["type"].(string) == "A") ||
+ !assert.True(t, ma0["value"].(string) == answer) ||
+ !assert.True(t, m["client"].(string) == client) {
+ return false
+ }
+ return true
+}
+
+func TestJSON(t *testing.T) {
+ s := `
+ {"keystr":"val","obj":{"keybool":true,"keyint":123456}}
+ `
+ k, v, jtype := readJSON(&s)
+ assert.Equal(t, jtype, int32(jsonTStr))
+ assert.Equal(t, "keystr", k)
+ assert.Equal(t, "val", v)
+
+ k, v, jtype = readJSON(&s)
+ assert.Equal(t, jtype, int32(jsonTObj))
+ assert.Equal(t, "obj", k)
+
+ k, v, jtype = readJSON(&s)
+ assert.Equal(t, jtype, int32(jsonTBool))
+ assert.Equal(t, "keybool", k)
+ assert.Equal(t, "true", v)
+
+ k, v, jtype = readJSON(&s)
+ assert.Equal(t, jtype, int32(jsonTNum))
+ assert.Equal(t, "keyint", k)
+ assert.Equal(t, "123456", v)
+
+ k, v, jtype = readJSON(&s)
+ assert.True(t, jtype == jsonTErr)
}