Merge: + QueryLog: server-side paging and filtering

Close #776

* commit 'f04c028e380182558d772ac6d65efe4f7bdb48d8':
  + client: handle filter change
  + client: server side pagination for the query logs
  * querylog: POST /control/querylog
  * openapi
Author: Simon Zolin
Date: 2019-09-26 18:38:27 +03:00
Commit: 02d0c0a8f3
21 changed files with 959 additions and 227 deletions


@ -43,6 +43,7 @@ Contents:
 * API: Set statistics parameters
 * API: Get statistics parameters
 * Query logs
+* API: Get query log
 * API: Set querylog parameters
 * API: Get querylog parameters
 * Filtering
@ -1007,6 +1008,92 @@ Response:
## Query logs
When a new DNS request is received and processed, we store information about the event in the query log. It is a file on disk in JSON format:

    {
      "Question":"...",
      "Answer":"...",
      "Result":{
        "IsFiltered":true,
        "Reason":3,
        "Rule":"...",
        "FilterID":1
      },
      "Time":"...",
      "Elapsed":12345,
      "IP":"127.0.0.1"
    }
### Adding new data
New data is first stored in a memory buffer. When the buffer is filled to a set number of entries (e.g. 5000), we flush the data to a file and clear the buffer.
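In outline, the buffering works like the following sketch (simplified from `queryLog.Add()` in the Go changes below; `flushToFile` is a placeholder, and the real entry type also carries the question, answer, filtering result and client IP):

    package querylog

    import (
    	"sync"
    	"time"
    )

    const logBufferCap = 5000 // flush threshold, matching the real code

    // logEntry is a placeholder; the real entry also stores the question,
    // answer, filtering result, elapsed time and client IP.
    type logEntry struct {
    	Time time.Time
    }

    type queryLog struct {
    	logBufferLock sync.Mutex
    	logBuffer     []*logEntry
    }

    // Add appends an entry to the in-memory buffer; once the buffer holds
    // logBufferCap entries, it is handed to a background flush and cleared.
    func (l *queryLog) Add(e *logEntry) {
    	var toFlush []*logEntry

    	l.logBufferLock.Lock()
    	l.logBuffer = append(l.logBuffer, e)
    	if len(l.logBuffer) >= logBufferCap {
    		toFlush = l.logBuffer
    		l.logBuffer = nil // clear the buffer
    	}
    	l.logBufferLock.Unlock()

    	if toFlush != nil {
    		go flushToFile(toFlush) // append entries to querylog.json as JSON objects
    	}
    }

    func flushToFile(entries []*logEntry) { /* omitted: JSON-encode and append to disk */ }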
### Getting data
When the UI requests query log data (see "API: Get query log"), the server reads the newest entries from the memory buffer and from the file. The maximum number of entries returned per request is limited (see `getDataLimit` in the code below).
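Conceptually, the read path merges the two sources like this (a sketch reusing the types above; `getDataSketch` and `readEntriesFromFile` are hypothetical names standing in for `GetData()` and the `Reader` shown in the Go changes below):

    // getDataSketch returns up to `limit` entries older than the paging
    // cursor: the older part comes from the file, the newest part from
    // the in-memory buffer.
    func (l *queryLog) getDataSketch(olderThan time.Time, limit int) []*logEntry {
    	entries := l.readEntriesFromFile(olderThan, limit) // oldest portion first

    	l.logBufferLock.Lock()
    	for _, e := range l.logBuffer {
    		if !e.Time.Before(olderThan) {
    			break // buffer is ordered by time; the rest is too new
    		}
    		if len(entries) == limit {
    			entries = entries[1:] // drop the oldest to respect the limit
    		}
    		entries = append(entries, e)
    	}
    	l.logBufferLock.Unlock()

    	return entries
    }

    // readEntriesFromFile stands in for Reader/BeginRead/Next below.
    func (l *queryLog) readEntriesFromFile(olderThan time.Time, limit int) []*logEntry {
    	return nil // omitted
    }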
### Removing old data
We store data for a limited amount of time: the log file is automatically rotated.
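The rotation step itself can be as simple as a rename. The reader below looks for exactly one older generation with a `.1` suffix, so a sketch (continuing the example above, with `os` imported) is:

    // rotate keeps one previous generation: querylog.json becomes
    // querylog.json.1 (replacing the old copy), and the next flush
    // recreates an empty current file.
    func (l *queryLog) rotate(logFile string) error {
    	return os.Rename(logFile, logFile+".1")
    }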
### API: Get query log
Request:
POST /control/querylog
    {
      "older_than": "2006-01-02T15:04:05.999999999Z07:00", // must be "" for the first request
      "filter": {
        "domain": "...",
        "client": "...",
        "question_type": "A" | "AAAA",
        "response_status": "" | "filtered"
      }
    }
If the `older_than` value is set, the server returns the next chunk of entries that are older than this timestamp. This is how paging works: the UI sets the value to `""` on the first request and gets the latest log entries; to get older entries, it sets the value to the timestamp of the last (i.e. the oldest) entry from the previous response.
If the "filter" settings are set, the server returns only the entries that match the filter.
For `filter.domain` and `filter.client` the server matches substrings by default: `adguard.com` matches `www.adguard.com`. Strict matching can be enabled by enclosing the value in double quotes: `"adguard.com"` matches `adguard.com` but doesn't match `www.adguard.com`.
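For example, requesting the next chunk of A-record queries for exactly `adguard.com` could look like this (the timestamp is illustrative; note that the strict-match quotes are escaped inside the JSON string):

    POST /control/querylog
    {
      "older_than": "2019-09-25T18:38:27.000000001+03:00",
      "filter": {
        "domain": "\"adguard.com\"",
        "question_type": "A"
      }
    }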
Response:
    [
      {
        "answer":[
          {
            "ttl":10,
            "type":"AAAA",
            "value":"::"
          }
          ...
        ],
        "client":"127.0.0.1",
        "elapsedMs":"0.098403",
        "filterId":1,
        "question":{
          "class":"IN",
          "host":"doubleclick.net",
          "type":"AAAA"
        },
        "reason":"FilteredBlackList",
        "rule":"||doubleclick.net^",
        "status":"NOERROR",
        "time":"2006-01-02T15:04:05.999999999Z07:00"
      }
      ...
    ]
The most recent entries are at the top of the list.
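To illustrate the paging contract end to end, here is a minimal Go client that walks the whole log chunk by chunk. It is a sketch rather than part of the commit: it assumes an AdGuard Home instance at `127.0.0.1:3000` (an illustrative address) with authentication disabled:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    type logItem struct {
    	Time string `json:"time"`
    }

    // fetchPage POSTs to /control/querylog and returns one chunk of entries.
    // olderThan must be "" on the first request; pass the time of the last
    // (oldest) entry of the previous chunk to get the next one.
    func fetchPage(olderThan string) ([]logItem, error) {
    	body, _ := json.Marshal(map[string]interface{}{
    		"older_than": olderThan,
    		"filter": map[string]string{
    			"response_status": "filtered", // only blocked queries
    		},
    	})

    	resp, err := http.Post("http://127.0.0.1:3000/control/querylog",
    		"application/json", bytes.NewReader(body))
    	if err != nil {
    		return nil, err
    	}
    	defer resp.Body.Close()

    	var items []logItem
    	err = json.NewDecoder(resp.Body).Decode(&items)
    	return items, err
    }

    func main() {
    	olderThan := "" // first request returns the newest entries
    	for {
    		items, err := fetchPage(olderThan)
    		if err != nil || len(items) == 0 {
    			break // error, or the log has been fully read
    		}
    		fmt.Printf("got %d entries\n", len(items))
    		olderThan = items[len(items)-1].Time // oldest entry of this chunk
    	}
    }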
### API: Set querylog parameters
Request:

client/package-lock.json (generated)

@ -9868,12 +9868,20 @@
"dev": true "dev": true
}, },
"prop-types": { "prop-types": {
"version": "15.6.2", "version": "15.7.2",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.6.2.tgz", "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
"integrity": "sha512-3pboPvLiWD7dkI3qf3KbUe6hKFKa52w+AE0VCqECtf+QHAKgOL37tTaNCnuX1nAAQ4ZhyP+kYVKf8rLmJ/feDQ==", "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
"requires": { "requires": {
"loose-envify": "^1.3.1", "loose-envify": "^1.4.0",
"object-assign": "^4.1.1" "object-assign": "^4.1.1",
"react-is": "^16.8.1"
},
"dependencies": {
"react-is": {
"version": "16.9.0",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.9.0.tgz",
"integrity": "sha512-tJBzzzIgnnRfEm046qRcURvwQnZVXmuCbscxUO5RWrGTXpon2d4c8mI0D8WE6ydVIm29JiLB6+RslkIvym9Rjw=="
}
} }
}, },
"proxy-addr": { "proxy-addr": {
@ -10212,9 +10220,9 @@
     }
   },
   "react-table": {
-    "version": "6.8.6",
-    "resolved": "https://registry.npmjs.org/react-table/-/react-table-6.8.6.tgz",
-    "integrity": "sha1-oK2LSDkxkFLVvvwBJgP7Fh5S7eM=",
+    "version": "6.10.3",
+    "resolved": "https://registry.npmjs.org/react-table/-/react-table-6.10.3.tgz",
+    "integrity": "sha512-sVlq2/rxVaQJywGD95+qGiMr/SMHFIFnXdx619BLOWE/Os5FOGtV6pQJNAjZixbQZiOu7dmBO1kME28uxh6wmA==",
     "requires": {
       "classnames": "^2.2.5"
     }

client/package.json

@ -17,7 +17,7 @@
"i18next-browser-languagedetector": "^2.2.3", "i18next-browser-languagedetector": "^2.2.3",
"lodash": "^4.17.15", "lodash": "^4.17.15",
"nanoid": "^1.2.3", "nanoid": "^1.2.3",
"prop-types": "^15.6.1", "prop-types": "^15.7.2",
"react": "^16.4.0", "react": "^16.4.0",
"react-click-outside": "^3.0.1", "react-click-outside": "^3.0.1",
"react-dom": "^16.4.0", "react-dom": "^16.4.0",
@ -27,7 +27,7 @@
"react-redux-loading-bar": "^4.0.7", "react-redux-loading-bar": "^4.0.7",
"react-router-dom": "^4.2.2", "react-router-dom": "^4.2.2",
"react-router-hash-link": "^1.2.2", "react-router-hash-link": "^1.2.2",
"react-table": "^6.8.6", "react-table": "^6.10.3",
"react-transition-group": "^2.4.0", "react-transition-group": "^2.4.0",
"redux": "^4.0.0", "redux": "^4.0.0",
"redux-actions": "^2.4.0", "redux-actions": "^2.4.0",


@ -163,7 +163,6 @@
"show_filtered_type": "Show filtered", "show_filtered_type": "Show filtered",
"no_logs_found": "No logs found", "no_logs_found": "No logs found",
"refresh_btn": "Refresh", "refresh_btn": "Refresh",
"last_dns_queries": "Last 5000 DNS queries",
"previous_btn": "Previous", "previous_btn": "Previous",
"next_btn": "Next", "next_btn": "Next",
"loading_table_status": "Loading...", "loading_table_status": "Loading...",
@ -182,6 +181,7 @@
"query_log_enable": "Enable log", "query_log_enable": "Enable log",
"query_log_configuration": "Logs configuration", "query_log_configuration": "Logs configuration",
"query_log_disabled": "The query log is disabled and can be configured in the <0>settings</0>", "query_log_disabled": "The query log is disabled and can be configured in the <0>settings</0>",
"query_log_strict_search": "Use double quotes for strict search",
"source_label": "Source", "source_label": "Source",
"found_in_known_domain_db": "Found in the known domains database.", "found_in_known_domain_db": "Found in the known domains database.",
"category_label": "Category", "category_label": "Category",


@ -4,15 +4,19 @@ import apiClient from '../api/Api';
 import { addErrorToast, addSuccessToast } from './index';
 import { normalizeLogs } from '../helpers/helpers';

+export const setLogsPagination = createAction('LOGS_PAGINATION');
+export const setLogsFilter = createAction('LOGS_FILTER');
 export const getLogsRequest = createAction('GET_LOGS_REQUEST');
 export const getLogsFailure = createAction('GET_LOGS_FAILURE');
 export const getLogsSuccess = createAction('GET_LOGS_SUCCESS');

-export const getLogs = () => async (dispatch) => {
+export const getLogs = config => async (dispatch) => {
     dispatch(getLogsRequest());
     try {
-        const logs = normalizeLogs(await apiClient.getQueryLog());
-        dispatch(getLogsSuccess(logs));
+        const { filter, lastRowTime: older_than } = config;
+        const logs = normalizeLogs(await apiClient.getQueryLog({ filter, older_than }));
+        dispatch(getLogsSuccess({ logs, ...config }));
     } catch (error) {
         dispatch(addErrorToast({ error }));
         dispatch(getLogsFailure(error));


@ -482,14 +482,18 @@ class Api {
     }

     // Query log
-    GET_QUERY_LOG = { path: 'querylog', method: 'GET' };
+    GET_QUERY_LOG = { path: 'querylog', method: 'POST' };
     QUERY_LOG_CONFIG = { path: 'querylog_config', method: 'POST' };
     QUERY_LOG_INFO = { path: 'querylog_info', method: 'GET' };
     QUERY_LOG_CLEAR = { path: 'querylog_clear', method: 'POST' };

-    getQueryLog() {
+    getQueryLog(data) {
         const { path, method } = this.GET_QUERY_LOG;
-        return this.makeRequest(path, method);
+        const config = {
+            data,
+            headers: { 'Content-Type': 'application/json' },
+        };
+        return this.makeRequest(path, method, config);
     }

     getQueryLogInfo() {


@ -107,6 +107,11 @@
     border: 1px solid rgba(0, 40, 100, 0.12);
 }

+.logs__table .rt-thead.-filters select {
+    background: #fff url("data:image/svg+xml;base64,PHN2ZyB4bWxucz0naHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmcnIHZpZXdCb3g9JzAgMCAxMCA1Jz48cGF0aCBmaWxsPScjOTk5JyBkPSdNMCAwTDEwIDBMNSA1TDAgMCcvPjwvc3ZnPg==") no-repeat right 0.75rem center;
+    background-size: 8px 10px;
+}
+
 .logs__table .rt-thead.-filters input:focus,
 .logs__table .rt-thead.-filters select:focus {
     border-color: #1991eb;
@ -130,6 +135,21 @@
     overflow: hidden;
 }

+.logs__input-wrap {
+    position: relative;
+}
+
+.logs__notice {
+    position: absolute;
+    z-index: 1;
+    top: 8px;
+    right: 10px;
+    margin-top: 3px;
+    font-size: 12px;
+    text-align: left;
+    color: #a5a5a5;
+}
+
 .logs__whois {
     display: inline;
 }


@ -5,9 +5,14 @@ import escapeRegExp from 'lodash/escapeRegExp';
 import endsWith from 'lodash/endsWith';
 import { Trans, withNamespaces } from 'react-i18next';
 import { HashLink as Link } from 'react-router-hash-link';
+import debounce from 'lodash/debounce';

-import { formatTime, formatDateTime } from '../../helpers/helpers';
-import { SERVICES, FILTERED_STATUS } from '../../helpers/constants';
+import {
+    formatTime,
+    formatDateTime,
+    isValidQuestionType,
+} from '../../helpers/helpers';
+import { SERVICES, FILTERED_STATUS, DEBOUNCE_TIMEOUT, DEFAULT_LOGS_FILTER } from '../../helpers/constants';
 import { getTrackerData } from '../../helpers/trackers/trackers';
 import { formatClientCell } from '../../helpers/formatClientCell';
@ -16,8 +21,12 @@ import Card from '../ui/Card';
 import Loading from '../ui/Loading';
 import PopoverFiltered from '../ui/PopoverFilter';
 import Popover from '../ui/Popover';
+import Tooltip from '../ui/Tooltip';
 import './Logs.css';

+const TABLE_FIRST_PAGE = 0;
+const TABLE_DEFAULT_PAGE_SIZE = 50;
+const INITIAL_REQUEST_DATA = ['', DEFAULT_LOGS_FILTER, TABLE_FIRST_PAGE, TABLE_DEFAULT_PAGE_SIZE];
+
 const FILTERED_REASON = 'Filtered';
 const RESPONSE_FILTER = {
     ALL: 'all',
@ -26,26 +35,34 @@ const RESPONSE_FILTER = {
 class Logs extends Component {
     componentDidMount() {
-        this.getLogs();
+        this.getLogs(...INITIAL_REQUEST_DATA);
         this.props.getFilteringStatus();
         this.props.getClients();
         this.props.getLogsConfig();
     }

-    componentDidUpdate(prevProps) {
-        // get logs when queryLog becomes enabled
-        if (this.props.queryLogs.enabled && !prevProps.queryLogs.enabled) {
-            this.props.getLogs();
-        }
-    }
-
-    getLogs = () => {
-        // get logs on initialization if queryLogIsEnabled
+    getLogs = (lastRowTime, filter, page, pageSize, filtered) => {
         if (this.props.queryLogs.enabled) {
-            this.props.getLogs();
+            this.props.getLogs({
+                lastRowTime, filter, page, pageSize, filtered,
+            });
         }
     };

+    refreshLogs = () => {
+        window.location.reload();
+    };
+
+    handleLogsFiltering = debounce((lastRowTime, filter, page, pageSize, filtered) => {
+        this.props.getLogs({
+            lastRowTime,
+            filter,
+            page,
+            pageSize,
+            filtered,
+        });
+    }, DEBOUNCE_TIMEOUT);
+
     renderTooltip = (isFiltered, rule, filter, service) =>
         isFiltered && <PopoverFiltered rule={rule} filter={filter} service={service} />;
@ -215,8 +232,75 @@ class Logs extends Component {
         );
     };

-    renderLogs(logs) {
-        const { t } = this.props;
+    getFilterInput = ({ filter, onChange }) => (
+        <Fragment>
<div className="logs__input-wrap">
<input
type="text"
className="form-control"
onChange={event => onChange(event.target.value)}
value={filter ? filter.value : ''}
/>
<span className="logs__notice">
<Tooltip text={this.props.t('query_log_strict_search')} type='tooltip-custom--logs' />
</span>
</div>
</Fragment>
);
getFilters = (filtered) => {
const filteredObj = filtered.reduce((acc, cur) => ({ ...acc, [cur.id]: cur.value }), {});
const {
domain, client, type, response,
} = filteredObj;
return {
domain: domain || '',
client: client || '',
question_type: isValidQuestionType(type) ? type.toUpperCase() : '',
response_status: response === RESPONSE_FILTER.FILTERED ? response : '',
};
};
fetchData = (state) => {
const { pageSize, page, pages } = state;
const { allLogs, filter } = this.props.queryLogs;
const isLastPage = page + 1 === pages;
if (isLastPage) {
const lastRow = allLogs[allLogs.length - 1];
const lastRowTime = (lastRow && lastRow.time) || '';
this.getLogs(lastRowTime, filter, page, pageSize, true);
} else {
this.props.setLogsPagination({ page, pageSize });
}
};
handleFilterChange = (filtered) => {
const filters = this.getFilters(filtered);
this.props.setLogsFilter(filters);
this.handleLogsFiltering('', filters, TABLE_FIRST_PAGE, TABLE_DEFAULT_PAGE_SIZE, true);
}
showTotalPagesCount = (pages) => {
const { total, isEntireLog } = this.props.queryLogs;
const showEllipsis = !isEntireLog && total >= 500;
return (
<span className="-totalPages">
{pages || 1}{showEllipsis && '…' }
</span>
);
}
renderLogs() {
const { queryLogs, dashboard, t } = this.props;
const { processingClients } = dashboard;
const {
processingGetLogs, processingGetConfig, logs, pages,
} = queryLogs;
const isLoading = processingGetLogs || processingClients || processingGetConfig;
         const columns = [
             {
                 Header: t('time_table_header'),
@ -230,6 +314,7 @@ class Logs extends Component {
                 accessor: 'domain',
                 minWidth: 180,
                 Cell: this.getDomainCell,
+                Filter: this.getFilterInput,
             },
             {
                 Header: t('type_table_header'),
@ -251,7 +336,7 @@ class Logs extends Component {
                 },
                 Filter: ({ filter, onChange }) => (
                     <select
-                        className="form-control"
+                        className="form-control custom-select"
                         onChange={event => onChange(event.target.value)}
                         value={filter ? filter.value : RESPONSE_FILTER.ALL}
                     >
@ -270,82 +355,84 @@ class Logs extends Component {
                 maxWidth: 240,
                 minWidth: 240,
                 Cell: this.getClientCell,
+                Filter: this.getFilterInput,
             },
         ];
-        if (logs) {
-            return (
-                <ReactTable
-                    className="logs__table"
-                    filterable
-                    data={logs}
-                    columns={columns}
-                    showPagination={true}
-                    defaultPageSize={50}
-                    minRows={7}
+        return (
+            <ReactTable
+                manual
+                filterable
+                minRows={5}
+                pages={pages}
+                columns={columns}
+                sortable={false}
+                data={logs || []}
+                loading={isLoading}
+                showPageJump={false}
+                onFetchData={this.fetchData}
+                onFilteredChange={this.handleFilterChange}
+                className="logs__table"
+                showPagination={true}
+                defaultPageSize={TABLE_DEFAULT_PAGE_SIZE}
                 previousText={t('previous_btn')}
                 nextText={t('next_btn')}
                 loadingText={t('loading_table_status')}
                 pageText={t('page_table_footer_text')}
                 ofText={t('of_table_footer_text')}
                 rowsText={t('rows_table_footer_text')}
                 noDataText={t('no_logs_found')}
+                renderTotalPagesCount={this.showTotalPagesCount}
                 defaultFilterMethod={(filter, row) => {
                     const id = filter.pivotId || filter.id;
                     return row[id] !== undefined
                         ? String(row[id]).indexOf(filter.value) !== -1
                         : true;
                 }}
                 defaultSorted={[
                     {
                         id: 'time',
                         desc: true,
                     },
                 ]}
                 getTrProps={(_state, rowInfo) => {
                     if (!rowInfo) {
                         return {};
                     }

                     const { reason } = rowInfo.original;

                     if (this.checkFiltered(reason)) {
                         return {
                             className: 'red',
                         };
                     } else if (this.checkWhiteList(reason)) {
                         return {
                             className: 'green',
                         };
                     } else if (this.checkRewrite(reason)) {
                         return {
                             className: 'blue',
                         };
                     }

                     return {
                         className: '',
                     };
                 }}
             />
         );
-
-            return null;
-        }
     }
     render() {
-        const { queryLogs, dashboard, t } = this.props;
-        const { enabled, processingGetLogs, processingGetConfig } = queryLogs;
-        const { processingClients } = dashboard;
-        const isDataReady =
-            !processingGetLogs && !processingGetConfig && !dashboard.processingClients;
+        const { queryLogs, t } = this.props;
+        const { enabled, processingGetConfig } = queryLogs;

         const refreshButton = enabled ? (
             <button
+                type="button"
                 className="btn btn-icon btn-outline-primary btn-sm ml-3"
-                type="submit"
-                onClick={this.getLogs}
+                onClick={this.refreshLogs}
             >
                 <svg className="icons">
                     <use xlinkHref="#refresh" />
@ -357,22 +444,24 @@ class Logs extends Component {
         return (
             <Fragment>
-                <PageTitle title={t('query_log')} subtitle={t('last_dns_queries')}>
-                    {refreshButton}
-                </PageTitle>
-                <Card>
-                    {enabled && (processingGetLogs || processingClients || processingGetConfig) && (
-                        <Loading />
-                    )}
-                    {enabled && isDataReady && this.renderLogs(queryLogs.logs)}
-                    {!enabled && !processingGetConfig && (
+                <PageTitle title={t('query_log')}>{refreshButton}</PageTitle>
+                {enabled && processingGetConfig && <Loading />}
+                {enabled && !processingGetConfig && <Card>{this.renderLogs()}</Card>}
+                {!enabled && !processingGetConfig && (
+                    <Card>
                         <div className="lead text-center py-6">
-                            <Trans components={[<Link to="/settings#logs-config" key="0">link</Link>]}>
+                            <Trans
+                                components={[
+                                    <Link to="/settings#logs-config" key="0">
+                                        link
+                                    </Link>,
+                                ]}
+                            >
                                 query_log_disabled
                             </Trans>
                         </div>
-                    )}
-                </Card>
+                    </Card>
+                )}
             </Fragment>
         );
     }
@ -388,6 +477,8 @@ Logs.propTypes = {
     addSuccessToast: PropTypes.func.isRequired,
     getClients: PropTypes.func.isRequired,
     getLogsConfig: PropTypes.func.isRequired,
+    setLogsPagination: PropTypes.func.isRequired,
+    setLogsFilter: PropTypes.func.isRequired,
     t: PropTypes.func.isRequired,
 };


@ -38,6 +38,7 @@
     position: absolute;
     bottom: calc(100% + 3px);
     left: 50%;
+    z-index: 1;
     min-width: 275px;
     padding: 10px 15px;
     font-size: 0.8rem;


@ -52,3 +52,23 @@
 .tooltip-custom--narrow:before {
     width: 220px;
 }
.tooltip-custom--logs {
border-radius: 50%;
background-image: url("./svg/help-circle-gray.svg");
background-color: #fff;
}
.tooltip-custom--logs:before {
bottom: initial;
top: calc(100% + 10px);
right: -10px;
left: initial;
transform: none;
}
.tooltip-custom--logs:after {
top: 8px;
border-top: none;
border-bottom: 6px solid #585965;
}


@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="#9aa0ac" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-help-circle"><circle cx="12" cy="12" r="10"></circle><path d="M9.09 9a3 3 0 0 1 5.83 1c0 2-3 3-3 3"></path><line x1="12" y1="17" x2="12" y2="17"></line></svg>



@ -1,7 +1,7 @@
 import { connect } from 'react-redux';
 import { addSuccessToast, getClients } from '../actions';
 import { getFilteringStatus, setRules } from '../actions/filtering';
-import { getLogs, getLogsConfig } from '../actions/queryLogs';
+import { getLogs, getLogsConfig, setLogsPagination, setLogsFilter } from '../actions/queryLogs';
 import Logs from '../components/Logs';

 const mapStateToProps = (state) => {
@ -17,6 +17,8 @@ const mapDispatchToProps = {
     addSuccessToast,
     getClients,
     getLogsConfig,
+    setLogsPagination,
+    setLogsFilter,
 };

 export default connect(


@ -274,3 +274,54 @@ export const WHOIS_ICONS = {
     netname: 'network',
     descr: '',
 };
export const DNS_RECORD_TYPES = [
'A',
'AAAA',
'AFSDB',
'APL',
'CAA',
'CDNSKEY',
'CDS',
'CERT',
'CNAME',
'CSYNC',
'DHCID',
'DLV',
'DNAME',
'DNSKEY',
'DS',
'HIP',
'IPSECKEY',
'KEY',
'KX',
'LOC',
'MX',
'NAPTR',
'NS',
'NSEC',
'NSEC3',
'NSEC3PARAM',
'OPENPGPKEY',
'PTR',
'RRSIG',
'RP',
'SIG',
'SMIMEA',
'SOA',
'SRV',
'SSHFP',
'TA',
'TKEY',
'TLSA',
'TSIG',
'TXT',
'URI',
];
export const DEFAULT_LOGS_FILTER = {
domain: '',
client: '',
question_type: '',
response_status: '',
};


@ -14,6 +14,7 @@ import {
     STANDARD_WEB_PORT,
     STANDARD_HTTPS_PORT,
     CHECK_TIMEOUT,
+    DNS_RECORD_TYPES,
 } from './constants';

 export const formatTime = (time) => {
@ -318,3 +319,5 @@ export const normalizeWhois = (whois) => {
     return whois;
 };
export const isValidQuestionType = type => type && DNS_RECORD_TYPES.includes(type.toUpperCase());


@ -1,14 +1,61 @@
 import { handleActions } from 'redux-actions';

 import * as actions from '../actions/queryLogs';
+import { DEFAULT_LOGS_FILTER } from '../helpers/constants';

 const queryLogs = handleActions(
     {
[actions.setLogsPagination]: (state, { payload }) => {
const { page, pageSize } = payload;
const { allLogs } = state;
const rowsStart = pageSize * page;
const rowsEnd = (pageSize * page) + pageSize;
const logsSlice = allLogs.slice(rowsStart, rowsEnd);
const pages = Math.ceil(allLogs.length / pageSize);
return {
...state,
pages,
logs: logsSlice,
};
},
[actions.setLogsFilter]: (state, { payload }) => (
{ ...state, filter: payload }
),
         [actions.getLogsRequest]: state => ({ ...state, processingGetLogs: true }),
         [actions.getLogsFailure]: state => ({ ...state, processingGetLogs: false }),
         [actions.getLogsSuccess]: (state, { payload }) => {
-            const newState = { ...state, logs: payload, processingGetLogs: false };
-            return newState;
+            const {
+                logs, lastRowTime, page, pageSize, filtered,
+            } = payload;
let logsWithOffset = state.allLogs.length > 0 ? state.allLogs : logs;
let allLogs = logs;
if (lastRowTime) {
logsWithOffset = [...state.allLogs, ...logs];
allLogs = [...state.allLogs, ...logs];
} else if (filtered) {
logsWithOffset = logs;
allLogs = logs;
}
const pages = Math.ceil(logsWithOffset.length / pageSize);
const total = logsWithOffset.length;
const rowsStart = pageSize * page;
const rowsEnd = (pageSize * page) + pageSize;
const logsSlice = logsWithOffset.slice(rowsStart, rowsEnd);
return {
...state,
pages,
total,
allLogs,
logs: logsSlice,
isEntireLog: logs.length < 1,
processingGetLogs: false,
};
         },

         [actions.clearLogsRequest]: state => ({ ...state, processingClear: true }),
@ -42,7 +89,12 @@ const queryLogs = handleActions(
         processingSetConfig: false,
         logs: [],
         interval: 1,
+        allLogs: [],
+        pages: 1,
+        total: 0,
         enabled: true,
+        older_than: '',
+        filter: DEFAULT_LOGS_FILTER,
     },
 );


@ -3,12 +3,81 @@ package home
 import (
 	"encoding/json"
 	"net/http"
+	"time"

 	"github.com/AdguardTeam/AdGuardHome/querylog"
+	"github.com/miekg/dns"
 )
type qlogFilterJSON struct {
Domain string `json:"domain"`
Client string `json:"client"`
QuestionType string `json:"question_type"`
ResponseStatus string `json:"response_status"`
}
type queryLogRequest struct {
OlderThan string `json:"older_than"`
Filter qlogFilterJSON `json:"filter"`
}
// "value" -> value, return TRUE
func getDoubleQuotesEnclosedValue(s *string) bool {
t := *s
if len(t) >= 2 && t[0] == '"' && t[len(t)-1] == '"' {
*s = t[1 : len(t)-1]
return true
}
return false
}
 func handleQueryLog(w http.ResponseWriter, r *http.Request) {
-	data := config.queryLog.GetData()
+	req := queryLogRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
httpError(w, http.StatusBadRequest, "json decode: %s", err)
return
}
params := querylog.GetDataParams{
Domain: req.Filter.Domain,
Client: req.Filter.Client,
}
if len(req.OlderThan) != 0 {
params.OlderThan, err = time.Parse(time.RFC3339Nano, req.OlderThan)
if err != nil {
httpError(w, http.StatusBadRequest, "invalid time stamp: %s", err)
return
}
}
if getDoubleQuotesEnclosedValue(&params.Domain) {
params.StrictMatchDomain = true
}
if getDoubleQuotesEnclosedValue(&params.Client) {
params.StrictMatchClient = true
}
if len(req.Filter.QuestionType) != 0 {
qtype, ok := dns.StringToType[req.Filter.QuestionType]
if !ok {
httpError(w, http.StatusBadRequest, "invalid question_type")
return
}
params.QuestionType = qtype
}
if len(req.Filter.ResponseStatus) != 0 {
switch req.Filter.ResponseStatus {
case "filtered":
params.ResponseStatus = querylog.ResponseStatusFiltered
default:
httpError(w, http.StatusBadRequest, "invalid response_status")
return
}
}
data := config.queryLog.GetData(params)
 	jsonVal, err := json.Marshal(data)
 	if err != nil {
@ -84,7 +153,7 @@ func checkQueryLogInterval(i uint32) bool {
 // RegisterQueryLogHandlers - register handlers
 func RegisterQueryLogHandlers() {
-	httpRegister(http.MethodGet, "/control/querylog", handleQueryLog)
+	httpRegister("POST", "/control/querylog", handleQueryLog)
 	httpRegister(http.MethodGet, "/control/querylog_info", handleQueryLogInfo)
 	httpRegister(http.MethodPost, "/control/querylog_clear", handleQueryLogClear)
 	httpRegister(http.MethodPost, "/control/querylog_config", handleQueryLogConfig)


@ -176,16 +176,16 @@ paths:
   # --------------------------------------------------
   /querylog:
-    get:
+    post:
       tags:
         - log
       operationId: queryLog
       summary: 'Get DNS server query log'
       parameters:
-        - in: query
-          name: download
-          type: boolean
-          description: 'If any value is set, make the browser download the query instead of displaying it by setting Content-Disposition header'
+        - in: "body"
+          name: "body"
+          schema:
+            $ref: '#/definitions/QueryLogRequest'
       responses:
         200:
           description: OK
@ -1393,12 +1393,21 @@ definitions:
type: "string" type: "string"
description: "DNS request processing start time" description: "DNS request processing start time"
example: "2018-11-26T00:02:41+03:00" example: "2018-11-26T00:02:41+03:00"
QueryLog: QueryLog:
type: "array" type: "array"
description: "Query log" description: "Query log"
items: items:
$ref: "#/definitions/QueryLogItem" $ref: "#/definitions/QueryLogItem"
QueryLogRequest:
type: "object"
description: "Query log request data"
properties:
offset:
type: "integer"
example: 1234
   QueryLogConfig:
     type: "object"
     description: "Query log configuration"


@ -18,7 +18,10 @@ import (
 const (
 	logBufferCap     = 5000            // maximum capacity of logBuffer before it's flushed to disk
 	queryLogFileName = "querylog.json" // .gz added during compression
-	queryLogSize     = 5000 // maximum API response for /querylog
+	getDataLimit     = 500 // GetData(): maximum log entries to return
+
+	// maximum data chunks to parse when filtering entries
+	maxFilteringChunks = 10
 )
// queryLog is a structure that writes and reads the DNS query log // queryLog is a structure that writes and reads the DNS query log
@ -30,9 +33,6 @@ type queryLog struct {
 	logBuffer     []*logEntry
 	fileFlushLock sync.Mutex // synchronize a file-flushing goroutine and main thread
 	flushPending  bool       // don't start another goroutine while the previous one is still running
-
-	cache []*logEntry
-	lock  sync.RWMutex
 }
// create a new instance of the query log // create a new instance of the query log
@ -41,7 +41,6 @@ func newQueryLog(conf Config) *queryLog {
 	l.logFile = filepath.Join(conf.BaseDir, queryLogFileName)
 	l.conf = conf
 	go l.periodicQueryLogRotate()
-	go l.fillFromFile()
 	return &l
 }
@ -62,10 +61,6 @@ func (l *queryLog) Clear() {
 	l.flushPending = false
 	l.logBufferLock.Unlock()

-	l.lock.Lock()
-	l.cache = nil
-	l.lock.Unlock()
-
 	err := os.Remove(l.logFile + ".1")
 	if err != nil {
 		log.Error("file remove: %s: %s", l.logFile+".1", err)
@ -147,13 +142,6 @@ func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Res
 		}
 	}
 	l.logBufferLock.Unlock()
-
-	l.lock.Lock()
-	l.cache = append(l.cache, &entry)
-	if len(l.cache) > queryLogSize {
-		toremove := len(l.cache) - queryLogSize
-		l.cache = l.cache[toremove:]
-	}
-	l.lock.Unlock()

 	// if buffer needs to be flushed to disk, do it now
 	if needFlush {
@ -163,20 +151,143 @@ func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Res
 	}
 }

-func (l *queryLog) GetData() []map[string]interface{} {
-	l.lock.RLock()
-	values := make([]*logEntry, len(l.cache))
-	copy(values, l.cache)
-	l.lock.RUnlock()
-
-	// reverse it so that newest is first
-	for left, right := 0, len(values)-1; left < right; left, right = left+1, right-1 {
-		values[left], values[right] = values[right], values[left]
-	}
-
-	// iterate
+// Return TRUE if this entry is needed
+func isNeeded(entry *logEntry, params GetDataParams) bool {
+	if params.ResponseStatus != 0 {
+		if params.ResponseStatus == ResponseStatusFiltered && !entry.Result.IsFiltered {
+			return false
+		}
+	}
+
+	if len(params.Domain) != 0 || params.QuestionType != 0 {
+		m := dns.Msg{}
+		_ = m.Unpack(entry.Question)
+
+		if params.QuestionType != 0 {
+			if m.Question[0].Qtype != params.QuestionType {
+				return false
+			}
+		}
+
+		if len(params.Domain) != 0 && params.StrictMatchDomain {
+			if m.Question[0].Name != params.Domain {
+				return false
+			}
+		} else if len(params.Domain) != 0 {
+			if strings.Index(m.Question[0].Name, params.Domain) == -1 {
+				return false
+			}
+		}
+	}
+
+	if len(params.Client) != 0 && params.StrictMatchClient {
+		if entry.IP != params.Client {
+			return false
+		}
+	} else if len(params.Client) != 0 {
+		if strings.Index(entry.IP, params.Client) == -1 {
+			return false
+		}
+	}
+
+	return true
+}
func (l *queryLog) readFromFile(params GetDataParams) ([]*logEntry, int) {
entries := []*logEntry{}
olderThan := params.OlderThan
totalChunks := 0
total := 0
r := l.OpenReader()
if r == nil {
return entries, 0
}
r.BeginRead(olderThan, getDataLimit)
for totalChunks < maxFilteringChunks {
first := true
newEntries := []*logEntry{}
for {
entry := r.Next()
if entry == nil {
break
}
total++
if first {
first = false
olderThan = entry.Time
}
if !isNeeded(entry, params) {
continue
}
if len(newEntries) == getDataLimit {
newEntries = newEntries[1:]
}
newEntries = append(newEntries, entry)
}
log.Debug("entries: +%d (%d) older-than:%s", len(newEntries), len(entries), olderThan)
entries = append(newEntries, entries...)
if len(entries) > getDataLimit {
toremove := len(entries) - getDataLimit
entries = entries[toremove:]
break
}
if first || len(entries) == getDataLimit {
break
}
totalChunks++
r.BeginReadPrev(olderThan, getDataLimit)
}
r.Close()
return entries, total
}
func (l *queryLog) GetData(params GetDataParams) []map[string]interface{} {
 	var data = []map[string]interface{}{}
-	for _, entry := range values {
if len(params.Domain) != 0 && params.StrictMatchDomain {
params.Domain = params.Domain + "."
}
now := time.Now()
entries := []*logEntry{}
total := 0
// add from file
entries, total = l.readFromFile(params)
if params.OlderThan.IsZero() {
params.OlderThan = now
}
// add from memory buffer
l.logBufferLock.Lock()
total += len(l.logBuffer)
for _, entry := range l.logBuffer {
if !isNeeded(entry, params) {
continue
}
if entry.Time.UnixNano() >= params.OlderThan.UnixNano() {
break
}
if len(entries) == getDataLimit {
entries = entries[1:]
}
entries = append(entries, entry)
}
l.logBufferLock.Unlock()
// process the elements from latest to oldest
for i := len(entries) - 1; i >= 0; i-- {
entry := entries[i]
 		var q *dns.Msg
 		var a *dns.Msg
@ -200,7 +311,7 @@ func (l *queryLog) GetData() []map[string]interface{} {
 		jsonEntry := map[string]interface{}{
 			"reason":    entry.Result.Reason.String(),
 			"elapsedMs": strconv.FormatFloat(entry.Elapsed.Seconds()*1000, 'f', -1, 64),
-			"time":      entry.Time.Format(time.RFC3339),
+			"time":      entry.Time.Format(time.RFC3339Nano),
 			"client":    entry.IP,
 		}
 		if q != nil {
@ -231,6 +342,8 @@ func (l *queryLog) GetData() []map[string]interface{} {
 		data = append(data, jsonEntry)
 	}
log.Debug("QueryLog: prepared data (%d/%d) older than %s in %s",
len(entries), total, params.OlderThan, time.Since(now))
 	return data
 }


@ -21,7 +21,7 @@ type QueryLog interface {
 	Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, addr net.Addr, upstream string)

 	// Get log entries
-	GetData() []map[string]interface{}
+	GetData(params GetDataParams) []map[string]interface{}

 	// Clear memory buffer and remove log files
 	Clear()
@ -37,3 +37,23 @@ type Config struct {
 func New(conf Config) QueryLog {
 	return newQueryLog(conf)
 }
// GetDataParams - parameters for GetData()
type GetDataParams struct {
OlderThan time.Time // return entries that are older than this value
Domain string // filter by domain name in question
Client string // filter by client IP
QuestionType uint16 // filter by question type
ResponseStatus ResponseStatusType // filter by response status
StrictMatchDomain bool // if Domain value must be matched strictly
StrictMatchClient bool // if Client value must be matched strictly
}
// ResponseStatusType - response status
type ResponseStatusType int32
// Response status constants
const (
ResponseStatusAll ResponseStatusType = iota + 1
ResponseStatusFiltered
)


@ -5,13 +5,13 @@ import (
"compress/gzip" "compress/gzip"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"os" "os"
"sync" "sync"
"time" "time"
"github.com/AdguardTeam/golibs/log" "github.com/AdguardTeam/golibs/log"
"github.com/go-test/deep" "github.com/go-test/deep"
"github.com/miekg/dns"
) )
var ( var (
@ -19,6 +19,7 @@ var (
 )

 const enableGzip = false
+const maxEntrySize = 1000

 // flushLogBuffer flushes the current buffer to file and resets the current buffer
 func (l *queryLog) flushLogBuffer(fullFlush bool) error {
@ -182,50 +183,232 @@ func (l *queryLog) periodicQueryLogRotate() {
 // Reader is the DB reader context
 type Reader struct {
-	f   *os.File
-	jd  *json.Decoder
-	now time.Time
-	ql  *queryLog
+	ql *queryLog
+
+	f         *os.File
+	jd        *json.Decoder
+	now       time.Time
+	validFrom int64 // UNIX time (ns)
+	olderThan int64 // UNIX time (ns)

 	files []string
 	ifile int

-	count uint64 // returned elements counter
+	limit        uint64
+	count        uint64 // counter for returned elements
+	latest       bool   // return the latest entries
+	filePrepared bool
+
+	searching     bool       // we're searching for an entry with an exact time stamp
+	fseeker       fileSeeker // file seeker object
+	fpos          uint64     // current file offset
+	nSeekRequests uint32     // number of Seek() requests made (finding a new line doesn't count)
 }
-// OpenReader locks the file and returns reader object or nil on error
+type fileSeeker struct {
+	target uint64 // target value
+
+	pos     uint64 // current offset, may be adjusted by user for increased accuracy
+	lastpos uint64 // the last offset returned
+	lo      uint64 // low boundary offset
+	hi      uint64 // high boundary offset
+}
+
+// OpenReader - return reader object
 func (l *queryLog) OpenReader() *Reader {
 	r := Reader{}
 	r.ql = l
 	r.now = time.Now()
+	r.validFrom = r.now.Unix() - int64(l.conf.Interval*60*60)
+	r.validFrom *= 1000000000
+	r.files = []string{
+		r.ql.logFile,
+		r.ql.logFile + ".1",
+	}
 	return &r
 }
-// Close closes the reader
+// Close - close the reader
 func (r *Reader) Close() {
 	elapsed := time.Since(r.now)
 	var perunit time.Duration
 	if r.count > 0 {
 		perunit = elapsed / time.Duration(r.count)
 	}
-	log.Debug("querylog: read %d entries in %v, %v/entry",
-		r.count, elapsed, perunit)
+	log.Debug("querylog: read %d entries in %v, %v/entry, seek-reqs:%d",
+		r.count, elapsed, perunit, r.nSeekRequests)

 	if r.f != nil {
 		r.f.Close()
 	}
 }
-// BeginRead starts reading
-func (r *Reader) BeginRead() {
-	r.files = []string{
-		r.ql.logFile,
-		r.ql.logFile + ".1",
-	}
+// BeginRead - start reading
+// olderThan: stop returning entries when an entry with this time is reached
+// count: minimum number of entries to return
+func (r *Reader) BeginRead(olderThan time.Time, count uint64) {
+	r.olderThan = olderThan.UnixNano()
+	r.latest = olderThan.IsZero()
+	r.limit = count
+	if r.latest {
+		r.olderThan = r.now.UnixNano()
+	}
+	r.filePrepared = false
+	r.searching = false
+	r.jd = nil
 }
-// Next returns the next entry or nil if reading is finished
+// BeginReadPrev - start reading the previous data chunk
func (r *Reader) BeginReadPrev(olderThan time.Time, count uint64) {
r.olderThan = olderThan.UnixNano()
r.latest = olderThan.IsZero()
r.limit = count
if r.latest {
r.olderThan = r.now.UnixNano()
}
off := r.fpos - maxEntrySize*(r.limit+1)
if int64(off) < maxEntrySize {
off = 0
}
r.fpos = uint64(off)
log.Debug("QueryLog: seek: %x", off)
_, err := r.f.Seek(int64(off), io.SeekStart)
if err != nil {
log.Error("file.Seek: %s: %s", r.files[r.ifile], err)
return
}
r.nSeekRequests++
r.seekToNewLine()
r.fseeker.pos = r.fpos
r.filePrepared = true
r.searching = false
r.jd = nil
}
// Perform binary seek
// Return 0: success; 1: seek required; -1: error
func (fs *fileSeeker) seekBinary(cur uint64) int32 {
log.Debug("QueryLog: seek: tgt=%x cur=%x, %x: [%x..%x]", fs.target, cur, fs.pos, fs.lo, fs.hi)
off := uint64(0)
if fs.pos >= fs.lo && fs.pos < fs.hi {
if cur == fs.target {
return 0
} else if cur < fs.target {
fs.lo = fs.pos + 1
} else {
fs.hi = fs.pos
}
off = fs.lo + (fs.hi-fs.lo)/2
} else {
// we didn't find another entry from the last file offset: now return the boundary beginning
off = fs.lo
}
if off == fs.lastpos {
return -1
}
fs.lastpos = off
fs.pos = off
return 1
}
// Seek to a new line
func (r *Reader) seekToNewLine() bool {
b := make([]byte, maxEntrySize*2)
_, err := r.f.Read(b)
if err != nil {
log.Error("QueryLog: file.Read: %s: %s", r.files[r.ifile], err)
return false
}
off := bytes.IndexByte(b, '\n') + 1
if off == 0 {
log.Error("QueryLog: Can't find a new line: %s", r.files[r.ifile])
return false
}
r.fpos += uint64(off)
log.Debug("QueryLog: seek: %x (+%d)", r.fpos, off)
_, err = r.f.Seek(int64(r.fpos), io.SeekStart)
if err != nil {
log.Error("QueryLog: file.Seek: %s: %s", r.files[r.ifile], err)
return false
}
return true
}
// Open a file
func (r *Reader) openFile() bool {
var err error
fn := r.files[r.ifile]
r.f, err = os.Open(fn)
if err != nil {
if !os.IsNotExist(err) {
log.Error("QueryLog: Failed to open file \"%s\": %s", fn, err)
}
return false
}
return true
}
// Seek to the needed position
func (r *Reader) prepareRead() bool {
fn := r.files[r.ifile]
fi, err := r.f.Stat()
if err != nil {
log.Error("QueryLog: file.Stat: %s: %s", fn, err)
return false
}
fsize := uint64(fi.Size())
off := uint64(0)
if r.latest {
// read data from the end of file
off = fsize - maxEntrySize*(r.limit+1)
if int64(off) < maxEntrySize {
off = 0
}
r.fpos = uint64(off)
log.Debug("QueryLog: seek: %x", off)
_, err = r.f.Seek(int64(off), io.SeekStart)
if err != nil {
log.Error("QueryLog: file.Seek: %s: %s", fn, err)
return false
}
} else {
// start searching in file: we'll read the first chunk of data from the middle of file
r.searching = true
r.fseeker = fileSeeker{}
r.fseeker.target = uint64(r.olderThan)
r.fseeker.hi = fsize
rc := r.fseeker.seekBinary(0)
r.fpos = r.fseeker.pos
if rc == 1 {
_, err = r.f.Seek(int64(r.fpos), io.SeekStart)
if err != nil {
log.Error("QueryLog: file.Seek: %s: %s", fn, err)
return false
}
}
}
r.nSeekRequests++
if !r.seekToNewLine() {
return false
}
r.fseeker.pos = r.fpos
return true
}
// Next - return the next entry or nil if reading is finished
 func (r *Reader) Next() *logEntry { // nolint
 	var err error
 	for {
@ -234,15 +417,19 @@ func (r *Reader) Next() *logEntry { // nolint
 			if r.ifile == len(r.files) {
 				return nil
 			}
-			fn := r.files[r.ifile]
-			r.f, err = os.Open(fn)
-			if err != nil {
-				log.Error("Failed to open file \"%s\": %s", fn, err)
+			if !r.openFile() {
 				r.ifile++
 				continue
 			}
 		}

+		if !r.filePrepared {
+			if !r.prepareRead() {
+				return nil
+			}
+			r.filePrepared = true
+		}
+
 		// open decoder if needed
 		if r.jd == nil {
 			r.jd = json.NewDecoder(r.f)
@ -251,20 +438,60 @@ func (r *Reader) Next() *logEntry { // nolint
 		// check if there's data
 		if !r.jd.More() {
 			r.jd = nil
-			r.f.Close()
-			r.f = nil
-			r.ifile++
-			continue
+			return nil
 		}

 		// read data
 		var entry logEntry
 		err = r.jd.Decode(&entry)
 		if err != nil {
-			log.Error("Failed to decode: %s", err)
-			// next entry can be fine, try more
-			continue
+			log.Error("QueryLog: Failed to decode: %s", err)
+			r.jd = nil
+			return nil
 		}
t := entry.Time.UnixNano()
if r.searching {
r.jd = nil
rr := r.fseeker.seekBinary(uint64(t))
r.fpos = r.fseeker.pos
if rr < 0 {
log.Error("QueryLog: File seek error: can't find the target entry: %s", r.files[r.ifile])
return nil
} else if rr == 0 {
// We found the target entry.
// We'll start reading the previous chunk of data.
r.searching = false
off := r.fpos - (maxEntrySize * (r.limit + 1))
if int64(off) < maxEntrySize {
off = 0
}
r.fpos = off
}
_, err = r.f.Seek(int64(r.fpos), io.SeekStart)
if err != nil {
log.Error("QueryLog: file.Seek: %s: %s", r.files[r.ifile], err)
return nil
}
r.nSeekRequests++
if !r.seekToNewLine() {
return nil
}
r.fseeker.pos = r.fpos
 			continue
} }
if t < r.validFrom {
continue
}
if t >= r.olderThan {
return nil
}
 		r.count++
 		return &entry
} }
@ -274,57 +501,3 @@ func (r *Reader) Next() *logEntry { // nolint
func (r *Reader) Total() int { func (r *Reader) Total() int {
return 0 return 0
} }
-// Fill cache from file
-func (l *queryLog) fillFromFile() {
-	now := time.Now()
-	validFrom := now.Unix() - int64(l.conf.Interval*60*60)
-	r := l.OpenReader()
-	if r == nil {
-		return
-	}
-	r.BeginRead()
-	for {
-		entry := r.Next()
-		if entry == nil {
-			break
-		}
-		if entry.Time.Unix() < validFrom {
-			continue
-		}
-		if len(entry.Question) == 0 {
-			log.Printf("entry question is absent, skipping")
-			continue
-		}
-		if entry.Time.After(now) {
-			log.Printf("t %v vs %v is in the future, ignoring", entry.Time, now)
-			continue
-		}
-		q := new(dns.Msg)
-		if err := q.Unpack(entry.Question); err != nil {
-			log.Printf("failed to unpack dns message question: %s", err)
-			continue
-		}
-		if len(q.Question) != 1 {
-			log.Printf("malformed dns message, has no questions, skipping")
-			continue
-		}
-		l.lock.Lock()
-		l.cache = append(l.cache, entry)
-		if len(l.cache) > queryLogSize {
-			toremove := len(l.cache) - queryLogSize
-			l.cache = l.cache[toremove:]
-		}
-		l.lock.Unlock()
-	}
-	r.Close()
-}


@ -3,6 +3,7 @@ package querylog
 import (
 	"net"
 	"testing"
+	"time"

 	"github.com/AdguardTeam/AdGuardHome/dnsfilter"
 	"github.com/miekg/dns"
@ -36,7 +37,10 @@ func TestQueryLog(t *testing.T) {
 	res := dnsfilter.Result{}
 	l.Add(&q, &a, &res, 0, nil, "upstream")

-	d := l.GetData()
+	params := GetDataParams{
OlderThan: time.Now(),
}
d := l.GetData(params)
 	m := d[0]
 	mq := m["question"].(map[string]interface{})
 	assert.True(t, mq["host"].(string) == "example.org")