1fb497adf8
Added the one year time range to the analytics panes. Dates are now shown on detail panes for Request, Topic and Post analytics instead of times for higher time ranges. The labels should now show up properly for the three month time range charts. The paginator should now work properly for login logs. Pushed a potential fix for subsequent pages with only one item not showing up. Executing a search query should now change the title. Fixed a bug where the user agent parser choked on : characters. Fixed the ordering of items in the multi-series charts which caused the most important items to get booted out rather than the least important ones. Tweaked the padding on the User Manager items for Nox so they won't break onto multiple lines so readily. Fixed a potential issue with topic list titles. Fixed a potential crash bug in the Forum Analytics for deleted forums. Added the Count method to LoginLogStore. Continued work on the ElasticSearch mapping setup utility. Added the topic_list.search_head phrase. Added the panel_statistics_time_range_one_year phrase.
199 lines
5.3 KiB
Go
199 lines
5.3 KiB
Go
package common
|
|
|
|
import (
|
|
"database/sql"
|
|
"time"
|
|
|
|
"github.com/Azareal/Gosora/query_gen"
|
|
)
|
|
|
|
var RegLogs RegLogStore
|
|
var LoginLogs LoginLogStore
|
|
|
|
// RegLogItem is one row of the registration_logs table: a single
// registration attempt, successful or otherwise.
type RegLogItem struct {
	ID            int    // rlid primary key
	Username      string // username submitted with the attempt
	Email         string // email submitted with the attempt
	FailureReason string // why the attempt failed; presumably empty on success — TODO confirm
	Success       bool   // whether the registration went through
	IPAddress     string // IP the attempt came from
	DoneAt        string // when it happened; formatted "2006-01-02 15:04:05" by SQLRegLogStore.GetOffset
}
|
|
|
|
// RegLogStmts holds the prepared statements used by RegLogItem's
// Commit and Create methods.
type RegLogStmts struct {
	update *sql.Stmt // UPDATE registration_logs ... WHERE rlid = ?
	create *sql.Stmt // INSERT INTO registration_logs, doneAt set via UTC_TIMESTAMP()
}
|
|
|
|
// regLogStmts is populated by a DbInits hook when the database layer
// comes up; RegLogItem methods panic if used before that.
var regLogStmts RegLogStmts
|
|
|
|
func init() {
	// Register a hook that prepares the registration-log statements once
	// the query accumulator is available.
	DbInits.Add(func(acc *qgen.Accumulator) error {
		regLogStmts = RegLogStmts{
			update: acc.Update("registration_logs").Set("username = ?, email = ?, failureReason = ?, success = ?").Where("rlid = ?").Prepare(),
			create: acc.Insert("registration_logs").Columns("username, email, failureReason, success, ipaddress, doneAt").Fields("?,?,?,?,?,UTC_TIMESTAMP()").Prepare(),
		}
		// Surface the first error hit while building either statement.
		return acc.FirstError()
	})
}
|
|
|
|
// TODO: Reload this item in the store, probably doesn't matter right now, but it might when we start caching this stuff in memory
|
|
// ! Retroactive updates of date are not permitted for integrity reasons
|
|
func (log *RegLogItem) Commit() error {
|
|
_, err := regLogStmts.update.Exec(log.Username, log.Email, log.FailureReason, log.Success, log.ID)
|
|
return err
|
|
}
|
|
|
|
func (log *RegLogItem) Create() (id int, err error) {
|
|
res, err := regLogStmts.create.Exec(log.Username, log.Email, log.FailureReason, log.Success, log.IPAddress)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
id64, err := res.LastInsertId()
|
|
log.ID = int(id64)
|
|
return log.ID, err
|
|
}
|
|
|
|
// RegLogStore provides read access to the registration log.
type RegLogStore interface {
	// GlobalCount returns the total number of registration-log entries.
	GlobalCount() (logCount int)
	// GetOffset returns a page of entries (the SQL implementation orders
	// them by doneAt descending, i.e. newest first).
	GetOffset(offset int, perPage int) (logs []RegLogItem, err error)
}
|
|
|
|
// SQLRegLogStore is the database-backed implementation of RegLogStore.
type SQLRegLogStore struct {
	count     *sql.Stmt // SELECT COUNT over registration_logs
	getOffset *sql.Stmt // paginated SELECT ordered by doneAt DESC
}
|
|
|
|
// NewRegLogStore prepares the statements backing a SQLRegLogStore.
// The returned error is the accumulator's first error, if any.
func NewRegLogStore(acc *qgen.Accumulator) (*SQLRegLogStore, error) {
	return &SQLRegLogStore{
		count:     acc.Count("registration_logs").Prepare(),
		getOffset: acc.Select("registration_logs").Columns("rlid, username, email, failureReason, success, ipaddress, doneAt").Orderby("doneAt DESC").Limit("?,?").Prepare(),
	}, acc.FirstError()
}
|
|
|
|
func (store *SQLRegLogStore) GlobalCount() (logCount int) {
|
|
err := store.count.QueryRow().Scan(&logCount)
|
|
if err != nil {
|
|
LogError(err)
|
|
}
|
|
return logCount
|
|
}
|
|
|
|
func (store *SQLRegLogStore) GetOffset(offset int, perPage int) (logs []RegLogItem, err error) {
|
|
rows, err := store.getOffset.Query(offset, perPage)
|
|
if err != nil {
|
|
return logs, err
|
|
}
|
|
defer rows.Close()
|
|
|
|
for rows.Next() {
|
|
var log RegLogItem
|
|
var doneAt time.Time
|
|
err := rows.Scan(&log.ID, &log.Username, &log.Email, &log.FailureReason, &log.Success, &log.IPAddress, &doneAt)
|
|
if err != nil {
|
|
return logs, err
|
|
}
|
|
log.DoneAt = doneAt.Format("2006-01-02 15:04:05")
|
|
logs = append(logs, log)
|
|
}
|
|
return logs, rows.Err()
|
|
}
|
|
|
|
// LoginLogItem is one row of the login_logs table: a single login
// attempt for a user.
type LoginLogItem struct {
	ID        int    // lid primary key
	UID       int    // ID of the user the attempt was for
	Success   bool   // whether the login succeeded
	IPAddress string // IP the attempt came from
	DoneAt    string // when it happened; formatted "2006-01-02 15:04:05" by SQLLoginLogStore.GetOffset
}
|
|
|
|
// LoginLogStmts holds the prepared statements used by LoginLogItem's
// Commit and Create methods.
type LoginLogStmts struct {
	update *sql.Stmt // UPDATE login_logs ... WHERE lid = ?
	create *sql.Stmt // INSERT INTO login_logs, doneAt set via UTC_TIMESTAMP()
}
|
|
|
|
// loginLogStmts is populated by a DbInits hook when the database layer
// comes up; LoginLogItem methods panic if used before that.
var loginLogStmts LoginLogStmts
|
|
|
|
func init() {
	// Register a hook that prepares the login-log statements once the
	// query accumulator is available.
	DbInits.Add(func(acc *qgen.Accumulator) error {
		loginLogStmts = LoginLogStmts{
			update: acc.Update("login_logs").Set("uid = ?, success = ?").Where("lid = ?").Prepare(),
			create: acc.Insert("login_logs").Columns("uid, success, ipaddress, doneAt").Fields("?,?,?,UTC_TIMESTAMP()").Prepare(),
		}
		// Surface the first error hit while building either statement.
		return acc.FirstError()
	})
}
|
|
|
|
// TODO: Reload this item in the store, probably doesn't matter right now, but it might when we start caching this stuff in memory
|
|
// ! Retroactive updates of date are not permitted for integrity reasons
|
|
func (log *LoginLogItem) Commit() error {
|
|
_, err := loginLogStmts.update.Exec(log.UID, log.Success, log.ID)
|
|
return err
|
|
}
|
|
|
|
func (log *LoginLogItem) Create() (id int, err error) {
|
|
res, err := loginLogStmts.create.Exec(log.UID, log.Success, log.IPAddress)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
id64, err := res.LastInsertId()
|
|
log.ID = int(id64)
|
|
return log.ID, err
|
|
}
|
|
|
|
// LoginLogStore provides read access to the login log.
type LoginLogStore interface {
	// GlobalCount returns the total number of login-log entries.
	GlobalCount() (logCount int)
	// Count returns the number of entries for the given user.
	Count(uid int) (logCount int)
	// GetOffset returns a page of entries for the given user (the SQL
	// implementation orders them by doneAt descending, i.e. newest first).
	GetOffset(uid int, offset int, perPage int) (logs []LoginLogItem, err error)
}
|
|
|
|
// SQLLoginLogStore is the database-backed implementation of LoginLogStore.
type SQLLoginLogStore struct {
	count           *sql.Stmt // SELECT COUNT over login_logs
	countForUser    *sql.Stmt // SELECT COUNT over login_logs for one uid
	getOffsetByUser *sql.Stmt // paginated per-user SELECT ordered by doneAt DESC
}
|
|
|
|
// NewLoginLogStore prepares the statements backing a SQLLoginLogStore.
// The returned error is the accumulator's first error, if any.
func NewLoginLogStore(acc *qgen.Accumulator) (*SQLLoginLogStore, error) {
	return &SQLLoginLogStore{
		count:           acc.Count("login_logs").Prepare(),
		countForUser:    acc.Count("login_logs").Where("uid = ?").Prepare(),
		getOffsetByUser: acc.Select("login_logs").Columns("lid, success, ipaddress, doneAt").Where("uid = ?").Orderby("doneAt DESC").Limit("?,?").Prepare(),
	}, acc.FirstError()
}
|
|
|
|
func (store *SQLLoginLogStore) GlobalCount() (logCount int) {
|
|
err := store.count.QueryRow().Scan(&logCount)
|
|
if err != nil {
|
|
LogError(err)
|
|
}
|
|
return logCount
|
|
}
|
|
|
|
func (store *SQLLoginLogStore) Count(uid int) (logCount int) {
|
|
err := store.countForUser.QueryRow(uid).Scan(&logCount)
|
|
if err != nil {
|
|
LogError(err)
|
|
}
|
|
return logCount
|
|
}
|
|
|
|
func (store *SQLLoginLogStore) GetOffset(uid int, offset int, perPage int) (logs []LoginLogItem, err error) {
|
|
rows, err := store.getOffsetByUser.Query(uid, offset, perPage)
|
|
if err != nil {
|
|
return logs, err
|
|
}
|
|
defer rows.Close()
|
|
|
|
for rows.Next() {
|
|
var log = LoginLogItem{UID: uid}
|
|
var doneAt time.Time
|
|
err := rows.Scan(&log.ID, &log.Success, &log.IPAddress, &doneAt)
|
|
if err != nil {
|
|
return logs, err
|
|
}
|
|
log.DoneAt = doneAt.Format("2006-01-02 15:04:05")
|
|
logs = append(logs, log)
|
|
}
|
|
return logs, rows.Err()
|
|
}
|