Add ClearIPs() to TopicStore.

Add LockMany() to TopicStore.
Add RemoveMany() to TopicCache.
Add ClearIPs() to PollStore.
Add Purge() to RegLogStore.
Add DeleteOlderThanDays() to RegLogStore.
Add Purge() to LoginLogStore.
Add DeleteOlderThanDays() to LoginLogStore.
Add SetInt() to MetaStore.
Add SetInt64() to MetaStore.

Use Createf() in RegLogItem.Create().
Use Count() in SQLRegLogStore.Count().
Use Count() in SQLLoginLogStore.Count().
Use Countf() in SQLLoginLogStore.CountUser().

Add trailing triple dot parser test case.
Remove a block of commented code in the gen router.
Reduce boilerplate.
Azareal 2021-04-27 20:20:26 +10:00
parent 1b7d6ac724
commit 6870d242e9
14 changed files with 284 additions and 176 deletions
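The Count(), Countf() and Createf() helpers referenced in the message above are not part of this diff, so their implementations are not shown here. Judging by the call sites below (Count(s.count), Countf(s.countForUser, uid), Createf(regLogStmts.create, ...)), they plausibly wrap the scan-and-log and Exec/LastInsertId boilerplate roughly like this sketch; the real helpers live elsewhere in the package and may differ:

// Sketch only: signatures inferred from the call sites in this commit, not the
// repository's actual implementations. LogError is the package's existing error logger.
func Count(stmt *sql.Stmt) (count int) {
	if e := stmt.QueryRow().Scan(&count); e != nil {
		LogError(e)
	}
	return count
}

func Countf(stmt *sql.Stmt, args ...interface{}) (count int) {
	if e := stmt.QueryRow(args...).Scan(&count); e != nil {
		LogError(e)
	}
	return count
}

func Createf(stmt *sql.Stmt, args ...interface{}) (int, error) {
	res, e := stmt.Exec(args...)
	if e != nil {
		return 0, e
	}
	id64, e := res.LastInsertId()
	return int(id64), e
}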

@@ -48,18 +48,18 @@ func (co *DefaultForumViewCounter) Tick() error {
 			l.Lock()
 			delete(m, fid)
 			l.Unlock()
-			err := co.insertChunk(count, fid)
-			if err != nil {
-				return errors.Wrap(errors.WithStack(err),"forum counter")
+			e := co.insertChunk(count, fid)
+			if e != nil {
+				return errors.Wrap(errors.WithStack(e),"forum counter")
 			}
 			l.RLock()
 		}
 		l.RUnlock()
 		return nil
 	}
-	err := cLoop(&co.oddLock,co.oddMap)
-	if err != nil {
-		return err
+	e := cLoop(&co.oddLock,co.oddMap)
+	if e != nil {
+		return e
 	}
 	return cLoop(&co.evenLock,co.evenMap)
 }
@@ -69,8 +69,8 @@ func (co *DefaultForumViewCounter) insertChunk(count, forum int) error {
 		return nil
 	}
 	c.DebugLogf("Inserting a vchunk with a count of %d for forum %d", count, forum)
-	_, err := co.insert.Exec(count, forum)
-	return err
+	_, e := co.insert.Exec(count, forum)
+	return e
 }
 
 func (co *DefaultForumViewCounter) Bump(fid int) {

@@ -127,9 +127,9 @@ func NewDefaultLangViewCounter(acc *qgen.Accumulator) (*DefaultLangViewCounter,
 func (co *DefaultLangViewCounter) Tick() error {
 	for id := 0; id < len(co.buckets); id++ {
 		count := atomic.SwapInt64(&co.buckets[id], 0)
-		err := co.insertChunk(count, id) // TODO: Bulk insert for speed?
-		if err != nil {
-			return errors.Wrap(errors.WithStack(err), "langview counter")
+		e := co.insertChunk(count, id) // TODO: Bulk insert for speed?
+		if e != nil {
+			return errors.Wrap(errors.WithStack(e), "langview counter")
 		}
 	}
 	return nil
@@ -144,8 +144,8 @@ func (co *DefaultLangViewCounter) insertChunk(count int64, id int) error {
 		langCode = "none"
 	}
 	c.DebugLogf("Inserting a vchunk with a count of %d for lang %s (%d)", count, langCode, id)
-	_, err := co.insert.Exec(count, langCode)
-	return err
+	_, e := co.insert.Exec(count, langCode)
+	return e
 }
 
 func (co *DefaultLangViewCounter) Bump(langCode string) (validCode bool) {

@@ -54,7 +54,7 @@ func NewMemoryCounter(acc *qgen.Accumulator) (*DefaultMemoryCounter, error) {
 	return co, acc.FirstError()
 }
 
-func (co *DefaultMemoryCounter) Tick() (err error) {
+func (co *DefaultMemoryCounter) Tick() (e error) {
 	var m runtime.MemStats
 	runtime.ReadMemStats(&m)
 	var rTotMem, rTotCount, rStackMem, rStackCount, rHeapMem, rHeapCount uint64
@@ -83,9 +83,9 @@ func (co *DefaultMemoryCounter) Tick() (err error) {
 	avgHeap = (rHeapMem + m.HeapAlloc) / (rHeapCount + 1)
 	c.DebugLogf("Inserting a memchunk with a value of %d - %d - %d", avgMem, avgStack, avgHeap)
-	_, err = co.insert.Exec(avgMem, avgStack, avgHeap)
-	if err != nil {
-		return errors.Wrap(errors.WithStack(err), "mem counter")
+	_, e = co.insert.Exec(avgMem, avgStack, avgHeap)
+	if e != nil {
+		return errors.Wrap(errors.WithStack(e), "mem counter")
 	}
 	return nil
 }

@@ -30,7 +30,7 @@ func NewTopicCounter() (*DefaultTopicCounter, error) {
 	return co, acc.FirstError()
 }
 
-func (co *DefaultTopicCounter) Tick() (err error) {
+func (co *DefaultTopicCounter) Tick() (e error) {
 	oldBucket := co.currentBucket
 	var nextBucket int64 // 0
 	if co.currentBucket == 0 {
@@ -42,9 +42,9 @@ func (co *DefaultTopicCounter) Tick() (err error) {
 	previousViewChunk := co.buckets[oldBucket]
 	atomic.AddInt64(&co.buckets[oldBucket], -previousViewChunk)
 
-	err = co.insertChunk(previousViewChunk)
-	if err != nil {
-		return errors.Wrap(errors.WithStack(err), "topics counter")
+	e = co.insertChunk(previousViewChunk)
+	if e != nil {
+		return errors.Wrap(errors.WithStack(e), "topics counter")
 	}
 	return nil
 }
@@ -58,6 +58,6 @@ func (co *DefaultTopicCounter) insertChunk(count int64) error {
 		return nil
 	}
 	c.DebugLogf("Inserting a topicchunk with a count of %d", count)
-	_, err := co.insert.Exec(count)
-	return err
+	_, e := co.insert.Exec(count)
+	return e
 }

@@ -10,6 +10,8 @@ import (
 type MetaStore interface {
 	Get(name string) (val string, err error)
 	Set(name, val string) error
+	SetInt(name string, val int) error
+	SetInt64(name string, val int64) error
 }
 
 type DefaultMetaStore struct {
@@ -19,28 +21,41 @@ type DefaultMetaStore struct {
 }
 
 func NewDefaultMetaStore(acc *qgen.Accumulator) (*DefaultMetaStore, error) {
+	t := "meta"
 	m := &DefaultMetaStore{
-		get: acc.Select("meta").Columns("value").Where("name = ?").Prepare(),
-		set: acc.Update("meta").Set("value = ?").Where("name = ?").Prepare(),
-		add: acc.Insert("meta").Columns("name,value").Fields("?,''").Prepare(),
+		get: acc.Select(t).Columns("value").Where("name=?").Prepare(),
+		set: acc.Update(t).Set("value=?").Where("name=?").Prepare(),
+		add: acc.Insert(t).Columns("name,value").Fields("?,''").Prepare(),
 	}
 	return m, acc.FirstError()
 }
 
-func (s *DefaultMetaStore) Get(name string) (val string, err error) {
-	err = s.get.QueryRow(name).Scan(&val)
-	return val, err
+func (s *DefaultMetaStore) Get(name string) (val string, e error) {
+	e = s.get.QueryRow(name).Scan(&val)
+	return val, e
 }
 
 // TODO: Use timestamped rows as a more robust method of ensuring data integrity
+func (s *DefaultMetaStore) setVal(name string, val interface{}) error {
+	_, e := s.Get(name)
+	if e == sql.ErrNoRows {
+		_, e := s.add.Exec(name)
+		if e != nil {
+			return e
+		}
+	}
+	_, e = s.set.Exec(val, name)
+	return e
+}
+
 func (s *DefaultMetaStore) Set(name, val string) error {
-	_, err := s.Get(name)
-	if err == sql.ErrNoRows {
-		_, err := s.add.Exec(name)
-		if err != nil {
-			return err
-		}
-	}
-	_, err = s.set.Exec(val, name)
-	return err
+	return s.setVal(name, val)
+}
+
+func (s *DefaultMetaStore) SetInt(name string, val int) error {
+	return s.setVal(name, val)
+}
+
+func (s *DefaultMetaStore) SetInt64(name string, val int64) error {
+	return s.setVal(name, val)
 }
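Both new setters funnel into the shared setVal() helper, which upserts the row (insert an empty value on sql.ErrNoRows, then update it). A usage sketch, with illustrative key names and the MetaStore passed in rather than assuming any particular global:

// Hypothetical caller; the key names are examples, not keys the forum actually uses.
func saveLastBackup(m MetaStore, unix int64, runs int) error {
	if e := m.SetInt64("example_last_backup_unix", unix); e != nil {
		return e
	}
	return m.SetInt("example_backup_runs", runs)
}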

@@ -31,7 +31,7 @@ func init() {
 	DbInits.Add(func(acc *qgen.Accumulator) error {
 		rl := "registration_logs"
 		regLogStmts = RegLogStmts{
-			update: acc.Update(rl).Set("username=?,email=?,failureReason=?,success=?").Where("rlid=?").Prepare(),
+			update: acc.Update(rl).Set("username=?,email=?,failureReason=?,success=?,doneAt=?").Where("rlid=?").Prepare(),
 			create: acc.Insert(rl).Columns("username,email,failureReason,success,ipaddress,doneAt").Fields("?,?,?,?,?,UTC_TIMESTAMP()").Prepare(),
 		}
 		return acc.FirstError()
@@ -40,29 +40,32 @@ func init() {
 // TODO: Reload this item in the store, probably doesn't matter right now, but it might when we start caching this stuff in memory
 // ! Retroactive updates of date are not permitted for integrity reasons
+// TODO: Do we even use this anymore or can we just make the logs immutable (except for deletes) for simplicity sake?
 func (l *RegLogItem) Commit() error {
-	_, err := regLogStmts.update.Exec(l.Username, l.Email, l.FailureReason, l.Success, l.ID)
-	return err
+	_, e := regLogStmts.update.Exec(l.Username, l.Email, l.FailureReason, l.Success, l.DoneAt, l.ID)
+	return e
 }
 
-func (l *RegLogItem) Create() (id int, err error) {
-	res, err := regLogStmts.create.Exec(l.Username, l.Email, l.FailureReason, l.Success, l.IP)
-	if err != nil {
-		return 0, err
-	}
-	id64, err := res.LastInsertId()
-	l.ID = int(id64)
-	return l.ID, err
+func (l *RegLogItem) Create() (id int, e error) {
+	id, e = Createf(regLogStmts.create, l.Username, l.Email, l.FailureReason, l.Success, l.IP)
+	l.ID = id
+	return l.ID, e
 }
 
 type RegLogStore interface {
 	Count() (count int)
 	GetOffset(offset, perPage int) (logs []RegLogItem, err error)
+	Purge() error
+	DeleteOlderThanDays(days int) error
 }
 
 type SQLRegLogStore struct {
 	count *sql.Stmt
 	getOffset *sql.Stmt
+	purge *sql.Stmt
+	deleteOlderThanDays *sql.Stmt
 }
@@ -70,30 +73,29 @@ func NewRegLogStore(acc *qgen.Accumulator) (*SQLRegLogStore, error) {
 	return &SQLRegLogStore{
 		count: acc.Count(rl).Prepare(),
 		getOffset: acc.Select(rl).Columns("rlid,username,email,failureReason,success,ipaddress,doneAt").Orderby("doneAt DESC").Limit("?,?").Prepare(),
+		purge: acc.Purge(rl),
+		deleteOlderThanDays: acc.Delete(rl).DateOlderThanQ("doneAt", "day").Prepare(),
 	}, acc.FirstError()
 }
 
 func (s *SQLRegLogStore) Count() (count int) {
-	err := s.count.QueryRow().Scan(&count)
-	if err != nil {
-		LogError(err)
-	}
-	return count
+	return Count(s.count)
 }
 
-func (s *SQLRegLogStore) GetOffset(offset, perPage int) (logs []RegLogItem, err error) {
-	rows, err := s.getOffset.Query(offset, perPage)
-	if err != nil {
-		return logs, err
+func (s *SQLRegLogStore) GetOffset(offset, perPage int) (logs []RegLogItem, e error) {
+	rows, e := s.getOffset.Query(offset, perPage)
+	if e != nil {
+		return logs, e
 	}
 	defer rows.Close()
 	for rows.Next() {
 		var l RegLogItem
 		var doneAt time.Time
-		err := rows.Scan(&l.ID, &l.Username, &l.Email, &l.FailureReason, &l.Success, &l.IP, &doneAt)
-		if err != nil {
-			return logs, err
+		e := rows.Scan(&l.ID, &l.Username, &l.Email, &l.FailureReason, &l.Success, &l.IP, &doneAt)
+		if e != nil {
+			return logs, e
 		}
 		l.DoneAt = doneAt.Format("2006-01-02 15:04:05")
 		logs = append(logs, l)
@@ -101,6 +103,17 @@ func (s *SQLRegLogStore) GetOffset(offset, perPage int) (logs []RegLogItem, err
 	return logs, rows.Err()
 }
 
+func (s *SQLRegLogStore) DeleteOlderThanDays(days int) error {
+	_, e := s.deleteOlderThanDays.Exec(days)
+	return e
+}
+
+// Delete all registration logs
+func (s *SQLRegLogStore) Purge() error {
+	_, e := s.purge.Exec()
+	return e
+}
+
 type LoginLogItem struct {
 	ID int
 	UID int
@@ -120,7 +133,7 @@ func init() {
 	DbInits.Add(func(acc *qgen.Accumulator) error {
 		ll := "login_logs"
 		loginLogStmts = LoginLogStmts{
-			update: acc.Update(ll).Set("uid=?,success=?").Where("lid=?").Prepare(),
+			update: acc.Update(ll).Set("uid=?,success=?,doneAt=?").Where("lid=?").Prepare(),
 			create: acc.Insert(ll).Columns("uid,success,ipaddress,doneAt").Fields("?,?,?,UTC_TIMESTAMP()").Prepare(),
 		}
 		return acc.FirstError()
@@ -130,30 +143,36 @@ func init() {
 // TODO: Reload this item in the store, probably doesn't matter right now, but it might when we start caching this stuff in memory
 // ! Retroactive updates of date are not permitted for integrity reasons
 func (l *LoginLogItem) Commit() error {
-	_, err := loginLogStmts.update.Exec(l.UID, l.Success, l.ID)
-	return err
+	_, e := loginLogStmts.update.Exec(l.UID, l.Success, l.DoneAt, l.ID)
+	return e
 }
 
-func (l *LoginLogItem) Create() (id int, err error) {
-	res, err := loginLogStmts.create.Exec(l.UID, l.Success, l.IP)
-	if err != nil {
-		return 0, err
+func (l *LoginLogItem) Create() (id int, e error) {
+	res, e := loginLogStmts.create.Exec(l.UID, l.Success, l.IP)
+	if e != nil {
+		return 0, e
 	}
-	id64, err := res.LastInsertId()
+	id64, e := res.LastInsertId()
 	l.ID = int(id64)
-	return l.ID, err
+	return l.ID, e
 }
 
 type LoginLogStore interface {
 	Count() (count int)
 	CountUser(uid int) (count int)
 	GetOffset(uid, offset, perPage int) (logs []LoginLogItem, err error)
+	Purge() error
+	DeleteOlderThanDays(days int) error
 }
 
 type SQLLoginLogStore struct {
 	count *sql.Stmt
 	countForUser *sql.Stmt
 	getOffsetByUser *sql.Stmt
+	purge *sql.Stmt
+	deleteOlderThanDays *sql.Stmt
 }
@@ -162,41 +181,47 @@ func NewLoginLogStore(acc *qgen.Accumulator) (*SQLLoginLogStore, error) {
 		count: acc.Count(ll).Prepare(),
 		countForUser: acc.Count(ll).Where("uid=?").Prepare(),
 		getOffsetByUser: acc.Select(ll).Columns("lid,success,ipaddress,doneAt").Where("uid=?").Orderby("doneAt DESC").Limit("?,?").Prepare(),
+		purge: acc.Purge(ll),
+		deleteOlderThanDays: acc.Delete(ll).DateOlderThanQ("doneAt", "day").Prepare(),
 	}, acc.FirstError()
 }
 
 func (s *SQLLoginLogStore) Count() (count int) {
-	err := s.count.QueryRow().Scan(&count)
-	if err != nil {
-		LogError(err)
-	}
-	return count
+	return Count(s.count)
 }
 
 func (s *SQLLoginLogStore) CountUser(uid int) (count int) {
-	err := s.countForUser.QueryRow(uid).Scan(&count)
-	if err != nil {
-		LogError(err)
-	}
-	return count
+	return Countf(s.countForUser, uid)
 }
 
-func (s *SQLLoginLogStore) GetOffset(uid, offset, perPage int) (logs []LoginLogItem, err error) {
-	rows, err := s.getOffsetByUser.Query(uid, offset, perPage)
-	if err != nil {
-		return logs, err
+func (s *SQLLoginLogStore) GetOffset(uid, offset, perPage int) (logs []LoginLogItem, e error) {
+	rows, e := s.getOffsetByUser.Query(uid, offset, perPage)
+	if e != nil {
+		return logs, e
 	}
 	defer rows.Close()
 	for rows.Next() {
 		l := LoginLogItem{UID: uid}
 		var doneAt time.Time
-		err := rows.Scan(&l.ID, &l.Success, &l.IP, &doneAt)
-		if err != nil {
-			return logs, err
+		e := rows.Scan(&l.ID, &l.Success, &l.IP, &doneAt)
+		if e != nil {
+			return logs, e
 		}
 		l.DoneAt = doneAt.Format("2006-01-02 15:04:05")
 		logs = append(logs, l)
 	}
 	return logs, rows.Err()
 }
+
+func (s *SQLLoginLogStore) DeleteOlderThanDays(days int) error {
+	_, e := s.deleteOlderThanDays.Exec(days)
+	return e
+}
+
+// Delete all login logs
+func (s *SQLLoginLogStore) Purge() error {
+	_, e := s.purge.Exec()
+	return e
+}
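Purge() drops every row while DeleteOlderThanDays() keeps a sliding window, so a retention task only needs the latter. A sketch of such a task; the store variables and the 90-day window are purely illustrative, not part of this commit:

// Hypothetical retention task; rls/lls are whatever RegLogStore and LoginLogStore
// instances the application wires up, and the window is an arbitrary example.
func pruneOldLogs(rls RegLogStore, lls LoginLogStore) error {
	const days = 90
	if e := rls.DeleteOlderThanDays(days); e != nil {
		return e
	}
	return lls.DeleteOlderThanDays(days)
}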

@@ -31,6 +31,9 @@ func (c *NullTopicCache) AddUnsafe(_ *Topic) error {
 func (c *NullTopicCache) Remove(id int) error {
 	return nil
 }
+func (c *NullTopicCache) RemoveMany(ids []int) error {
+	return nil
+}
 func (c *NullTopicCache) RemoveUnsafe(id int) error {
 	return nil
 }

@@ -191,23 +191,23 @@ func RebuildGroupPermissions(g *Group) error {
 	log.Print("Reloading a group")
 
 	// TODO: Avoid re-initting this all the time
-	getGroupPerms, err := qgen.Builder.SimpleSelect("users_groups", "permissions", "gid=?", "", "")
-	if err != nil {
-		return err
+	getGroupPerms, e := qgen.Builder.SimpleSelect("users_groups", "permissions", "gid=?", "", "")
+	if e != nil {
+		return e
 	}
 	defer getGroupPerms.Close()
 
-	err = getGroupPerms.QueryRow(g.ID).Scan(&permstr)
-	if err != nil {
-		return err
+	e = getGroupPerms.QueryRow(g.ID).Scan(&permstr)
+	if e != nil {
+		return e
 	}
 
 	tmpPerms := Perms{
 		//ExtData: make(map[string]bool),
 	}
-	err = json.Unmarshal(permstr, &tmpPerms)
-	if err != nil {
-		return err
+	e = json.Unmarshal(permstr, &tmpPerms)
+	if e != nil {
+		return e
 	}
 	g.Perms = tmpPerms
 	return nil

@@ -26,6 +26,7 @@ type Pollable interface {
 type PollStore interface {
 	Get(id int) (*Poll, error)
 	Exists(id int) bool
+	ClearIPs() error
 	Create(parent Pollable, pollType int, pollOptions map[int]string) (int, error)
 	Reload(id int) error
 	//Count() int
@@ -43,6 +44,8 @@ type DefaultPollStore struct {
 	createPollOption *sql.Stmt
 	delete *sql.Stmt
 	//count *sql.Stmt
+
+	clearIPs *sql.Stmt
 }
@@ -54,11 +57,13 @@ func NewDefaultPollStore(cache PollCache) (*DefaultPollStore, error) {
 	p := "polls"
 	return &DefaultPollStore{
 		cache: cache,
-		get: acc.Select(p).Columns("parentID, parentTable, type, options, votes").Where("pollID=?").Prepare(),
-		exists: acc.Select(p).Columns("pollID").Where("pollID=?").Prepare(),
+		get: acc.Select(p).Columns("parentID,parentTable,type,options,votes").Where("pollID=?").Stmt(),
+		exists: acc.Select(p).Columns("pollID").Where("pollID=?").Stmt(),
 		createPoll: acc.Insert(p).Columns("parentID,parentTable,type,options").Fields("?,?,?,?").Prepare(),
 		createPollOption: acc.Insert("polls_options").Columns("pollID,option,votes").Fields("?,?,0").Prepare(),
 		//count: acc.SimpleCount(p, "", ""),
+		clearIPs: acc.Update("polls_votes").Set("ip=''").Where("ip!=''").Stmt(),
 	}, acc.FirstError()
 }
@@ -144,8 +149,7 @@ func (s *DefaultPollStore) BulkGetMap(ids []int) (list map[int]*Poll, err error)
 	if idCount > len(list) {
 		var sidList string
 		for _, id := range ids {
-			_, ok := list[id]
-			if !ok {
+			if _, ok := list[id]; !ok {
 				sidList += strconv.Itoa(id) + ","
 			}
 		}
@@ -172,18 +176,16 @@ func (s *DefaultPollStore) BulkGetMap(ids []int) (list map[int]*Poll, err error)
 func (s *DefaultPollStore) Reload(id int) error {
 	p := &Poll{ID: id}
 	var optionTxt []byte
-	err := s.get.QueryRow(id).Scan(&p.ParentID, &p.ParentTable, &p.Type, &optionTxt, &p.VoteCount)
-	if err != nil {
-		s.cache.Remove(id)
-		return err
+	e := s.get.QueryRow(id).Scan(&p.ParentID, &p.ParentTable, &p.Type, &optionTxt, &p.VoteCount)
+	if e != nil {
+		_ = s.cache.Remove(id)
+		return e
 	}
-
-	err = json.Unmarshal(optionTxt, &p.Options)
-	if err != nil {
-		s.cache.Remove(id)
-		return err
+	e = json.Unmarshal(optionTxt, &p.Options)
+	if e != nil {
+		_ = s.cache.Remove(id)
+		return e
 	}
-
 	p.QuickOptions = s.unpackOptionsMap(p.Options)
 	_ = s.cache.Set(p)
 	return nil
@@ -197,6 +199,11 @@ func (s *DefaultPollStore) unpackOptionsMap(rawOptions map[int]string) []PollOpt
 	return opts
 }
 
+func (s *DefaultPollStore) ClearIPs() error {
+	_, e := s.clearIPs.Exec()
+	return e
+}
+
 // TODO: Use a transaction for this
 func (s *DefaultPollStore) Create(parent Pollable, pollType int, pollOptions map[int]string) (id int, e error) {
 	// TODO: Move the option names into the polls_options table and get rid of this json sludge?
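ClearIPs() blanks the ip column on polls_votes rows that still carry one, mirroring the ClearIPs() added to TopicStore further down. A minimal sketch of a scrub task calling it, with the store passed in as a parameter rather than assuming a particular global:

// Hypothetical IP scrubber; ps is whatever PollStore instance the app wires up.
func scrubVoteIPs(ps PollStore) error {
	return ps.ClearIPs()
}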

@@ -15,6 +15,7 @@ type TopicCache interface {
 	AddUnsafe(item *Topic) error
 	Remove(id int) error
 	RemoveUnsafe(id int) error
+	RemoveMany(ids []int) error
 	Flush()
 	Length() int
 	SetCapacity(cap int)
@@ -70,16 +71,16 @@ func (s *MemoryTopicCache) BulkGet(ids []int) (list []*Topic) {
 }
 
 // Set overwrites the value of a topic in the cache, whether it's present or not. May return a capacity overflow error.
-func (s *MemoryTopicCache) Set(item *Topic) error {
+func (s *MemoryTopicCache) Set(it *Topic) error {
 	s.Lock()
-	_, ok := s.items[item.ID]
+	_, ok := s.items[it.ID]
 	if ok {
-		s.items[item.ID] = item
+		s.items[it.ID] = it
 	} else if int(s.length) >= s.capacity {
 		s.Unlock()
 		return ErrStoreCapacityOverflow
 	} else {
-		s.items[item.ID] = item
+		s.items[it.ID] = it
 		atomic.AddInt64(&s.length, 1)
 	}
 	s.Unlock()
@@ -112,9 +113,9 @@ func (s *MemoryTopicCache) AddUnsafe(item *Topic) error {
 // Remove removes a topic from the cache by ID, if they exist. Returns ErrNoRows if no items exist.
 func (s *MemoryTopicCache) Remove(id int) error {
+	var ok bool
 	s.Lock()
-	_, ok := s.items[id]
-	if !ok {
+	if _, ok = s.items[id]; !ok {
 		s.Unlock()
 		return ErrNoRows
 	}
@@ -124,10 +125,24 @@ func (s *MemoryTopicCache) Remove(id int) error {
 	return nil
 }
 
+func (s *MemoryTopicCache) RemoveMany(ids []int) error {
+	var n int64
+	var ok bool
+	s.Lock()
+	for _, id := range ids {
+		if _, ok = s.items[id]; ok {
+			delete(s.items, id)
+			n++
+		}
+	}
+	atomic.AddInt64(&s.length, -n)
+	s.Unlock()
+	return nil
+}
+
 // RemoveUnsafe is the unsafe version of Remove. THIS METHOD IS NOT THREAD-SAFE.
 func (s *MemoryTopicCache) RemoveUnsafe(id int) error {
-	_, ok := s.items[id]
-	if !ok {
+	if _, ok := s.items[id]; !ok {
 		return ErrNoRows
 	}
 	delete(s.items, id)
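RemoveMany() deletes the whole batch under a single Lock/Unlock and adjusts the cache length with one atomic add, rather than re-locking per ID as repeated Remove() calls would; IDs that are not cached are simply skipped instead of returning ErrNoRows. A small usage sketch:

// Hypothetical bulk invalidation helper; tc may be a MemoryTopicCache or a NullTopicCache.
func evictTopics(tc TopicCache, tids []int) {
	if tc != nil {
		_ = tc.RemoveMany(tids)
	}
}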

@@ -40,6 +40,9 @@ type TopicStore interface {
 	CountMegaUser(uid int) int
 	CountBigUser(uid int) int
 
+	ClearIPs() error
+	LockMany(tids []int) error
+
 	SetCache(cache TopicCache)
 	GetCache() TopicCache
 }
@@ -53,6 +56,9 @@ type DefaultTopicStore struct {
 	countUser *sql.Stmt
 	countWordUser *sql.Stmt
 	create *sql.Stmt
+
+	clearIPs *sql.Stmt
+	lockTen *sql.Stmt
 }
 
 // NewDefaultTopicStore gives you a new instance of DefaultTopicStore
@@ -64,22 +70,25 @@ func NewDefaultTopicStore(cache TopicCache) (*DefaultTopicStore, error) {
 	t := "topics"
 	return &DefaultTopicStore{
 		cache: cache,
-		get: acc.Select(t).Columns("title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid=?").Prepare(),
-		exists: acc.Exists(t, "tid").Prepare(),
-		count: acc.Count(t).Prepare(),
-		countUser: acc.Count(t).Where("createdBy=?").Prepare(),
-		countWordUser: acc.Count(t).Where("createdBy=? AND words>=?").Prepare(),
+		get: acc.Select(t).Columns("title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid=?").Stmt(),
+		exists: acc.Exists(t, "tid").Stmt(),
+		count: acc.Count(t).Stmt(),
+		countUser: acc.Count(t).Where("createdBy=?").Stmt(),
+		countWordUser: acc.Count(t).Where("createdBy=? AND words>=?").Stmt(),
 		create: acc.Insert(t).Columns("parentID,title,content,parsed_content,createdAt,lastReplyAt,lastReplyBy,ip,words,createdBy").Fields("?,?,?,?,UTC_TIMESTAMP(),UTC_TIMESTAMP(),?,?,?,?").Prepare(),
+		clearIPs: acc.Update(t).Set("ip=''").Where("ip!=''").Stmt(),
+		lockTen: acc.Update(t).Set("is_closed=1").Where("tid IN(" + inqbuild2(10) + ")").Stmt(),
 	}, acc.FirstError()
 }
 
 func (s *DefaultTopicStore) DirtyGet(id int) *Topic {
-	t, err := s.cache.Get(id)
-	if err == nil {
+	t, e := s.cache.Get(id)
+	if e == nil {
 		return t
 	}
-	t, err = s.BypassGet(id)
-	if err == nil {
+	t, e = s.BypassGet(id)
+	if e == nil {
 		_ = s.cache.Set(t)
 		return t
 	}
@@ -87,26 +96,26 @@ func (s *DefaultTopicStore) DirtyGet(id int) *Topic {
 }
 
 // TODO: Log weird cache errors?
-func (s *DefaultTopicStore) Get(id int) (t *Topic, err error) {
-	t, err = s.cache.Get(id)
-	if err == nil {
+func (s *DefaultTopicStore) Get(id int) (t *Topic, e error) {
+	t, e = s.cache.Get(id)
+	if e == nil {
 		return t, nil
 	}
-	t, err = s.BypassGet(id)
-	if err == nil {
+	t, e = s.BypassGet(id)
+	if e == nil {
 		_ = s.cache.Set(t)
 	}
-	return t, err
+	return t, e
 }
 
 // BypassGet will always bypass the cache and pull the topic directly from the database
 func (s *DefaultTopicStore) BypassGet(id int) (*Topic, error) {
 	t := &Topic{ID: id}
-	err := s.get.QueryRow(id).Scan(&t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
-	if err == nil {
+	e := s.get.QueryRow(id).Scan(&t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
+	if e == nil {
 		t.Link = BuildTopicURL(NameToSlug(t.Title), id)
 	}
-	return t, err
+	return t, e
 }
 
 /*func (s *DefaultTopicStore) GetByUser(uid int) (list map[int]*Topic, err error) {
@@ -119,7 +128,7 @@ func (s *DefaultTopicStore) BypassGet(id int) (*Topic, error) {
 }*/
 
 // TODO: Avoid duplicating much of this logic from user_store.go
-func (s *DefaultTopicStore) BulkGetMap(ids []int) (list map[int]*Topic, err error) {
+func (s *DefaultTopicStore) BulkGetMap(ids []int) (list map[int]*Topic, e error) {
 	idCount := len(ids)
 	list = make(map[int]*Topic)
 	if idCount == 0 {
@@ -143,68 +152,115 @@ func (s *DefaultTopicStore) BulkGetMap(ids []int) (list map[int]*Topic, err erro
 	if len(ids) == 0 {
 		return list, nil
 	} else if len(ids) == 1 {
-		topic, err := s.Get(ids[0])
-		if err != nil {
-			return list, err
+		t, e := s.Get(ids[0])
+		if e != nil {
+			return list, e
 		}
-		list[topic.ID] = topic
+		list[t.ID] = t
 		return list, nil
 	}
 
 	idList, q := inqbuild(ids)
-	rows, err := qgen.NewAcc().Select("topics").Columns("tid,title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid IN(" + q + ")").Query(idList...)
-	if err != nil {
-		return list, err
+	rows, e := qgen.NewAcc().Select("topics").Columns("tid,title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid IN(" + q + ")").Query(idList...)
+	if e != nil {
+		return list, e
 	}
 	defer rows.Close()

 	for rows.Next() {
 		t := &Topic{}
-		err := rows.Scan(&t.ID, &t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
-		if err != nil {
-			return list, err
+		e := rows.Scan(&t.ID, &t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
+		if e != nil {
+			return list, e
 		}
 		t.Link = BuildTopicURL(NameToSlug(t.Title), t.ID)
-		s.cache.Set(t)
+		_ = s.cache.Set(t)
 		list[t.ID] = t
 	}
-	if err = rows.Err(); err != nil {
-		return list, err
+	if e = rows.Err(); e != nil {
+		return list, e
 	}

 	// Did we miss any topics?
 	if idCount > len(list) {
 		var sidList string
-		for _, id := range ids {
-			_, ok := list[id]
-			if !ok {
-				sidList += strconv.Itoa(id) + ","
+		for i, id := range ids {
+			if _, ok := list[id]; !ok {
+				if i == 0 {
+					sidList += strconv.Itoa(id)
+				} else {
+					sidList += "," + strconv.Itoa(id)
+				}
 			}
 		}
 		if sidList != "" {
-			sidList = sidList[0 : len(sidList)-1]
-			err = errors.New("Unable to find topics with the following IDs: " + sidList)
+			e = errors.New("Unable to find topics with the following IDs: " + sidList)
 		}
 	}
-	return list, err
+	return list, e
 }
 
 func (s *DefaultTopicStore) Reload(id int) error {
-	topic, err := s.BypassGet(id)
-	if err == nil {
-		_ = s.cache.Set(topic)
+	t, e := s.BypassGet(id)
+	if e == nil {
+		_ = s.cache.Set(t)
 	} else {
 		_ = s.cache.Remove(id)
 	}
 	TopicListThaw.Thaw()
-	return err
+	return e
 }
 
 func (s *DefaultTopicStore) Exists(id int) bool {
 	return s.exists.QueryRow(id).Scan(&id) == nil
 }
 
+func (s *DefaultTopicStore) ClearIPs() error {
+	_, e := s.clearIPs.Exec()
+	return e
+}
+
+func (s *DefaultTopicStore) LockMany(tids []int) (e error) {
+	tc, i := Topics.GetCache(), 0
+	singles := func() error {
+		for ; i < len(tids); i++ {
+			_, e := topicStmts.lock.Exec(tids[i])
+			if e != nil {
+				return e
+			}
+		}
+		return nil
+	}
+	if len(tids) < 10 {
+		if e = singles(); e != nil {
+			return e
+		}
+		if tc != nil {
+			_ = tc.RemoveMany(tids)
+		}
+		TopicListThaw.Thaw()
+		return nil
+	}
+	for ; (i + 10) < len(tids); i += 10 {
+		_, e := s.lockTen.Exec(tids[i], tids[i+1], tids[i+2], tids[i+3], tids[i+4], tids[i+5], tids[i+6], tids[i+7], tids[i+8], tids[i+9])
+		if e != nil {
+			return e
+		}
+	}
+	if e = singles(); e != nil {
+		return e
+	}
+	if tc != nil {
+		_ = tc.RemoveMany(tids)
+	}
+	TopicListThaw.Thaw()
+	return nil
+}
+
 func (s *DefaultTopicStore) Create(fid int, name, content string, uid int, ip string) (tid int, err error) {
 	if name == "" {
 		return 0, ErrNoTitle
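LockMany() closes topics in batches: the prepared lockTen statement handles ten IDs per round trip (inqbuild2(10) presumably expands to a ten-placeholder list) and any leftover IDs fall back to the existing single-row topicStmts.lock statement, after which the affected entries are evicted from the cache via RemoveMany() and the topic list is thawed. A usage sketch; the only assumption is that Topics is the package-level TopicStore, which the diff itself relies on via Topics.GetCache():

// Hypothetical moderation helper built on the new API.
func closeTopics(tids []int) error {
	if len(tids) == 0 {
		return nil
	}
	// LockMany batches the UPDATEs, evicts the IDs from the topic cache and
	// thaws the topic list, so no extra bookkeeping is needed here.
	return Topics.LockMany(tids)
}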

@@ -924,13 +924,6 @@ func (r *GenRouter) SuspiciousRequest(req *http.Request, pre string) {
 // TODO: SetDefaultPath
 // TODO: GetDefaultPath
 func (r *GenRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	// HTTP/1.1 hanging conn fix
-	/*if req.ProtoMajor == 1 && c.Dev.ExpFix1 {
-		defer func() {
-			//io.Copy(ioutil.Discard, req.Body)
-			req.Body.Close()
-		}()
-	}*/
 	malformedRequest := func(typ int) {
 		w.WriteHeader(200) // 400
 		w.Write([]byte(""))

@@ -248,6 +248,7 @@ func TestParser(t *testing.T) {
 	l.Add("ddd ddd //a ", "ddd ddd <a rel='ugc'href='//a'>a</a> ")
 	l.Add("https://"+url, "<a rel='ugc'href='https://"+url+"'>"+url+"</a>")
 	l.Add("https://t", "<a rel='ugc'href='https://t'>t</a>")
+	l.Add("https://en.wikipedia.org/wiki/First_they_came_...", "<a rel='ugc'href='https://en.wikipedia.org/wiki/First_they_came_...'>en.wikipedia.org/wiki/First_they_came_...</a>") // this frequently fails in some chat clients, we should make sure that doesn't happen here
 	l.Add("http://"+url, "<a rel='ugc'href='http://"+url+"'>"+url+"</a>")
 	l.Add("#http://"+url, "#http://"+url)
 	l.Add("@http://"+url, "<red>[Invalid Profile]</red>ttp://"+url)

@@ -507,13 +507,6 @@ func (r *GenRouter) SuspiciousRequest(req *http.Request, pre string) {
 // TODO: SetDefaultPath
 // TODO: GetDefaultPath
 func (r *GenRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	// HTTP/1.1 hanging conn fix
-	/*if req.ProtoMajor == 1 && c.Dev.ExpFix1 {
-		defer func() {
-			//io.Copy(ioutil.Discard, req.Body)
-			req.Body.Close()
-		}()
-	}*/
 	malformedRequest := func(typ int) {
 		w.WriteHeader(200) // 400
 		w.Write([]byte(""))