Add ClearIPs() to TopicStore.

Add LockMany() to TopicStore.
Add RemoveMany() to TopicCache.
Add ClearIPs() to PollStore.
Add Purge() to RegLogStore.
Add DeleteOlderThanDays() to RegLogStore.
Add Purge() to LoginLogStore.
Add DeleteOlderThanDays() to LoginLogStore.
Add SetInt() to MetaStore.
Add SetInt64() to MetaStore.

Use Createf() in RegLogItem.Create().
Use Count() in SQLRegLogStore.Count().
Use Count() in SQLLoginLogStore.Count().
Use Countf() in SQLLoginLogStore.CountUser().

Add trailing triple dot parser test case.
Remove a block of commented code in gen router.
Reduce boilerplate.
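
For context, a rough sketch of how the new store methods might be wired into a scheduled maintenance task; the task function, the 90-day retention window, and the store globals (Topics, Polls, RegLogs, LoginLogs) are assumptions for illustration, not part of this commit.

// Hypothetical daily task built on the methods added in this commit.
func dailyLogMaintenance() error {
	// Scrub stored IP addresses from topics and poll votes.
	if e := Topics.ClearIPs(); e != nil {
		return e
	}
	if e := Polls.ClearIPs(); e != nil {
		return e
	}
	// Drop registration and login log rows older than the retention window.
	if e := RegLogs.DeleteOlderThanDays(90); e != nil {
		return e
	}
	return LoginLogs.DeleteOlderThanDays(90)
}
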
This commit is contained in:
Azareal 2021-04-27 20:20:26 +10:00
parent 1b7d6ac724
commit 6870d242e9
14 changed files with 284 additions and 176 deletions

View File

@ -48,18 +48,18 @@ func (co *DefaultForumViewCounter) Tick() error {
l.Lock()
delete(m, fid)
l.Unlock()
err := co.insertChunk(count, fid)
if err != nil {
return errors.Wrap(errors.WithStack(err),"forum counter")
e := co.insertChunk(count, fid)
if e != nil {
return errors.Wrap(errors.WithStack(e),"forum counter")
}
l.RLock()
}
l.RUnlock()
return nil
}
err := cLoop(&co.oddLock,co.oddMap)
if err != nil {
return err
e := cLoop(&co.oddLock,co.oddMap)
if e != nil {
return e
}
return cLoop(&co.evenLock,co.evenMap)
}
@ -69,8 +69,8 @@ func (co *DefaultForumViewCounter) insertChunk(count, forum int) error {
return nil
}
c.DebugLogf("Inserting a vchunk with a count of %d for forum %d", count, forum)
_, err := co.insert.Exec(count, forum)
return err
_, e := co.insert.Exec(count, forum)
return e
}
func (co *DefaultForumViewCounter) Bump(fid int) {

View File

@ -127,9 +127,9 @@ func NewDefaultLangViewCounter(acc *qgen.Accumulator) (*DefaultLangViewCounter,
func (co *DefaultLangViewCounter) Tick() error {
for id := 0; id < len(co.buckets); id++ {
count := atomic.SwapInt64(&co.buckets[id], 0)
err := co.insertChunk(count, id) // TODO: Bulk insert for speed?
if err != nil {
return errors.Wrap(errors.WithStack(err), "langview counter")
e := co.insertChunk(count, id) // TODO: Bulk insert for speed?
if e != nil {
return errors.Wrap(errors.WithStack(e), "langview counter")
}
}
return nil
@ -144,8 +144,8 @@ func (co *DefaultLangViewCounter) insertChunk(count int64, id int) error {
langCode = "none"
}
c.DebugLogf("Inserting a vchunk with a count of %d for lang %s (%d)", count, langCode, id)
_, err := co.insert.Exec(count, langCode)
return err
_, e := co.insert.Exec(count, langCode)
return e
}
func (co *DefaultLangViewCounter) Bump(langCode string) (validCode bool) {

View File

@ -54,7 +54,7 @@ func NewMemoryCounter(acc *qgen.Accumulator) (*DefaultMemoryCounter, error) {
return co, acc.FirstError()
}
func (co *DefaultMemoryCounter) Tick() (err error) {
func (co *DefaultMemoryCounter) Tick() (e error) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
var rTotMem, rTotCount, rStackMem, rStackCount, rHeapMem, rHeapCount uint64
@ -83,9 +83,9 @@ func (co *DefaultMemoryCounter) Tick() (err error) {
avgHeap = (rHeapMem + m.HeapAlloc) / (rHeapCount + 1)
c.DebugLogf("Inserting a memchunk with a value of %d - %d - %d", avgMem, avgStack, avgHeap)
_, err = co.insert.Exec(avgMem, avgStack, avgHeap)
if err != nil {
return errors.Wrap(errors.WithStack(err), "mem counter")
_, e = co.insert.Exec(avgMem, avgStack, avgHeap)
if e != nil {
return errors.Wrap(errors.WithStack(e), "mem counter")
}
return nil
}

View File

@ -30,7 +30,7 @@ func NewTopicCounter() (*DefaultTopicCounter, error) {
return co, acc.FirstError()
}
func (co *DefaultTopicCounter) Tick() (err error) {
func (co *DefaultTopicCounter) Tick() (e error) {
oldBucket := co.currentBucket
var nextBucket int64 // 0
if co.currentBucket == 0 {
@ -42,9 +42,9 @@ func (co *DefaultTopicCounter) Tick() (err error) {
previousViewChunk := co.buckets[oldBucket]
atomic.AddInt64(&co.buckets[oldBucket], -previousViewChunk)
err = co.insertChunk(previousViewChunk)
if err != nil {
return errors.Wrap(errors.WithStack(err), "topics counter")
e = co.insertChunk(previousViewChunk)
if e != nil {
return errors.Wrap(errors.WithStack(e), "topics counter")
}
return nil
}
@ -58,6 +58,6 @@ func (co *DefaultTopicCounter) insertChunk(count int64) error {
return nil
}
c.DebugLogf("Inserting a topicchunk with a count of %d", count)
_, err := co.insert.Exec(count)
return err
_, e := co.insert.Exec(count)
return e
}

View File

@ -10,6 +10,8 @@ import (
type MetaStore interface {
Get(name string) (val string, err error)
Set(name, val string) error
SetInt(name string, val int) error
SetInt64(name string, val int64) error
}
type DefaultMetaStore struct {
@ -19,28 +21,41 @@ type DefaultMetaStore struct {
}
func NewDefaultMetaStore(acc *qgen.Accumulator) (*DefaultMetaStore, error) {
t := "meta"
m := &DefaultMetaStore{
get: acc.Select("meta").Columns("value").Where("name = ?").Prepare(),
set: acc.Update("meta").Set("value = ?").Where("name = ?").Prepare(),
add: acc.Insert("meta").Columns("name,value").Fields("?,''").Prepare(),
get: acc.Select(t).Columns("value").Where("name=?").Prepare(),
set: acc.Update(t).Set("value=?").Where("name=?").Prepare(),
add: acc.Insert(t).Columns("name,value").Fields("?,''").Prepare(),
}
return m, acc.FirstError()
}
func (s *DefaultMetaStore) Get(name string) (val string, err error) {
err = s.get.QueryRow(name).Scan(&val)
return val, err
func (s *DefaultMetaStore) Get(name string) (val string, e error) {
e = s.get.QueryRow(name).Scan(&val)
return val, e
}
// TODO: Use timestamped rows as a more robust method of ensuring data integrity
func (s *DefaultMetaStore) Set(name, val string) error {
_, err := s.Get(name)
if err == sql.ErrNoRows {
_, err := s.add.Exec(name)
if err != nil {
return err
func (s *DefaultMetaStore) setVal(name string, val interface{}) error {
_, e := s.Get(name)
if e == sql.ErrNoRows {
_, e := s.add.Exec(name)
if e != nil {
return e
}
}
_, err = s.set.Exec(val, name)
return err
_, e = s.set.Exec(val, name)
return e
}
func (s *DefaultMetaStore) Set(name, val string) error {
return s.setVal(name, val)
}
func (s *DefaultMetaStore) SetInt(name string, val int) error {
return s.setVal(name, val)
}
func (s *DefaultMetaStore) SetInt64(name string, val int64) error {
return s.setVal(name, val)
}
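
The setVal helper above follows a get-then-upsert pattern: probe with Get, insert a placeholder row when sql.ErrNoRows comes back, then update the value. A minimal usage sketch, assuming a package-level Meta handle and a made-up key name:

// Persist an int64 under a hypothetical key; SetInt works the same way for plain ints.
if e := Meta.SetInt64("lastMaintenanceRun", time.Now().Unix()); e != nil {
	LogError(e)
}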

View File

@ -31,7 +31,7 @@ func init() {
DbInits.Add(func(acc *qgen.Accumulator) error {
rl := "registration_logs"
regLogStmts = RegLogStmts{
update: acc.Update(rl).Set("username=?,email=?,failureReason=?,success=?").Where("rlid=?").Prepare(),
update: acc.Update(rl).Set("username=?,email=?,failureReason=?,success=?,doneAt=?").Where("rlid=?").Prepare(),
create: acc.Insert(rl).Columns("username,email,failureReason,success,ipaddress,doneAt").Fields("?,?,?,?,?,UTC_TIMESTAMP()").Prepare(),
}
return acc.FirstError()
@ -40,29 +40,32 @@ func init() {
// TODO: Reload this item in the store, probably doesn't matter right now, but it might when we start caching this stuff in memory
// ! Retroactive updates of date are not permitted for integrity reasons
// TODO: Do we even use this anymore or can we just make the logs immutable (except for deletes) for simplicity sake?
func (l *RegLogItem) Commit() error {
_, err := regLogStmts.update.Exec(l.Username, l.Email, l.FailureReason, l.Success, l.ID)
return err
_, e := regLogStmts.update.Exec(l.Username, l.Email, l.FailureReason, l.Success, l.DoneAt, l.ID)
return e
}
func (l *RegLogItem) Create() (id int, err error) {
res, err := regLogStmts.create.Exec(l.Username, l.Email, l.FailureReason, l.Success, l.IP)
if err != nil {
return 0, err
}
id64, err := res.LastInsertId()
l.ID = int(id64)
return l.ID, err
func (l *RegLogItem) Create() (id int, e error) {
id, e = Createf(regLogStmts.create, l.Username, l.Email, l.FailureReason, l.Success, l.IP)
l.ID = id
return l.ID, e
}
type RegLogStore interface {
Count() (count int)
GetOffset(offset, perPage int) (logs []RegLogItem, err error)
Purge() error
DeleteOlderThanDays(days int) error
}
type SQLRegLogStore struct {
count *sql.Stmt
getOffset *sql.Stmt
purge *sql.Stmt
deleteOlderThanDays *sql.Stmt
}
func NewRegLogStore(acc *qgen.Accumulator) (*SQLRegLogStore, error) {
@ -70,30 +73,29 @@ func NewRegLogStore(acc *qgen.Accumulator) (*SQLRegLogStore, error) {
return &SQLRegLogStore{
count: acc.Count(rl).Prepare(),
getOffset: acc.Select(rl).Columns("rlid,username,email,failureReason,success,ipaddress,doneAt").Orderby("doneAt DESC").Limit("?,?").Prepare(),
purge: acc.Purge(rl),
deleteOlderThanDays: acc.Delete(rl).DateOlderThanQ("doneAt", "day").Prepare(),
}, acc.FirstError()
}
func (s *SQLRegLogStore) Count() (count int) {
err := s.count.QueryRow().Scan(&count)
if err != nil {
LogError(err)
}
return count
return Count(s.count)
}
func (s *SQLRegLogStore) GetOffset(offset, perPage int) (logs []RegLogItem, err error) {
rows, err := s.getOffset.Query(offset, perPage)
if err != nil {
return logs, err
func (s *SQLRegLogStore) GetOffset(offset, perPage int) (logs []RegLogItem, e error) {
rows, e := s.getOffset.Query(offset, perPage)
if e != nil {
return logs, e
}
defer rows.Close()
for rows.Next() {
var l RegLogItem
var doneAt time.Time
err := rows.Scan(&l.ID, &l.Username, &l.Email, &l.FailureReason, &l.Success, &l.IP, &doneAt)
if err != nil {
return logs, err
e := rows.Scan(&l.ID, &l.Username, &l.Email, &l.FailureReason, &l.Success, &l.IP, &doneAt)
if e != nil {
return logs, e
}
l.DoneAt = doneAt.Format("2006-01-02 15:04:05")
logs = append(logs, l)
@ -101,6 +103,17 @@ func (s *SQLRegLogStore) GetOffset(offset, perPage int) (logs []RegLogItem, err
return logs, rows.Err()
}
func (s *SQLRegLogStore) DeleteOlderThanDays(days int) error {
_, e := s.deleteOlderThanDays.Exec(days)
return e
}
// Delete all registration logs
func (s *SQLRegLogStore) Purge() error {
_, e := s.purge.Exec()
return e
}
type LoginLogItem struct {
ID int
UID int
@ -120,7 +133,7 @@ func init() {
DbInits.Add(func(acc *qgen.Accumulator) error {
ll := "login_logs"
loginLogStmts = LoginLogStmts{
update: acc.Update(ll).Set("uid=?,success=?").Where("lid=?").Prepare(),
update: acc.Update(ll).Set("uid=?,success=?,doneAt=?").Where("lid=?").Prepare(),
create: acc.Insert(ll).Columns("uid,success,ipaddress,doneAt").Fields("?,?,?,UTC_TIMESTAMP()").Prepare(),
}
return acc.FirstError()
@ -130,30 +143,36 @@ func init() {
// TODO: Reload this item in the store, probably doesn't matter right now, but it might when we start caching this stuff in memory
// ! Retroactive updates of date are not permitted for integrity reasons
func (l *LoginLogItem) Commit() error {
_, err := loginLogStmts.update.Exec(l.UID, l.Success, l.ID)
return err
_, e := loginLogStmts.update.Exec(l.UID, l.Success, l.DoneAt, l.ID)
return e
}
func (l *LoginLogItem) Create() (id int, err error) {
res, err := loginLogStmts.create.Exec(l.UID, l.Success, l.IP)
if err != nil {
return 0, err
func (l *LoginLogItem) Create() (id int, e error) {
res, e := loginLogStmts.create.Exec(l.UID, l.Success, l.IP)
if e != nil {
return 0, e
}
id64, err := res.LastInsertId()
id64, e := res.LastInsertId()
l.ID = int(id64)
return l.ID, err
return l.ID, e
}
type LoginLogStore interface {
Count() (count int)
CountUser(uid int) (count int)
GetOffset(uid, offset, perPage int) (logs []LoginLogItem, err error)
Purge() error
DeleteOlderThanDays(days int) error
}
type SQLLoginLogStore struct {
count *sql.Stmt
countForUser *sql.Stmt
getOffsetByUser *sql.Stmt
purge *sql.Stmt
deleteOlderThanDays *sql.Stmt
}
func NewLoginLogStore(acc *qgen.Accumulator) (*SQLLoginLogStore, error) {
@ -162,41 +181,47 @@ func NewLoginLogStore(acc *qgen.Accumulator) (*SQLLoginLogStore, error) {
count: acc.Count(ll).Prepare(),
countForUser: acc.Count(ll).Where("uid=?").Prepare(),
getOffsetByUser: acc.Select(ll).Columns("lid,success,ipaddress,doneAt").Where("uid=?").Orderby("doneAt DESC").Limit("?,?").Prepare(),
purge: acc.Purge(ll),
deleteOlderThanDays: acc.Delete(ll).DateOlderThanQ("doneAt", "day").Prepare(),
}, acc.FirstError()
}
func (s *SQLLoginLogStore) Count() (count int) {
err := s.count.QueryRow().Scan(&count)
if err != nil {
LogError(err)
}
return count
return Count(s.count)
}
func (s *SQLLoginLogStore) CountUser(uid int) (count int) {
err := s.countForUser.QueryRow(uid).Scan(&count)
if err != nil {
LogError(err)
}
return count
return Countf(s.countForUser, uid)
}
func (s *SQLLoginLogStore) GetOffset(uid, offset, perPage int) (logs []LoginLogItem, err error) {
rows, err := s.getOffsetByUser.Query(uid, offset, perPage)
if err != nil {
return logs, err
func (s *SQLLoginLogStore) GetOffset(uid, offset, perPage int) (logs []LoginLogItem, e error) {
rows, e := s.getOffsetByUser.Query(uid, offset, perPage)
if e != nil {
return logs, e
}
defer rows.Close()
for rows.Next() {
l := LoginLogItem{UID: uid}
var doneAt time.Time
err := rows.Scan(&l.ID, &l.Success, &l.IP, &doneAt)
if err != nil {
return logs, err
e := rows.Scan(&l.ID, &l.Success, &l.IP, &doneAt)
if e != nil {
return logs, e
}
l.DoneAt = doneAt.Format("2006-01-02 15:04:05")
logs = append(logs, l)
}
return logs, rows.Err()
}
func (s *SQLLoginLogStore) DeleteOlderThanDays(days int) error {
_, e := s.deleteOlderThanDays.Exec(days)
return e
}
// Delete all login logs
func (s *SQLLoginLogStore) Purge() error {
_, e := s.purge.Exec()
return e
}
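
The Createf, Count and Countf helpers referenced in the commit message are defined elsewhere in the codebase and do not appear in this diff; judging from the boilerplate they replace above, they presumably look something like this (signatures are assumptions):

// Presumed shape of the shared helpers; the exact definitions live outside this diff.
func Createf(stmt *sql.Stmt, args ...interface{}) (id int, e error) {
	res, e := stmt.Exec(args...)
	if e != nil {
		return 0, e
	}
	id64, e := res.LastInsertId()
	return int(id64), e
}

func Count(stmt *sql.Stmt) (count int) {
	if e := stmt.QueryRow().Scan(&count); e != nil {
		LogError(e)
	}
	return count
}

func Countf(stmt *sql.Stmt, args ...interface{}) (count int) {
	if e := stmt.QueryRow(args...).Scan(&count); e != nil {
		LogError(e)
	}
	return count
}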

View File

@ -31,6 +31,9 @@ func (c *NullTopicCache) AddUnsafe(_ *Topic) error {
func (c *NullTopicCache) Remove(id int) error {
return nil
}
func (c *NullTopicCache) RemoveMany(ids []int) error {
return nil
}
func (c *NullTopicCache) RemoveUnsafe(id int) error {
return nil
}

View File

@ -191,23 +191,23 @@ func RebuildGroupPermissions(g *Group) error {
log.Print("Reloading a group")
// TODO: Avoid re-initting this all the time
getGroupPerms, err := qgen.Builder.SimpleSelect("users_groups", "permissions", "gid=?", "", "")
if err != nil {
return err
getGroupPerms, e := qgen.Builder.SimpleSelect("users_groups", "permissions", "gid=?", "", "")
if e != nil {
return e
}
defer getGroupPerms.Close()
err = getGroupPerms.QueryRow(g.ID).Scan(&permstr)
if err != nil {
return err
e = getGroupPerms.QueryRow(g.ID).Scan(&permstr)
if e != nil {
return e
}
tmpPerms := Perms{
//ExtData: make(map[string]bool),
}
err = json.Unmarshal(permstr, &tmpPerms)
if err != nil {
return err
e = json.Unmarshal(permstr, &tmpPerms)
if e != nil {
return e
}
g.Perms = tmpPerms
return nil

View File

@ -26,6 +26,7 @@ type Pollable interface {
type PollStore interface {
Get(id int) (*Poll, error)
Exists(id int) bool
ClearIPs() error
Create(parent Pollable, pollType int, pollOptions map[int]string) (int, error)
Reload(id int) error
//Count() int
@ -43,6 +44,8 @@ type DefaultPollStore struct {
createPollOption *sql.Stmt
delete *sql.Stmt
//count *sql.Stmt
clearIPs *sql.Stmt
}
func NewDefaultPollStore(cache PollCache) (*DefaultPollStore, error) {
@ -54,11 +57,13 @@ func NewDefaultPollStore(cache PollCache) (*DefaultPollStore, error) {
p := "polls"
return &DefaultPollStore{
cache: cache,
get: acc.Select(p).Columns("parentID, parentTable, type, options, votes").Where("pollID=?").Prepare(),
exists: acc.Select(p).Columns("pollID").Where("pollID=?").Prepare(),
createPoll: acc.Insert(p).Columns("parentID, parentTable, type, options").Fields("?,?,?,?").Prepare(),
get: acc.Select(p).Columns("parentID,parentTable,type,options,votes").Where("pollID=?").Stmt(),
exists: acc.Select(p).Columns("pollID").Where("pollID=?").Stmt(),
createPoll: acc.Insert(p).Columns("parentID,parentTable,type,options").Fields("?,?,?,?").Prepare(),
createPollOption: acc.Insert("polls_options").Columns("pollID,option,votes").Fields("?,?,0").Prepare(),
//count: acc.SimpleCount(p, "", ""),
clearIPs: acc.Update("polls_votes").Set("ip=''").Where("ip!=''").Stmt(),
}, acc.FirstError()
}
@ -144,8 +149,7 @@ func (s *DefaultPollStore) BulkGetMap(ids []int) (list map[int]*Poll, err error)
if idCount > len(list) {
var sidList string
for _, id := range ids {
_, ok := list[id]
if !ok {
if _, ok := list[id]; !ok {
sidList += strconv.Itoa(id) + ","
}
}
@ -172,18 +176,16 @@ func (s *DefaultPollStore) BulkGetMap(ids []int) (list map[int]*Poll, err error)
func (s *DefaultPollStore) Reload(id int) error {
p := &Poll{ID: id}
var optionTxt []byte
err := s.get.QueryRow(id).Scan(&p.ParentID, &p.ParentTable, &p.Type, &optionTxt, &p.VoteCount)
if err != nil {
s.cache.Remove(id)
return err
e := s.get.QueryRow(id).Scan(&p.ParentID, &p.ParentTable, &p.Type, &optionTxt, &p.VoteCount)
if e != nil {
_ = s.cache.Remove(id)
return e
}
err = json.Unmarshal(optionTxt, &p.Options)
if err != nil {
s.cache.Remove(id)
return err
e = json.Unmarshal(optionTxt, &p.Options)
if e != nil {
_ = s.cache.Remove(id)
return e
}
p.QuickOptions = s.unpackOptionsMap(p.Options)
_ = s.cache.Set(p)
return nil
@ -197,6 +199,11 @@ func (s *DefaultPollStore) unpackOptionsMap(rawOptions map[int]string) []PollOpt
return opts
}
func (s *DefaultPollStore) ClearIPs() error {
_, e := s.clearIPs.Exec()
return e
}
// TODO: Use a transaction for this
func (s *DefaultPollStore) Create(parent Pollable, pollType int, pollOptions map[int]string) (id int, e error) {
// TODO: Move the option names into the polls_options table and get rid of this json sludge?

View File

@ -15,6 +15,7 @@ type TopicCache interface {
AddUnsafe(item *Topic) error
Remove(id int) error
RemoveUnsafe(id int) error
RemoveMany(ids []int) error
Flush()
Length() int
SetCapacity(cap int)
@ -70,16 +71,16 @@ func (s *MemoryTopicCache) BulkGet(ids []int) (list []*Topic) {
}
// Set overwrites the value of a topic in the cache, whether it's present or not. May return a capacity overflow error.
func (s *MemoryTopicCache) Set(item *Topic) error {
func (s *MemoryTopicCache) Set(it *Topic) error {
s.Lock()
_, ok := s.items[item.ID]
_, ok := s.items[it.ID]
if ok {
s.items[item.ID] = item
s.items[it.ID] = it
} else if int(s.length) >= s.capacity {
s.Unlock()
return ErrStoreCapacityOverflow
} else {
s.items[item.ID] = item
s.items[it.ID] = it
atomic.AddInt64(&s.length, 1)
}
s.Unlock()
@ -112,9 +113,9 @@ func (s *MemoryTopicCache) AddUnsafe(item *Topic) error {
// Remove removes a topic from the cache by ID, if they exist. Returns ErrNoRows if no items exist.
func (s *MemoryTopicCache) Remove(id int) error {
var ok bool
s.Lock()
_, ok := s.items[id]
if !ok {
if _, ok = s.items[id]; !ok {
s.Unlock()
return ErrNoRows
}
@ -124,10 +125,24 @@ func (s *MemoryTopicCache) Remove(id int) error {
return nil
}
func (s *MemoryTopicCache) RemoveMany(ids []int) error {
var n int64
var ok bool
s.Lock()
for _, id := range ids {
if _, ok = s.items[id]; ok {
delete(s.items, id)
n++
}
}
atomic.AddInt64(&s.length, -n)
s.Unlock()
return nil
}
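
Note that RemoveMany deliberately differs from Remove: IDs that are not cached are skipped rather than reported as ErrNoRows, and the cache length is only decremented by the number of entries actually deleted. A sketch of a call site, assuming cache access via the topic store:

// Hypothetical caller: evict a batch of topics from the cache after a bulk update.
if tc := Topics.GetCache(); tc != nil {
	_ = tc.RemoveMany([]int{10, 11, 12})
}
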
// RemoveUnsafe is the unsafe version of Remove. THIS METHOD IS NOT THREAD-SAFE.
func (s *MemoryTopicCache) RemoveUnsafe(id int) error {
_, ok := s.items[id]
if !ok {
if _, ok := s.items[id]; !ok {
return ErrNoRows
}
delete(s.items, id)

View File

@ -40,6 +40,9 @@ type TopicStore interface {
CountMegaUser(uid int) int
CountBigUser(uid int) int
ClearIPs() error
LockMany(tids []int) error
SetCache(cache TopicCache)
GetCache() TopicCache
}
@ -53,6 +56,9 @@ type DefaultTopicStore struct {
countUser *sql.Stmt
countWordUser *sql.Stmt
create *sql.Stmt
clearIPs *sql.Stmt
lockTen *sql.Stmt
}
// NewDefaultTopicStore gives you a new instance of DefaultTopicStore
@ -64,22 +70,25 @@ func NewDefaultTopicStore(cache TopicCache) (*DefaultTopicStore, error) {
t := "topics"
return &DefaultTopicStore{
cache: cache,
get: acc.Select(t).Columns("title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid=?").Prepare(),
exists: acc.Exists(t, "tid").Prepare(),
count: acc.Count(t).Prepare(),
countUser: acc.Count(t).Where("createdBy=?").Prepare(),
countWordUser: acc.Count(t).Where("createdBy=? AND words>=?").Prepare(),
create: acc.Insert(t).Columns("parentID, title, content, parsed_content, createdAt, lastReplyAt, lastReplyBy, ip, words, createdBy").Fields("?,?,?,?,UTC_TIMESTAMP(),UTC_TIMESTAMP(),?,?,?,?").Prepare(),
get: acc.Select(t).Columns("title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid=?").Stmt(),
exists: acc.Exists(t, "tid").Stmt(),
count: acc.Count(t).Stmt(),
countUser: acc.Count(t).Where("createdBy=?").Stmt(),
countWordUser: acc.Count(t).Where("createdBy=? AND words>=?").Stmt(),
create: acc.Insert(t).Columns("parentID,title,content,parsed_content,createdAt,lastReplyAt,lastReplyBy,ip,words,createdBy").Fields("?,?,?,?,UTC_TIMESTAMP(),UTC_TIMESTAMP(),?,?,?,?").Prepare(),
clearIPs: acc.Update(t).Set("ip=''").Where("ip!=''").Stmt(),
lockTen: acc.Update(t).Set("is_closed=1").Where("tid IN(" + inqbuild2(10) + ")").Stmt(),
}, acc.FirstError()
}
func (s *DefaultTopicStore) DirtyGet(id int) *Topic {
t, err := s.cache.Get(id)
if err == nil {
t, e := s.cache.Get(id)
if e == nil {
return t
}
t, err = s.BypassGet(id)
if err == nil {
t, e = s.BypassGet(id)
if e == nil {
_ = s.cache.Set(t)
return t
}
@ -87,26 +96,26 @@ func (s *DefaultTopicStore) DirtyGet(id int) *Topic {
}
// TODO: Log weird cache errors?
func (s *DefaultTopicStore) Get(id int) (t *Topic, err error) {
t, err = s.cache.Get(id)
if err == nil {
func (s *DefaultTopicStore) Get(id int) (t *Topic, e error) {
t, e = s.cache.Get(id)
if e == nil {
return t, nil
}
t, err = s.BypassGet(id)
if err == nil {
t, e = s.BypassGet(id)
if e == nil {
_ = s.cache.Set(t)
}
return t, err
return t, e
}
// BypassGet will always bypass the cache and pull the topic directly from the database
func (s *DefaultTopicStore) BypassGet(id int) (*Topic, error) {
t := &Topic{ID: id}
err := s.get.QueryRow(id).Scan(&t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
if err == nil {
e := s.get.QueryRow(id).Scan(&t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
if e == nil {
t.Link = BuildTopicURL(NameToSlug(t.Title), id)
}
return t, err
return t, e
}
/*func (s *DefaultTopicStore) GetByUser(uid int) (list map[int]*Topic, err error) {
@ -119,7 +128,7 @@ func (s *DefaultTopicStore) BypassGet(id int) (*Topic, error) {
}*/
// TODO: Avoid duplicating much of this logic from user_store.go
func (s *DefaultTopicStore) BulkGetMap(ids []int) (list map[int]*Topic, err error) {
func (s *DefaultTopicStore) BulkGetMap(ids []int) (list map[int]*Topic, e error) {
idCount := len(ids)
list = make(map[int]*Topic)
if idCount == 0 {
@ -143,68 +152,115 @@ func (s *DefaultTopicStore) BulkGetMap(ids []int) (list map[int]*Topic, err erro
if len(ids) == 0 {
return list, nil
} else if len(ids) == 1 {
topic, err := s.Get(ids[0])
if err != nil {
return list, err
t, e := s.Get(ids[0])
if e != nil {
return list, e
}
list[topic.ID] = topic
list[t.ID] = t
return list, nil
}
idList, q := inqbuild(ids)
rows, err := qgen.NewAcc().Select("topics").Columns("tid,title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid IN(" + q + ")").Query(idList...)
if err != nil {
return list, err
rows, e := qgen.NewAcc().Select("topics").Columns("tid,title,content,createdBy,createdAt,lastReplyBy,lastReplyAt,lastReplyID,is_closed,sticky,parentID,ip,views,postCount,likeCount,attachCount,poll,data").Where("tid IN(" + q + ")").Query(idList...)
if e != nil {
return list, e
}
defer rows.Close()
for rows.Next() {
t := &Topic{}
err := rows.Scan(&t.ID, &t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
if err != nil {
return list, err
e := rows.Scan(&t.ID, &t.Title, &t.Content, &t.CreatedBy, &t.CreatedAt, &t.LastReplyBy, &t.LastReplyAt, &t.LastReplyID, &t.IsClosed, &t.Sticky, &t.ParentID, &t.IP, &t.ViewCount, &t.PostCount, &t.LikeCount, &t.AttachCount, &t.Poll, &t.Data)
if e != nil {
return list, e
}
t.Link = BuildTopicURL(NameToSlug(t.Title), t.ID)
s.cache.Set(t)
_ = s.cache.Set(t)
list[t.ID] = t
}
if err = rows.Err(); err != nil {
return list, err
if e = rows.Err(); e != nil {
return list, e
}
// Did we miss any topics?
if idCount > len(list) {
var sidList string
for _, id := range ids {
_, ok := list[id]
if !ok {
sidList += strconv.Itoa(id) + ","
for i, id := range ids {
if _, ok := list[id]; !ok {
if i == 0 {
sidList += strconv.Itoa(id)
} else {
sidList += ","+strconv.Itoa(id)
}
}
}
if sidList != "" {
sidList = sidList[0 : len(sidList)-1]
err = errors.New("Unable to find topics with the following IDs: " + sidList)
e = errors.New("Unable to find topics with the following IDs: " + sidList)
}
}
return list, err
return list, e
}
func (s *DefaultTopicStore) Reload(id int) error {
topic, err := s.BypassGet(id)
if err == nil {
_ = s.cache.Set(topic)
t, e := s.BypassGet(id)
if e == nil {
_ = s.cache.Set(t)
} else {
_ = s.cache.Remove(id)
}
TopicListThaw.Thaw()
return err
return e
}
func (s *DefaultTopicStore) Exists(id int) bool {
return s.exists.QueryRow(id).Scan(&id) == nil
}
func (s *DefaultTopicStore) ClearIPs() error {
_, e := s.clearIPs.Exec()
return e
}
func (s *DefaultTopicStore) LockMany(tids []int) (e error) {
tc, i := Topics.GetCache(), 0
singles := func() error {
for ; i < len(tids); i++ {
_, e := topicStmts.lock.Exec(tids[i])
if e != nil {
return e
}
}
return nil
}
if len(tids) < 10 {
if e = singles(); e != nil {
return e
}
if tc != nil {
_ = tc.RemoveMany(tids)
}
TopicListThaw.Thaw()
return nil
}
for ; (i + 10) < len(tids); i += 10 {
_, e := s.lockTen.Exec(tids[i], tids[i+1], tids[i+2], tids[i+3], tids[i+4], tids[i+5], tids[i+6], tids[i+7], tids[i+8], tids[i+9])
if e != nil {
return e
}
}
if e = singles(); e != nil {
return e
}
if tc != nil {
_ = tc.RemoveMany(tids)
}
TopicListThaw.Thaw()
return nil
}
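
LockMany closes topics in batches of ten through the prepared lockTen statement, falls back to one-at-a-time updates for any remainder, then evicts the affected IDs from the topic cache and thaws the topic list. The inqbuild2 helper is not shown in this diff; it presumably just emits a run of n comma-separated placeholders, along these lines (an assumption):

// Presumed shape of inqbuild2 (defined elsewhere, not in this diff): build a run of
// n placeholders for an IN clause, e.g. inqbuild2(3) -> "?,?,?".
func inqbuild2(n int) string {
	if n <= 0 {
		return ""
	}
	q := "?"
	for i := 1; i < n; i++ {
		q += ",?"
	}
	return q
}
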
func (s *DefaultTopicStore) Create(fid int, name, content string, uid int, ip string) (tid int, err error) {
if name == "" {
return 0, ErrNoTitle

View File

@ -924,13 +924,6 @@ func (r *GenRouter) SuspiciousRequest(req *http.Request, pre string) {
// TODO: SetDefaultPath
// TODO: GetDefaultPath
func (r *GenRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// HTTP/1.1 hanging conn fix
/*if req.ProtoMajor == 1 && c.Dev.ExpFix1 {
defer func() {
//io.Copy(ioutil.Discard, req.Body)
req.Body.Close()
}()
}*/
malformedRequest := func(typ int) {
w.WriteHeader(200) // 400
w.Write([]byte(""))

View File

@ -248,6 +248,7 @@ func TestParser(t *testing.T) {
l.Add("ddd ddd //a ", "ddd ddd <a rel='ugc'href='//a'>a</a> ")
l.Add("https://"+url, "<a rel='ugc'href='https://"+url+"'>"+url+"</a>")
l.Add("https://t", "<a rel='ugc'href='https://t'>t</a>")
l.Add("https://en.wikipedia.org/wiki/First_they_came_...", "<a rel='ugc'href='https://en.wikipedia.org/wiki/First_they_came_...'>en.wikipedia.org/wiki/First_they_came_...</a>") // this frequently fails in some chat clients, we should make sure that doesn't happen here
l.Add("http://"+url, "<a rel='ugc'href='http://"+url+"'>"+url+"</a>")
l.Add("#http://"+url, "#http://"+url)
l.Add("@http://"+url, "<red>[Invalid Profile]</red>ttp://"+url)

View File

@ -507,13 +507,6 @@ func (r *GenRouter) SuspiciousRequest(req *http.Request, pre string) {
// TODO: SetDefaultPath
// TODO: GetDefaultPath
func (r *GenRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// HTTP/1.1 hanging conn fix
/*if req.ProtoMajor == 1 && c.Dev.ExpFix1 {
defer func() {
//io.Copy(ioutil.Discard, req.Body)
req.Body.Close()
}()
}*/
malformedRequest := func(typ int) {
w.WriteHeader(200) // 400
w.Write([]byte(""))