Add TickLoop struct.

Add a number of new test cases and refactor others.
Replace NotifyOne() with ActivityMatches.Add().
Shorten arr_ to a_ in tmpl_list.go.
Reduce boilerplate.
Azareal 2021-05-02 18:47:19 +10:00
parent 78fbbcda21
commit fc5b29e648
8 changed files with 1045 additions and 623 deletions


@ -34,7 +34,6 @@ type Alert struct {
type AlertStmts struct {
notifyWatchers *sql.Stmt
notifyOne *sql.Stmt
getWatchers *sql.Stmt
}
@ -49,7 +48,6 @@ func init() {
qgen.DBInsert{"activity_stream_matches", "watcher,asid", ""},
qgen.DBJoin{"activity_stream", "activity_subscriptions", "activity_subscriptions.user, activity_stream.asid", "activity_subscriptions.targetType = activity_stream.elementType AND activity_subscriptions.targetID = activity_stream.elementID AND activity_subscriptions.user != activity_stream.actor", "asid=?", "", ""},
),
notifyOne: acc.Insert("activity_stream_matches").Columns("watcher,asid").Fields("?,?").Prepare(),
getWatchers: acc.SimpleInnerJoin("activity_stream", "activity_subscriptions", "activity_subscriptions.user", "activity_subscriptions.targetType = activity_stream.elementType AND activity_subscriptions.targetID = activity_stream.elementID AND activity_subscriptions.user != activity_stream.actor", "asid=?", "", ""),
}
return acc.FirstError()
@ -101,6 +99,7 @@ func BuildAlert(a Alert, user User /* The current user */) (out string, err erro
var url, area, phraseName string
own := false
// TODO: Avoid loading a bit of data twice
switch a.ElementType {
case "convo":
convo, err := Convos.Get(a.ElementID)
@ -232,6 +231,7 @@ func BuildAlertSb(sb *strings.Builder, a *Alert, u *User /* The current user */)
var url, area string
own := false
// TODO: Avoid loading a bit of data twice
switch a.ElementType {
case "convo":
convo, err := Convos.Get(a.ElementID)
@ -300,6 +300,7 @@ func BuildAlertSb(sb *strings.Builder, a *Alert, u *User /* The current user */)
//const AlertsGrowHint3 = len(`{"msg":"._","sub":["",""],"path":"","img":"","id":}`) + 3 + 2 + 2 + 2 + 2 + 1
// TODO: Create a notifier structure?
func AddActivityAndNotifyAll(a Alert) error {
id, err := Activity.Add(a)
if err != nil {
@ -308,13 +309,14 @@ func AddActivityAndNotifyAll(a Alert) error {
return NotifyWatchers(id)
}
// TODO: Create a notifier structure?
func AddActivityAndNotifyTarget(a Alert) error {
id, err := Activity.Add(a)
if err != nil {
return err
}
err = NotifyOne(a.TargetUserID, id)
err = ActivityMatches.Add(a.TargetUserID, id)
if err != nil {
return err
}
@ -330,11 +332,7 @@ func AddActivityAndNotifyTarget(a Alert) error {
return nil
}
func NotifyOne(watcher, asid int) error {
_, err := alertStmts.notifyOne.Exec(watcher, asid)
return err
}
// TODO: Create a notifier structure?
func NotifyWatchers(asid int) error {
_, err := alertStmts.notifyWatchers.Exec(asid)
if err != nil {

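For readers tracing the NotifyOne() removal above: the insert it performed now lives behind the new ActivityMatches store, which main.go constructs further down via c.NewDefaultActivityStreamMatches(acc). The store's definition is not shown in this diff, so the shape sketched here is only an assumption inferred from the call sites ActivityMatches.Add(watcher, asid) and ActivityMatches.CountAsid(asid) that do appear in the commit:

package common

// Assumed shape only: inferred from the call sites in this commit,
// not copied from the actual store definition, which this diff does not show.
type ActivityStreamMatchStore interface {
	Add(watcher, asid int) error // replaces the old NotifyOne(watcher, asid)
	CountAsid(asid int) int      // used by asmMatches() in the new tickloop.go
}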

@ -622,7 +622,7 @@ func compileJSTemplates(wg *sync.WaitGroup, c *tmpl.CTemplateSet, themeName stri
}
var poutlen = len("\n// nolint\nfunc init() {\n")
var poutlooplen = len("__frags[0]=arr_0[:]\n")
var poutlooplen = len("__frags[0]=a_0[:]\n")
func getTemplateList(c *tmpl.CTemplateSet, wg *sync.WaitGroup, prefix string) string {
DebugLog("in getTemplateList")
@ -666,16 +666,16 @@ func getTemplateList(c *tmpl.CTemplateSet, wg *sync.WaitGroup, prefix string) st
}
}
tmpStr := strconv.Itoa(tmpCount)
//"arr_" + tmpStr + ":=[...]byte{" + /*bits*/ bsb.String() + "}\n"
poutsb.WriteString("arr_")
//"a_" + tmpStr + ":=[...]byte{" + /*bits*/ bsb.String() + "}\n"
poutsb.WriteString("a_")
poutsb.WriteString(tmpStr)
poutsb.WriteString(":=[...]byte{")
poutsb.WriteString(bsb.String())
poutsb.WriteString("}\n")
//front + "=arr_" + tmpStr + "[:]\n"
//front + "=a_" + tmpStr + "[:]\n"
poutsb.WriteString(front)
poutsb.WriteString("=arr_")
poutsb.WriteString("=a_")
poutsb.WriteString(tmpStr)
poutsb.WriteString("[:]\n")
tmpCount++

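The arr_ to a_ change above only shortens the names of the generated fragment arrays; the structure of the emitted file is unchanged. As a rough illustration of that output shape (the byte values and the __frags slice below are placeholders, not real generator output):

package main

// Rough sketch of the kind of init code getTemplateList emits after the
// rename; the byte values and the __frags slice are placeholders.
var __frags = make([][]byte, 1)

// nolint
func init() {
	a_0 := [...]byte{'<', 'd', 'i', 'v', '>'} // one pre-rendered template fragment
	__frags[0] = a_0[:]                       // expose it as a slice
}

func main() {}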
common/tickloop.go (new file, 215 lines)

@ -0,0 +1,215 @@
package common
import (
"log"
"strconv"
"sync/atomic"
"time"
qgen "github.com/Azareal/Gosora/query_gen"
"github.com/pkg/errors"
)
type TickLoop struct {
HalfSec *time.Ticker
Sec *time.Ticker
FifteenMin *time.Ticker
Hour *time.Ticker
Day *time.Ticker
HalfSecf func() error
Secf func() error
FifteenMinf func() error
Hourf func() error
Dayf func() error
}
func NewTickLoop() *TickLoop {
return &TickLoop{
// TODO: Write tests for these
// Run this goroutine once every half second
HalfSec: time.NewTicker(time.Second / 2),
Sec: time.NewTicker(time.Second),
FifteenMin: time.NewTicker(15 * time.Minute),
Hour: time.NewTicker(time.Hour),
Day: time.NewTicker(time.Hour * 24),
}
}
func (l *TickLoop) Loop() {
r := func(e error) {
if e != nil {
LogError(e)
}
}
for {
select {
case <-l.HalfSec.C:
r(l.HalfSecf())
case <-l.Sec.C:
r(l.Secf())
case <-l.FifteenMin.C:
r(l.FifteenMinf())
case <-l.Hour.C:
r(l.Hourf())
// TODO: Handle the instance going down a lot better
case <-l.Day.C:
r(l.Dayf())
}
}
}
var ErrDBDown = errors.New("The database is down")
func StartTick() (abort bool) {
db := qgen.Builder.GetConn()
isDBDown := atomic.LoadInt32(&IsDBDown)
if e := db.Ping(); e != nil {
// TODO: There's a bit of a race here, but it doesn't matter if this error appears multiple times in the logs as it's capped at three times, we just want to cut it down 99% of the time
if isDBDown == 0 {
db.SetConnMaxLifetime(time.Second / 2) // Drop all the connections and start over
LogWarning(e, ErrDBDown.Error())
}
atomic.StoreInt32(&IsDBDown, 1)
return true
}
if isDBDown == 1 {
log.Print("The database is back")
}
//db.SetConnMaxLifetime(time.Second * 60 * 5) // Make this infinite as the temporary lifetime change will purge the stale connections?
db.SetConnMaxLifetime(-1)
atomic.StoreInt32(&IsDBDown, 0)
return false
}
// TODO: Move these into DailyTick() methods?
func asmMatches() error {
// TODO: Find a more efficient way of doing this
return qgen.NewAcc().Select("activity_stream").Cols("asid").EachInt(func(asid int) error {
if ActivityMatches.CountAsid(asid) > 0 {
return nil
}
return Activity.Delete(asid)
})
}
// TODO: Name the tasks so we can figure out which one it was when something goes wrong? Or maybe toss it up WithStack down there?
func RunTasks(tasks []func() error) error {
for _, task := range tasks {
if e := task(); e != nil {
return e
}
}
return nil
}
/*func init() {
DbInits.Add(func(acc *qgen.Accumulator) error {
replyStmts = ReplyStmts{
isLiked: acc.Select("likes").Columns("targetItem").Where("sentBy=? and targetItem=? and targetType='replies'").Prepare(),
}
return acc.FirstError()
})
}*/
func StartupTasks() (e error) {
r := func(ee error) {
if e == nil {
e = ee
}
}
if Config.DisableRegLog {
r(RegLogs.Purge())
}
if Config.DisableLoginLog {
r(LoginLogs.Purge())
}
if Config.DisablePostIP {
// TODO: Clear the caches?
r(Topics.ClearIPs())
r(Rstore.ClearIPs())
r(Prstore.ClearIPs())
}
if Config.DisablePollIP {
r(Polls.ClearIPs())
}
if Config.DisableLastIP {
r(Users.ClearLastIPs())
}
return e
}
func Dailies() (e error) {
if e = asmMatches(); e != nil {
return e
}
newAcc := func() *qgen.Accumulator {
return qgen.NewAcc()
}
exec := func(ac qgen.AccExec) {
if e != nil {
return
}
_, ee := ac.Exec()
e = ee
}
r := func(ee error) {
if e == nil {
e = ee
}
}
if Config.LogPruneCutoff > -1 {
// TODO: Clear the caches?
if !Config.DisableLoginLog {
r(LoginLogs.DeleteOlderThanDays(Config.LogPruneCutoff))
}
if !Config.DisableRegLog {
r(RegLogs.DeleteOlderThanDays(Config.LogPruneCutoff))
}
}
if !Config.DisablePostIP && Config.PostIPCutoff > -1 {
// TODO: Use unixtime to remove this MySQLesque logic?
f := func(tbl string) {
exec(newAcc().Update(tbl).Set("ip=''").DateOlderThan("createdAt", Config.PostIPCutoff, "day").Where("ip!=''"))
}
f("topics")
f("replies")
f("users_replies")
}
if !Config.DisablePollIP && Config.PollIPCutoff > -1 {
// TODO: Use unixtime to remove this MySQLesque logic?
exec(newAcc().Update("polls_votes").Set("ip=''").DateOlderThan("castAt", Config.PollIPCutoff, "day").Where("ip!=''"))
// TODO: Find some way of purging the ip data in polls_votes without breaking any anti-cheat measures which might be running... maybe hash it instead?
}
// TODO: lastActiveAt isn't currently set, so we can't rely on this to purge last_ips of users who haven't been on in a while
if !Config.DisableLastIP && Config.LastIPCutoff > 0 {
//exec(newAcc().Update("users").Set("last_ip='0'").DateOlderThan("lastActiveAt",c.Config.PostIPCutoff,"day").Where("last_ip!='0'"))
mon := time.Now().Month()
exec(newAcc().Update("users").Set("last_ip=''").Where("last_ip!='' AND last_ip NOT LIKE '" + strconv.Itoa(int(mon)) + "-%'"))
}
if e != nil {
return e
}
if e = RunTasks(ScheduledDayTasks); e != nil {
return e
}
e = ForumActionStore.DailyTick()
if e != nil {
return e
}
{
e := Meta.SetInt64("lastDaily", time.Now().Unix())
if e != nil {
return e
}
}
return nil
}

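To give the new struct above some context: TickLoop only owns the tickers and the per-interval callbacks; the caller (the reworked tickLoop() in the last file of this diff) must assign every *f field before starting Loop(), since Loop() invokes them unconditionally when the matching ticker fires. A minimal standalone wiring sketch, with placeholder tasks that are not part of this commit:

package main

import (
	"log"
	"time"

	c "github.com/Azareal/Gosora/common"
)

func main() {
	tl := c.NewTickLoop()
	// Every callback must be set before Loop() runs; a nil callback would panic.
	tl.HalfSecf = func() error { return nil }
	tl.Secf = func() error {
		log.Print("second tick") // placeholder task
		return nil
	}
	tl.FifteenMinf = func() error { return nil }
	tl.Hourf = func() error { return nil }
	tl.Dayf = func() error { return nil }

	go tl.Loop() // errors returned by the callbacks are passed to LogError
	time.Sleep(3 * time.Second)
}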

@ -66,8 +66,9 @@ type User struct {
}
type UserPrivacy struct {
ShowComments int // 0 = default, 1 = public, 2 = registered, 3 = friends, 4 = self, 5 = disabled / unused
AllowMessage int // 0 = default, 1 = registered, 2 = friends, 3 = mods, 4 = disabled / unused
ShowComments int // 0 = default, 1 = public, 2 = registered, 3 = friends, 4 = self, 5 = disabled / unused
AllowMessage int // 0 = default, 1 = registered, 2 = friends, 3 = mods, 4 = disabled / unused
NoPresence bool // false = default, true = true
}
func (u *User) WebSockets() *WsJSONUser {
@ -169,37 +170,39 @@ var userStmts UserStmts
func init() {
DbInits.Add(func(acc *qgen.Accumulator) error {
u := "users"
w := "uid=?"
u, w := "users", "uid=?"
set := func(s string) *sql.Stmt {
return acc.Update(u).Set(s).Where(w).Prepare()
}
userStmts = UserStmts{
activate: acc.SimpleUpdate(u, "active=1", w),
changeGroup: acc.SimpleUpdate(u, "group=?", w), // TODO: Implement user_count for users_groups here
activate: set("active=1"),
changeGroup: set("group=?"), // TODO: Implement user_count for users_groups here
delete: acc.Delete(u).Where(w).Prepare(),
setAvatar: acc.Update(u).Set("avatar=?").Where(w).Prepare(),
setName: acc.Update(u).Set("name=?").Where(w).Prepare(),
update: acc.Update(u).Set("name=?,email=?,group=?").Where(w).Prepare(), // TODO: Implement user_count for users_groups on things which use this
setAvatar: set("avatar=?"),
setName: set("name=?"),
update: set("name=?,email=?,group=?"), // TODO: Implement user_count for users_groups on things which use this
// Stat Statements
// TODO: Do +0 to avoid having as many statements?
incScore: acc.Update(u).Set("score=score+?").Where(w).Prepare(),
incPosts: acc.Update(u).Set("posts=posts+?").Where(w).Prepare(),
incBigposts: acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?").Where(w).Prepare(),
incMegaposts: acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?").Where(w).Prepare(),
incPostStats: acc.Update(u).Set("posts=posts+?,score=score+?,level=?").Where(w).Prepare(),
incBigpostStats: acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?,score=score+?,level=?").Where(w).Prepare(),
incMegapostStats: acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?,score=score+?,level=?").Where(w).Prepare(),
incTopics: acc.SimpleUpdate(u, "topics=topics+?", w),
updateLevel: acc.SimpleUpdate(u, "level=?", w),
resetStats: acc.Update(u).Set("score=0,posts=0,bigposts=0,megaposts=0,topics=0,level=0").Where(w).Prepare(),
setStats: acc.Update(u).Set("score=?,posts=?,bigposts=?,megaposts=?,topics=?,level=?").Where(w).Prepare(),
incScore: set("score=score+?"),
incPosts: set("posts=posts+?"),
incBigposts: set("posts=posts+?,bigposts=bigposts+?"),
incMegaposts: set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?"),
incPostStats: set("posts=posts+?,score=score+?,level=?"),
incBigpostStats: set("posts=posts+?,bigposts=bigposts+?,score=score+?,level=?"),
incMegapostStats: set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?,score=score+?,level=?"),
incTopics: set("topics=topics+?"),
updateLevel: set("level=?"),
resetStats: set("score=0,posts=0,bigposts=0,megaposts=0,topics=0,level=0"),
setStats: set("score=?,posts=?,bigposts=?,megaposts=?,topics=?,level=?"),
incLiked: acc.Update(u).Set("liked=liked+?,lastLiked=UTC_TIMESTAMP()").Where(w).Prepare(),
decLiked: acc.Update(u).Set("liked=liked-?").Where(w).Prepare(),
incLiked: set("liked=liked+?,lastLiked=UTC_TIMESTAMP()"),
decLiked: set("liked=liked-?"),
//recalcLastLiked: acc...
updateLastIP: acc.SimpleUpdate(u, "last_ip=?", w),
updatePrivacy: acc.Update(u).Set("profile_comments=?,enable_embeds=?").Where(w).Prepare(),
updateLastIP: set("last_ip=?"),
updatePrivacy: set("profile_comments=?,enable_embeds=?"),
setPassword: acc.Update(u).Set("password=?,salt=?").Where(w).Prepare(),
setPassword: set("password=?,salt=?"),
scheduleAvatarResize: acc.Insert("users_avatar_queue").Columns("uid").Fields("?").Prepare(),
@ -269,34 +272,34 @@ func (u *User) ScheduleGroupUpdate(gid, issuedBy int, dur time.Duration) error {
}
revertAt := time.Now().Add(dur)
tx, err := qgen.Builder.Begin()
if err != nil {
return err
tx, e := qgen.Builder.Begin()
if e != nil {
return e
}
defer tx.Rollback()
err = u.deleteScheduleGroupTx(tx)
if err != nil {
return err
e = u.deleteScheduleGroupTx(tx)
if e != nil {
return e
}
createScheduleGroupTx, err := qgen.Builder.SimpleInsertTx(tx, "users_groups_scheduler", "uid,set_group,issued_by,issued_at,revert_at,temporary", "?,?,?,UTC_TIMESTAMP(),?,?")
if err != nil {
return err
createScheduleGroupTx, e := qgen.Builder.SimpleInsertTx(tx, "users_groups_scheduler", "uid,set_group,issued_by,issued_at,revert_at,temporary", "?,?,?,UTC_TIMESTAMP(),?,?")
if e != nil {
return e
}
_, err = createScheduleGroupTx.Exec(u.ID, gid, issuedBy, revertAt, temp)
if err != nil {
return err
_, e = createScheduleGroupTx.Exec(u.ID, gid, issuedBy, revertAt, temp)
if e != nil {
return e
}
err = u.setTempGroupTx(tx, gid)
if err != nil {
return err
e = u.setTempGroupTx(tx, gid)
if e != nil {
return e
}
err = tx.Commit()
e = tx.Commit()
u.CacheRemove()
return err
return e
}
func (u *User) RevertGroupUpdate() error {
@ -338,11 +341,8 @@ func (u *User) Activate() (e error) {
// TODO: Expose this to the admin?
func (u *User) Delete() error {
_, e := userStmts.delete.Exec(u.ID)
if e != nil {
return e
}
u.CacheRemove()
return nil
return e
}
// TODO: dismiss-event
@ -530,17 +530,17 @@ func (u *User) ChangeAvatar(avatar string) error {
}
// TODO: Abstract this with an interface so we can scale this with an actual dedicated queue in a real cluster
func (u *User) ScheduleAvatarResize() (err error) {
_, err = userStmts.scheduleAvatarResize.Exec(u.ID)
if err != nil {
func (u *User) ScheduleAvatarResize() (e error) {
_, e = userStmts.scheduleAvatarResize.Exec(u.ID)
if e != nil {
// TODO: Do a more generic check so that we're not as tied to MySQL
me, ok := err.(*mysql.MySQLError)
me, ok := e.(*mysql.MySQLError)
if !ok {
return err
return e
}
// If it's just telling us that the item already exists in the database, then we can ignore it, as it doesn't matter if it's this call or another which schedules the item in the queue
if me.Number != 1062 {
return err
return e
}
}
return nil
@ -557,11 +557,11 @@ func (u *User) GetIP() string {
// ! Only updates the database not the *User for safety reasons
func (u *User) UpdateIP(ip string) error {
_, err := userStmts.updateLastIP.Exec(ip, u.ID)
_, e := userStmts.updateLastIP.Exec(ip, u.ID)
if uc := Users.GetCache(); uc != nil {
uc.Remove(u.ID)
}
return err
return e
}
//var ErrMalformedInteger = errors.New("malformed integer")
@ -722,6 +722,28 @@ func (u *User) InitPerms() {
}
}
// TODO: Write unit tests for this
func InitPerms2(group int, superAdmin bool, tempGroup int) (perms *Perms, admin, superMod, banned bool) {
if tempGroup != 0 {
group = tempGroup
}
g := Groups.DirtyGet(group)
if superAdmin {
perms = &AllPerms
} else {
perms = &g.Perms
}
admin = superAdmin || g.IsAdmin
superMod = admin || g.IsMod
banned = g.IsBanned
if banned && superMod {
banned = false
}
return perms, admin, superMod, banned
}
// TODO: Write tests
// TODO: Implement and use this
// TODO: Implement friends
@ -781,7 +803,14 @@ func buildNoavatar(uid, width int) string {
l(10)
}
if !Config.DisableDefaultNoavatar && uid < 11 {
if width == 200 {
/*if uid < 6 {
if width == 200 {
return noavatarCache200Avif[uid]
} else if width == 48 {
return noavatarCache48Avif[uid]
}
return StaticFiles.Prefix + "n" + strconv.Itoa(uid) + "-" + strconv.Itoa(width) + ".avif?i=0"
} else */if width == 200 {
return noavatarCache200[uid]
} else if width == 48 {
return noavatarCache48[uid]

main.go (20 changed lines)

@ -278,6 +278,10 @@ func storeInit() (e error) {
if e != nil {
return ws(e)
}
c.ActivityMatches, e = c.NewDefaultActivityStreamMatches(acc)
if e != nil {
return ws(e)
}
// TODO: Let the admin choose other thumbnailers, maybe ones defined in plugins
c.Thumbnailer = c.NewCaireThumbnailer()
c.Recalc, e = c.NewDefaultRecalc(acc)
@ -545,7 +549,9 @@ func main() {
// TODO: Could we expand this to attachments and other things too?
thumbChan := make(chan bool)
go c.ThumbTask(thumbChan)
go tickLoop(thumbChan)
if err = tickLoop(thumbChan); err != nil {
c.LogError(err)
}
// Resource Management Goroutine
go func() {
@ -575,7 +581,9 @@ func main() {
}()
log.Print("Initialising the router")
router, err = NewGenRouter(http.FileServer(http.Dir("./uploads")))
router, err = NewGenRouter(&RouterConfig{
Uploads: http.FileServer(http.Dir("./uploads")),
})
if err != nil {
log.Fatal(err)
}
@ -589,7 +597,7 @@ func main() {
go func() {
sig := <-sigs
// TODO: Gracefully shutdown the HTTP server
runTasks(c.ShutdownTasks)
c.RunTasks(c.ShutdownTasks)
c.StoppedServer("Received a signal to shutdown: ", sig)
}()
@ -597,9 +605,9 @@ func main() {
c.WsHub.Start()
if false {
f, err := os.Create(c.Config.LogDir + "cpu.prof")
if err != nil {
log.Fatal(err)
f, e := os.Create(c.Config.LogDir + "cpu.prof")
if e != nil {
log.Fatal(e)
}
pprof.StartCPUProfile(f)
}

File diff suppressed because it is too large.


@ -123,7 +123,12 @@ func (r *GenRouter) DailyTick() error {
return rotateLog(r.reqLog2, "reqs-")
}
func NewGenRouter(uploads http.Handler) (*GenRouter, error) {
type RouterConfig struct {
Uploads http.Handler
DisableTick bool
}
func NewGenRouter(cfg *RouterConfig) (*GenRouter, error) {
stimestr := strconv.FormatInt(c.StartTime.Unix(), 10)
createLog := func(name, stimestr string) (*RouterLog, error) {
f, err := os.OpenFile(c.Config.LogDir+name+"-"+stimestr+".log", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0755)
@ -154,17 +159,21 @@ func NewGenRouter(uploads http.Handler) (*GenRouter, error) {
}
reqMiscLog := log.New(f3, "", log.LstdFlags)
return &GenRouter{
ro := &GenRouter{
UploadHandler: func(w http.ResponseWriter, r *http.Request) {
writ := NewWriterIntercept(w)
http.StripPrefix("/uploads/", uploads).ServeHTTP(writ, r)
http.StripPrefix("/uploads/", cfg.Uploads).ServeHTTP(writ, r)
},
extraRoutes: make(map[string]func(http.ResponseWriter, *http.Request, *c.User) c.RouteError),
reqLogger: reqMiscLog,
reqLog2: reqLog,
suspLog: suspReqLog,
}, nil
}
if !cfg.DisableTick {
c.AddScheduledDayTask(ro.DailyTick)
}
return ro, nil
}
func (r *GenRouter) handleError(err c.RouteError, w http.ResponseWriter, req *http.Request, u *c.User) {

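The RouterConfig change above replaces the bare http.Handler parameter and, presumably for the new tests, adds a DisableTick switch so a router can be built without registering its DailyTick as a scheduled daily task. A hypothetical test-side usage (the test name is made up, and the usual config/log-dir setup is assumed to have run first):

package main

import (
	"net/http"
	"testing"
)

// Hypothetical usage of the new RouterConfig in a test; not taken from this commit.
func TestNewGenRouterWithoutDailyTick(t *testing.T) {
	r, err := NewGenRouter(&RouterConfig{
		Uploads:     http.FileServer(http.Dir("./uploads")),
		DisableTick: true, // skip c.AddScheduledDayTask(ro.DailyTick)
	})
	if err != nil {
		t.Fatal(err)
	}
	_ = r
}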

@ -4,266 +4,138 @@ import (
"database/sql"
"log"
"strconv"
"sync/atomic"
"time"
c "github.com/Azareal/Gosora/common"
qgen "github.com/Azareal/Gosora/query_gen"
"github.com/pkg/errors"
)
// TODO: Name the tasks so we can figure out which one it was when something goes wrong? Or maybe toss it up WithStack down there?
func runTasks(tasks []func() error) {
for _, task := range tasks {
if e := task(); e != nil {
c.LogError(e)
}
}
}
var TickLoop *c.TickLoop
func startTick() (abort bool) {
isDBDown := atomic.LoadInt32(&c.IsDBDown)
if err := db.Ping(); err != nil {
// TODO: There's a bit of a race here, but it doesn't matter if this error appears multiple times in the logs as it's capped at three times, we just want to cut it down 99% of the time
if isDBDown == 0 {
db.SetConnMaxLifetime(time.Second) // Drop all the connections and start over
c.LogWarning(err)
c.LogWarning(errors.New("The database is down"))
}
atomic.StoreInt32(&c.IsDBDown, 1)
return true
}
if isDBDown == 1 {
log.Print("The database is back")
}
//db.SetConnMaxLifetime(time.Second * 60 * 5) // Make this infinite as the temporary lifetime change will purge the stale connections?
db.SetConnMaxLifetime(-1)
atomic.StoreInt32(&c.IsDBDown, 0)
return false
}
func runHook(name string) {
func runHook(name string) error {
if e := c.RunTaskHook(name); e != nil {
c.LogError(e, "Failed at task '"+name+"'")
return errors.Wrap(e, "Failed at task '"+name+"'")
}
return nil
}
func tickLoop(thumbChan chan bool) {
lastDailyStr, err := c.Meta.Get("lastDaily")
func deferredDailies() error {
lastDailyStr, e := c.Meta.Get("lastDaily")
// TODO: Report this error back correctly...
if err != nil && err != sql.ErrNoRows {
c.LogError(err)
if e != nil && e != sql.ErrNoRows {
return e
}
lastDaily, _ := strconv.ParseInt(lastDailyStr, 10, 64)
low := time.Now().Unix() - (60 * 60 * 24)
if lastDaily < low {
dailies()
}
// TODO: Write tests for these
// Run this goroutine once every half second
halfSecondTicker := time.NewTicker(time.Second / 2)
secondTicker := time.NewTicker(time.Second)
fifteenMinuteTicker := time.NewTicker(15 * time.Minute)
hourTicker := time.NewTicker(time.Hour)
dailyTicker := time.NewTicker(time.Hour * 24)
tick := func(name string, tasks []func() error) bool {
if startTick() {
return true
if e := c.Dailies(); e != nil {
return e
}
runHook("before_" + name + "_tick")
runTasks(tasks)
runHook("after_" + name + "_tick")
return false
}
for {
select {
case <-halfSecondTicker.C:
if tick("half_second", c.ScheduledHalfSecondTasks) {
continue
}
case <-secondTicker.C:
if startTick() {
continue
}
runHook("before_second_tick")
go func() { thumbChan <- true }()
runTasks(c.ScheduledSecondTasks)
// TODO: Stop hard-coding this
if err := c.HandleExpiredScheduledGroups(); err != nil {
c.LogError(err)
}
// TODO: Handle delayed moderation tasks
// Sync with the database, if there are any changes
if err = c.HandleServerSync(); err != nil {
c.LogError(err)
}
// TODO: Manage the TopicStore, UserStore, and ForumStore
// TODO: Alert the admin, if CPU usage, RAM usage, or the number of posts in the past second are too high
// TODO: Clean-up alerts with no unread matches which are over two weeks old. Move this to a 24 hour task?
// TODO: Rescan the static files for changes
runHook("after_second_tick")
case <-fifteenMinuteTicker.C:
if startTick() {
continue
}
runHook("before_fifteen_minute_tick")
runTasks(c.ScheduledFifteenMinuteTasks)
// TODO: Automatically lock topics, if they're really old, and the associated setting is enabled.
// TODO: Publish scheduled posts.
runHook("after_fifteen_minute_tick")
case <-hourTicker.C:
if startTick() {
continue
}
runHook("before_hour_tick")
jsToken, err := c.GenerateSafeString(80)
if err != nil {
c.LogError(err)
}
c.JSTokenBox.Store(jsToken)
c.OldSessionSigningKeyBox.Store(c.SessionSigningKeyBox.Load().(string)) // TODO: We probably don't need this type conversion
sessionSigningKey, err := c.GenerateSafeString(80)
if err != nil {
c.LogError(err)
}
c.SessionSigningKeyBox.Store(sessionSigningKey)
runTasks(c.ScheduledHourTasks)
runHook("after_hour_tick")
// TODO: Handle the instance going down a lot better
case <-dailyTicker.C:
dailies()
}
// TODO: Handle the daily clean-up.
}
return nil
}
func asmMatches() {
// TODO: Find a more efficient way of doing this
acc := qgen.NewAcc()
countStmt := acc.Count("activity_stream_matches").Where("asid=?").Prepare()
if err := acc.FirstError(); err != nil {
c.LogError(err)
return
func tickLoop(thumbChan chan bool) error {
tl := c.NewTickLoop()
TickLoop = tl
if e := deferredDailies(); e != nil {
return e
}
if e := c.StartupTasks(); e != nil {
return e
}
err := acc.Select("activity_stream").Cols("asid").EachInt(func(asid int) error {
var count int
err := countStmt.QueryRow(asid).Scan(&count)
if err != sql.ErrNoRows {
return err
}
if count > 0 {
tick := func(name string, tasks []func() error) error {
if c.StartTick() {
return nil
}
_, err = qgen.NewAcc().Delete("activity_stream").Where("asid=?").Run(asid)
return err
})
if err != nil && err != sql.ErrNoRows {
c.LogError(err)
}
}
func dailies() {
asmMatches()
if c.Config.DisableRegLog {
_, err := qgen.NewAcc().Purge("registration_logs").Exec()
if err != nil {
c.LogError(err)
if e := runHook("before_" + name + "_tick"); e != nil {
return e
}
}
if c.Config.LogPruneCutoff > -1 {
f := func(tbl string) {
_, err := qgen.NewAcc().Delete(tbl).DateOlderThan("doneAt", c.Config.LogPruneCutoff, "day").Run()
if err != nil {
c.LogError(err)
}
if e := c.RunTasks(tasks); e != nil {
return e
}
f("login_logs")
f("registration_logs")
return runHook("after_" + name + "_tick")
}
if c.Config.DisablePostIP {
f := func(tbl string) {
_, err := qgen.NewAcc().Update(tbl).Set("ip=''").Where("ip!=''").Exec()
if err != nil {
c.LogError(err)
}
tl.HalfSecf = func() error {
return tick("half_second", c.ScheduledHalfSecondTasks)
}
// TODO: Automatically lock topics, if they're really old, and the associated setting is enabled.
// TODO: Publish scheduled posts.
tl.FifteenMinf = func() error {
return tick("fifteen_minute", c.ScheduledFifteenMinuteTasks)
}
// TODO: Handle the instance going down a lot better
// TODO: Handle the daily clean-up.
tl.Dayf = func() error {
if c.StartTick() {
return nil
}
f("topics")
f("replies")
f("users_replies")
} else if c.Config.PostIPCutoff > -1 {
// TODO: Use unixtime to remove this MySQLesque logic?
f := func(tbl string) {
_, err := qgen.NewAcc().Update(tbl).Set("ip=''").DateOlderThan("createdAt", c.Config.PostIPCutoff, "day").Where("ip!=''").Exec()
if err != nil {
c.LogError(err)
}
}
f("topics")
f("replies")
f("users_replies")
return c.Dailies()
}
if c.Config.DisablePollIP {
_, err := qgen.NewAcc().Update("polls_votes").Set("ip=''").Where("ip!=''").Exec()
if err != nil {
c.LogError(err)
tl.Secf = func() (e error) {
if c.StartTick() {
return nil
}
} else if c.Config.PollIPCutoff > -1 {
// TODO: Use unixtime to remove this MySQLesque logic?
_, err := qgen.NewAcc().Update("polls_votes").Set("ip=''").DateOlderThan("castAt", c.Config.PollIPCutoff, "day").Where("ip!=''").Exec()
if err != nil {
c.LogError(err)
if e = runHook("before_second_tick"); e != nil {
return e
}
go func() { thumbChan <- true }()
if e = c.RunTasks(c.ScheduledSecondTasks); e != nil {
return e
}
// TODO: Find some way of purging the ip data in polls_votes without breaking any anti-cheat measures which might be running... maybe hash it instead?
}
// TODO: lastActiveAt isn't currently set, so we can't rely on this to purge last_ips of users who haven't been on in a while
if c.Config.DisableLastIP {
_, err := qgen.NewAcc().Update("users").Set("last_ip=''").Where("last_ip!=''").Exec()
if err != nil {
c.LogError(err)
// TODO: Stop hard-coding this
if e = c.HandleExpiredScheduledGroups(); e != nil {
return e
}
} else if c.Config.LastIPCutoff > 0 {
/*_, err = qgen.NewAcc().Update("users").Set("last_ip='0'").DateOlderThan("lastActiveAt",c.Config.PostIPCutoff,"day").Where("last_ip!='0'").Exec()
if err != nil {
c.LogError(err)
}*/
mon := time.Now().Month()
_, err := qgen.NewAcc().Update("users").Set("last_ip=''").Where("last_ip!='' AND last_ip NOT LIKE '" + strconv.Itoa(int(mon)) + "-%'").Exec()
if err != nil {
c.LogError(err)
// TODO: Handle delayed moderation tasks
// Sync with the database, if there are any changes
if e = c.HandleServerSync(); e != nil {
return e
}
// TODO: Manage the TopicStore, UserStore, and ForumStore
// TODO: Alert the admin, if CPU usage, RAM usage, or the number of posts in the past second are too high
// TODO: Clean-up alerts with no unread matches which are over two weeks old. Move this to a 24 hour task?
// TODO: Rescan the static files for changes
return runHook("after_second_tick")
}
e := router.DailyTick()
if e != nil {
c.LogError(e)
}
e = c.ForumActionStore.DailyTick()
if e != nil {
c.LogError(e)
}
tl.Hourf = func() error {
if c.StartTick() {
return nil
}
if e := runHook("before_hour_tick"); e != nil {
return e
}
{
e := c.Meta.Set("lastDaily", strconv.FormatInt(time.Now().Unix(), 10))
jsToken, e := c.GenerateSafeString(80)
if e != nil {
c.LogError(e)
return e
}
c.JSTokenBox.Store(jsToken)
c.OldSessionSigningKeyBox.Store(c.SessionSigningKeyBox.Load().(string)) // TODO: We probably don't need this type conversion
sessionSigningKey, e := c.GenerateSafeString(80)
if e != nil {
return e
}
c.SessionSigningKeyBox.Store(sessionSigningKey)
if e = c.RunTasks(c.ScheduledHourTasks); e != nil {
return e
}
return runHook("after_hour_tick")
}
go tl.Loop()
return nil
}
func sched() error {