2017-05-11 13:04:43 +00:00
// +build !no_ws
2017-09-13 15:09:13 +00:00
/*
*
* Gosora WebSocket Subsystem
* Copyright Azareal 2017 - 2018
*
 */
2018-03-08 03:59:47 +00:00
package common
2017-05-11 13:04:43 +00:00
2017-09-03 04:50:31 +00:00
import (
2017-06-10 07:58:15 +00:00
"bytes"
2018-06-24 13:49:29 +00:00
"encoding/json"
2017-06-10 07:58:15 +00:00
"errors"
2017-09-03 04:50:31 +00:00
"fmt"
2017-06-10 07:58:15 +00:00
"net/http"
2017-09-03 04:50:31 +00:00
"runtime"
"strconv"
"sync"
"time"
2017-06-10 07:58:15 +00:00
2017-07-17 10:23:42 +00:00
"github.com/Azareal/gopsutil/cpu"
"github.com/Azareal/gopsutil/mem"
2017-09-03 04:50:31 +00:00
"github.com/gorilla/websocket"
2017-06-10 07:58:15 +00:00
)
2017-05-11 13:04:43 +00:00
2017-09-13 15:09:13 +00:00
// WSUser pairs a live WebSocket connection with the account it belongs to.
type WSUser struct {
	conn *websocket.Conn
	User *User
}
2018-06-24 13:49:29 +00:00
// TODO: Make this an interface?
// WsHubImpl tracks every live WebSocket connection, split into logged-in
// users (keyed by user ID) and guests (keyed by connection, as they have no ID).
type WsHubImpl struct {
	// TODO: Shard this map
	OnlineUsers  map[int]*WSUser  // guarded by UserLock
	OnlineGuests map[*WSUser]bool // guarded by GuestLock
	GuestLock    sync.RWMutex
	UserLock     sync.RWMutex

	lastTick      time.Time    // when Tick last completed
	lastTopicList []*TopicsRow // snapshot from the previous Tick, used for change detection
}
2017-09-13 15:09:13 +00:00
// TODO: Disable WebSockets on high load? Add a Control Panel interface for disabling it?
var EnableWebsockets = true // Put this in caps for consistency with the other constants?

// TODO: Rename this to WebSockets?
var WsHub WsHubImpl

// wsUpgrader performs the HTTP -> WebSocket handshake for incoming requests.
var wsUpgrader = websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}

// errWsNouser is returned when a push targets a user with no live WebSocket connection.
var errWsNouser = errors.New("This user isn't connected via WebSockets")
2017-05-11 13:04:43 +00:00
func init ( ) {
2017-09-13 15:09:13 +00:00
adminStatsWatchers = make ( map [ * WSUser ] bool )
2018-06-24 13:49:29 +00:00
topicListWatchers = make ( map [ * WSUser ] bool )
// TODO: Do we really want to initialise this here instead of in main.go / general_test.go like the other things?
WsHub = WsHubImpl {
2018-03-08 03:59:47 +00:00
OnlineUsers : make ( map [ int ] * WSUser ) ,
OnlineGuests : make ( map [ * WSUser ] bool ) ,
2017-05-11 13:04:43 +00:00
}
}
2018-06-24 13:49:29 +00:00
// Start hooks the hub's Tick into the once-a-second scheduler so the live
// topic list gets pushed out. It is a no-op when the live topic list is
// disabled in the configuration.
func (hub *WsHubImpl) Start() {
	//fmt.Println("running hub.Start")
	if Config.DisableLiveTopicList {
		return
	}
	hub.lastTick = time.Now()
	AddScheduledSecondTask(hub.Tick)
}
// WsTopicList is the JSON payload pushed to /topics/ watchers on each tick.
type WsTopicList struct {
	Topics []*WsTopicsRow
}
// This Tick is seperate from the admin one, as we want to process that in parallel with this due to the blocking calls to gopsutil
func ( hub * WsHubImpl ) Tick ( ) error {
//fmt.Println("running hub.Tick")
// Don't waste CPU time if nothing has happened
// TODO: Get a topic list method which strips stickies?
tList , _ , _ , err := TopicList . GetList ( 1 )
if err != nil {
hub . lastTick = time . Now ( )
return err // TODO: Do we get ErrNoRows here?
}
defer func ( ) {
hub . lastTick = time . Now ( )
hub . lastTopicList = tList
} ( )
if len ( tList ) == 0 {
return nil
}
//fmt.Println("checking for changes")
// TODO: Optimise this by only sniffing the top non-sticky
if len ( tList ) == len ( hub . lastTopicList ) {
var hasItem = false
for j , tItem := range tList {
if ! tItem . Sticky {
if tItem . ID != hub . lastTopicList [ j ] . ID {
hasItem = true
}
}
}
if ! hasItem {
return nil
}
}
// TODO: Implement this for guests too? Should be able to optimise it far better there due to them sharing the same permission set
// TODO: Be less aggressive with the locking, maybe use an array of sorts instead of hitting the main map every-time
topicListMutex . RLock ( )
if len ( topicListWatchers ) == 0 {
//fmt.Println("no watchers")
topicListMutex . RUnlock ( )
return nil
}
//fmt.Println("found changes")
// Copy these over so we close this loop as fast as possible so we can release the read lock, especially if the group gets are backed by calls to the database
var groupIDs = make ( map [ int ] bool )
var currentWatchers = make ( [ ] * WSUser , len ( topicListWatchers ) )
var i = 0
for wsUser , _ := range topicListWatchers {
currentWatchers [ i ] = wsUser
groupIDs [ wsUser . User . Group ] = true
i ++
}
topicListMutex . RUnlock ( )
var groups = make ( map [ int ] * Group )
var canSeeMap = make ( map [ string ] [ ] int )
for groupID , _ := range groupIDs {
group , err := Groups . Get ( groupID )
if err != nil {
// TODO: Do we really want to halt all pushes for what is possibly just one user?
return err
}
groups [ group . ID ] = group
var canSee = make ( [ ] byte , len ( group . CanSee ) )
for i , item := range group . CanSee {
canSee [ i ] = byte ( item )
}
canSeeMap [ string ( canSee ) ] = group . CanSee
}
var canSeeRenders = make ( map [ string ] [ ] byte )
for name , canSee := range canSeeMap {
topicList , forumList , _ , err := TopicList . GetListByCanSee ( canSee , 1 )
if err != nil {
return err // TODO: Do we get ErrNoRows here?
}
if len ( topicList ) == 0 {
continue
}
_ = forumList // Might use this later after we get the base feature working
//fmt.Println("canSeeItem")
if topicList [ 0 ] . Sticky {
var lastSticky = 0
for i , row := range topicList {
if ! row . Sticky {
lastSticky = i
break
}
}
if lastSticky == 0 {
continue
}
//fmt.Println("lastSticky: ", lastSticky)
//fmt.Println("before topicList: ", topicList)
topicList = topicList [ lastSticky : ]
//fmt.Println("after topicList: ", topicList)
}
// TODO: Compare to previous tick to eliminate unnecessary work and data
var wsTopicList = make ( [ ] * WsTopicsRow , len ( topicList ) )
for i , topicRow := range topicList {
wsTopicList [ i ] = topicRow . WebSockets ( )
}
outBytes , err := json . Marshal ( & WsTopicList { wsTopicList } )
if err != nil {
return err
}
canSeeRenders [ name ] = outBytes
}
// TODO: Use MessagePack for additional speed?
//fmt.Println("writing to the clients")
for _ , wsUser := range currentWatchers {
group := groups [ wsUser . User . Group ]
var canSee = make ( [ ] byte , len ( group . CanSee ) )
for i , item := range group . CanSee {
canSee [ i ] = byte ( item )
}
w , err := wsUser . conn . NextWriter ( websocket . TextMessage )
if err != nil {
//fmt.Printf("werr for #%d: %s\n", wsUser.User.ID, err)
topicListMutex . Lock ( )
delete ( topicListWatchers , wsUser )
topicListMutex . Unlock ( )
continue
}
//fmt.Println("writing to user #", wsUser.User.ID)
outBytes := canSeeRenders [ string ( canSee ) ]
//fmt.Println("outBytes: ", string(outBytes))
w . Write ( outBytes )
w . Close ( )
}
return nil
}
func ( hub * WsHubImpl ) GuestCount ( ) int {
2018-03-08 03:59:47 +00:00
defer hub . GuestLock . RUnlock ( )
hub . GuestLock . RLock ( )
return len ( hub . OnlineGuests )
2017-05-11 13:04:43 +00:00
}
2018-06-24 13:49:29 +00:00
func ( hub * WsHubImpl ) UserCount ( ) int {
2018-03-08 03:59:47 +00:00
defer hub . UserLock . RUnlock ( )
hub . UserLock . RLock ( )
return len ( hub . OnlineUsers )
2017-05-11 13:04:43 +00:00
}
2018-06-24 13:49:29 +00:00
func ( hub * WsHubImpl ) broadcastMessage ( msg string ) error {
2018-03-08 03:59:47 +00:00
hub . UserLock . RLock ( )
2018-06-24 13:49:29 +00:00
defer hub . UserLock . RUnlock ( )
2018-03-08 03:59:47 +00:00
for _ , wsUser := range hub . OnlineUsers {
2017-09-03 04:50:31 +00:00
w , err := wsUser . conn . NextWriter ( websocket . TextMessage )
2017-06-10 07:58:15 +00:00
if err != nil {
return err
}
2017-09-10 16:57:22 +00:00
_ , _ = w . Write ( [ ] byte ( msg ) )
2018-06-24 13:49:29 +00:00
w . Close ( )
2017-06-10 07:58:15 +00:00
}
return nil
}
2018-06-24 13:49:29 +00:00
func ( hub * WsHubImpl ) pushMessage ( targetUser int , msg string ) error {
2018-03-08 03:59:47 +00:00
hub . UserLock . RLock ( )
wsUser , ok := hub . OnlineUsers [ targetUser ]
hub . UserLock . RUnlock ( )
2017-06-10 07:58:15 +00:00
if ! ok {
2017-09-03 04:50:31 +00:00
return errWsNouser
2017-06-10 07:58:15 +00:00
}
2017-09-03 04:50:31 +00:00
w , err := wsUser . conn . NextWriter ( websocket . TextMessage )
2017-06-10 07:58:15 +00:00
if err != nil {
return err
}
w . Write ( [ ] byte ( msg ) )
w . Close ( )
return nil
}
2018-06-24 13:49:29 +00:00
func ( hub * WsHubImpl ) pushAlert ( targetUser int , asid int , event string , elementType string , actorID int , targetUserID int , elementID int ) error {
2017-09-10 16:57:22 +00:00
//log.Print("In pushAlert")
2018-03-08 03:59:47 +00:00
hub . UserLock . RLock ( )
wsUser , ok := hub . OnlineUsers [ targetUser ]
hub . UserLock . RUnlock ( )
2017-06-10 07:58:15 +00:00
if ! ok {
2017-09-03 04:50:31 +00:00
return errWsNouser
2017-06-10 07:58:15 +00:00
}
2017-08-13 11:22:34 +00:00
//log.Print("Building alert")
2018-03-08 03:59:47 +00:00
alert , err := BuildAlert ( asid , event , elementType , actorID , targetUserID , elementID , * wsUser . User )
2017-06-10 07:58:15 +00:00
if err != nil {
return err
}
2017-08-13 11:22:34 +00:00
//log.Print("Getting WS Writer")
2017-09-03 04:50:31 +00:00
w , err := wsUser . conn . NextWriter ( websocket . TextMessage )
2017-06-10 07:58:15 +00:00
if err != nil {
return err
}
w . Write ( [ ] byte ( alert ) )
2017-09-10 16:57:22 +00:00
_ = w . Close ( )
2017-06-10 07:58:15 +00:00
return nil
}
2018-06-24 13:49:29 +00:00
func ( hub * WsHubImpl ) pushAlerts ( users [ ] int , asid int , event string , elementType string , actorID int , targetUserID int , elementID int ) error {
2017-09-13 15:09:13 +00:00
var wsUsers [ ] * WSUser
2018-03-08 03:59:47 +00:00
hub . UserLock . RLock ( )
2017-06-12 09:03:14 +00:00
// We don't want to keep a lock on this for too long, so we'll accept some nil pointers
for _ , uid := range users {
2018-03-08 03:59:47 +00:00
wsUsers = append ( wsUsers , hub . OnlineUsers [ uid ] )
2017-06-12 09:03:14 +00:00
}
2018-03-08 03:59:47 +00:00
hub . UserLock . RUnlock ( )
2017-09-03 04:50:31 +00:00
if len ( wsUsers ) == 0 {
return errWsNouser
2017-06-12 09:03:14 +00:00
}
var errs [ ] error
2017-09-03 04:50:31 +00:00
for _ , wsUser := range wsUsers {
if wsUser == nil {
2017-06-12 09:03:14 +00:00
continue
}
2018-03-08 03:59:47 +00:00
alert , err := BuildAlert ( asid , event , elementType , actorID , targetUserID , elementID , * wsUser . User )
2017-06-12 09:03:14 +00:00
if err != nil {
2017-09-03 04:50:31 +00:00
errs = append ( errs , err )
2017-06-12 09:03:14 +00:00
}
2017-09-03 04:50:31 +00:00
w , err := wsUser . conn . NextWriter ( websocket . TextMessage )
2017-06-12 09:03:14 +00:00
if err != nil {
2017-09-03 04:50:31 +00:00
errs = append ( errs , err )
2017-06-12 09:03:14 +00:00
}
w . Write ( [ ] byte ( alert ) )
w . Close ( )
}
// Return the first error
if len ( errs ) != 0 {
for _ , err := range errs {
return err
}
}
return nil
}
2017-10-30 09:57:08 +00:00
// TODO: How should we handle errors for this?
2018-03-08 03:59:47 +00:00
// TODO: Move this out of common?
// TODO: How should we handle errors for this?
// TODO: Move this out of common?
// RouteWebsockets upgrades the request to a WebSocket connection, registers
// it with the hub (as a guest when user.ID is 0), and then loops reading
// "page <path>" messages from the client until the connection drops, firing
// the page enter/leave hooks as the client navigates.
func RouteWebsockets(w http.ResponseWriter, r *http.Request, user User) RouteError {
	conn, err := wsUpgrader.Upgrade(w, r, nil)
	if err != nil {
		// The handshake failed, so there's no usable connection — bail quietly
		return nil
	}
	userptr, err := Users.Get(user.ID)
	if err != nil && err != ErrStoreCapacityOverflow {
		// NOTE(review): conn isn't closed on this path — confirm whether that leaks the connection
		return nil
	}

	wsUser := &WSUser{conn, userptr}
	if user.ID == 0 {
		WsHub.GuestLock.Lock()
		WsHub.OnlineGuests[wsUser] = true
		WsHub.GuestLock.Unlock()
	} else {
		WsHub.UserLock.Lock()
		WsHub.OnlineUsers[user.ID] = wsUser
		WsHub.UserLock.Unlock()
	}

	//conn.SetReadLimit(/* put the max request size from earlier here? */)
	//conn.SetReadDeadline(time.Now().Add(60 * time.Second))
	var currentPage []byte
	for {
		_, message, err := conn.ReadMessage()
		if err != nil {
			// Read failure means the client is gone: deregister and stop
			if user.ID == 0 {
				WsHub.GuestLock.Lock()
				delete(WsHub.OnlineGuests, wsUser)
				WsHub.GuestLock.Unlock()
			} else {
				// TODO: Make sure the admin is removed from the admin stats list in the case that an error happens
				WsHub.UserLock.Lock()
				delete(WsHub.OnlineUsers, user.ID)
				WsHub.UserLock.Unlock()
			}
			break
		}

		//log.Print("Message", message)
		//log.Print("string(Message)", string(message))
		// Clients may batch several commands into one frame, separated by \r
		messages := bytes.Split(message, []byte("\r"))
		for _, msg := range messages {
			//log.Print("Submessage", msg)
			//log.Print("Submessage", string(msg))
			if bytes.HasPrefix(msg, []byte("page ")) {
				msgblocks := bytes.SplitN(msg, []byte(" "), 2)
				if len(msgblocks) < 2 {
					continue
				}

				if !bytes.Equal(msgblocks[1], currentPage) {
					// The client navigated: run the leave hook for the old
					// page, then the enter hook for the new one
					wsLeavePage(wsUser, currentPage)
					currentPage = msgblocks[1]
					//log.Print("Current Page:", currentPage)
					//log.Print("Current Page:", string(currentPage))
					wsPageResponses(wsUser, currentPage)
				}
			}
			/*if bytes.Equal(message, []byte(`start-view`)) {
			} else if bytes.Equal(message, []byte(`end-view`)) {
			}*/
		}
	}
	conn.Close()
	return nil
}
2018-06-24 13:49:29 +00:00
// TODO: Use a map instead of a switch to make this more modular?
2017-09-13 15:09:13 +00:00
func wsPageResponses ( wsUser * WSUser , page [ ] byte ) {
2018-06-25 06:24:38 +00:00
// TODO: Could do this more efficiently?
if string ( page ) == "/" {
page = [ ] byte ( Config . DefaultPath )
}
2018-06-24 13:49:29 +00:00
//fmt.Println("entering page: ", string(page))
2017-09-03 04:50:31 +00:00
switch string ( page ) {
2018-06-24 13:49:29 +00:00
// Live Topic List is an experimental feature
// TODO: Optimise this to reduce the amount of contention
case "/topics/" :
topicListMutex . Lock ( )
topicListWatchers [ wsUser ] = true
topicListMutex . Unlock ( )
2017-09-03 04:50:31 +00:00
case "/panel/" :
// Listen for changes and inform the admins...
adminStatsMutex . Lock ( )
watchers := len ( adminStatsWatchers )
adminStatsWatchers [ wsUser ] = true
if watchers == 0 {
go adminStatsTicker ( )
}
adminStatsMutex . Unlock ( )
2017-05-11 13:04:43 +00:00
}
}
2018-06-24 13:49:29 +00:00
// TODO: Use a map instead of a switch to make this more modular?
2017-09-13 15:09:13 +00:00
func wsLeavePage ( wsUser * WSUser , page [ ] byte ) {
2018-06-25 06:24:38 +00:00
// TODO: Could do this more efficiently?
if string ( page ) == "/" {
page = [ ] byte ( Config . DefaultPath )
}
2018-06-24 13:49:29 +00:00
//fmt.Println("leaving page: ", string(page))
2017-09-03 04:50:31 +00:00
switch string ( page ) {
2018-06-24 13:49:29 +00:00
// Live Topic List is an experimental feature
case "/topics/" :
topicListMutex . Lock ( )
delete ( topicListWatchers , wsUser )
topicListMutex . Unlock ( )
2017-09-03 04:50:31 +00:00
case "/panel/" :
adminStatsMutex . Lock ( )
delete ( adminStatsWatchers , wsUser )
adminStatsMutex . Unlock ( )
2017-05-11 13:04:43 +00:00
}
}
2018-06-24 13:49:29 +00:00
// TODO: Abstract this
// TODO: Use odd-even sharding
var topicListWatchers map[*WSUser]bool // connections receiving live topic list pushes; guarded by topicListMutex
var topicListMutex sync.RWMutex

var adminStatsWatchers map[*WSUser]bool // admins watching /panel/ stats; guarded by adminStatsMutex
var adminStatsMutex sync.RWMutex
// adminStatsTicker polls CPU, RAM, and online-user counts roughly once a
// second and pushes any changes to every admin watching /panel/. It exits
// once no watchers remain; wsPageResponses restarts it when the first admin
// arrives again.
func adminStatsTicker() {
	time.Sleep(time.Second)

	// Previous tick's values, used to skip pushes when nothing changed
	var lastUonline = -1
	var lastGonline = -1
	var lastTotonline = -1
	var lastCPUPerc = -1
	var lastAvailableRAM int64 = -1
	var noStatUpdates bool
	var noRAMUpdates bool

	var onlineColour, onlineGuestsColour, onlineUsersColour, cpustr, cpuColour, ramstr, ramColour string
	var cpuerr, ramerr error
	var memres *mem.VirtualMemoryStat
	var cpuPerc []float64

	var totunit, uunit, gunit string

	// Map a value to a traffic-light CSS class: green below lowerBound,
	// orange below midBound, red otherwise
	lessThanSwitch := func(number int, lowerBound int, midBound int) string {
		switch {
		case number < lowerBound:
			return "stat_green"
		case number < midBound:
			return "stat_orange"
		}
		return "stat_red"
	}
	// The inverse mapping: green above midBound, orange above lowerBound, red otherwise
	greaterThanSwitch := func(number int, lowerBound int, midBound int) string {
		switch {
		case number > midBound:
			return "stat_green"
		case number > lowerBound:
			return "stat_orange"
		}
		return "stat_red"
	}

AdminStatLoop:
	for {
		adminStatsMutex.RLock()
		watchCount := len(adminStatsWatchers)
		adminStatsMutex.RUnlock()
		if watchCount == 0 {
			// Nobody is watching, so stop polling until someone is again
			break AdminStatLoop
		}

		cpuPerc, cpuerr = cpu.Percent(time.Second, true) // blocks for a second while sampling
		memres, ramerr = mem.VirtualMemory()
		uonline := WsHub.UserCount()
		gonline := WsHub.GuestCount()
		totonline := uonline + gonline
		reqCount := 0 // NOTE(review): always zero here — the reqs/second stat looks unimplemented; confirm

		// It's far more likely that the CPU Usage will change than the other stats, so we'll optimise them separately...
		noStatUpdates = (uonline == lastUonline && gonline == lastGonline && totonline == lastTotonline)
		noRAMUpdates = (lastAvailableRAM == int64(memres.Available))
		// NOTE(review): cpuPerc is indexed without checking cpuerr — if
		// cpu.Percent fails and returns an empty slice this panics; confirm
		if int(cpuPerc[0]) == lastCPUPerc && noStatUpdates && noRAMUpdates {
			time.Sleep(time.Second)
			continue
		}

		if !noStatUpdates {
			onlineColour = greaterThanSwitch(totonline, 3, 10)
			onlineGuestsColour = greaterThanSwitch(gonline, 1, 10)
			onlineUsersColour = greaterThanSwitch(uonline, 1, 5)

			// NOTE(review): these overwrite the raw counts before they're saved
			// into last*online below, which may defeat the change detection on
			// the next tick — confirm against ConvertFriendlyUnit's behaviour
			totonline, totunit = ConvertFriendlyUnit(totonline)
			uonline, uunit = ConvertFriendlyUnit(uonline)
			gonline, gunit = ConvertFriendlyUnit(gonline)
		}

		if cpuerr != nil {
			cpustr = "Unknown"
		} else {
			// cpu.Percent was called per-CPU; normalise into a whole-machine figure
			calcperc := int(cpuPerc[0]) / runtime.NumCPU()
			cpustr = strconv.Itoa(calcperc)
			switch {
			case calcperc < 30:
				cpuColour = "stat_green"
			case calcperc < 75:
				cpuColour = "stat_orange"
			default:
				cpuColour = "stat_red"
			}
		}

		if !noRAMUpdates {
			if ramerr != nil {
				ramstr = "Unknown"
			} else {
				totalCount, totalUnit := ConvertByteUnit(float64(memres.Total))
				usedCount := ConvertByteInUnit(float64(memres.Total-memres.Available), totalUnit)

				// Round totals with .9s up, it's how most people see it anyway. Floats are notoriously imprecise, so do it off 0.85
				var totstr string
				if (totalCount - float64(int(totalCount))) > 0.85 {
					usedCount += 1.0 - (totalCount - float64(int(totalCount)))
					totstr = strconv.Itoa(int(totalCount) + 1)
				} else {
					totstr = fmt.Sprintf("%.1f", totalCount)
				}

				// Clamp so rounding never shows more used than total
				if usedCount > totalCount {
					usedCount = totalCount
				}
				ramstr = fmt.Sprintf("%.1f", usedCount) + " / " + totstr + totalUnit

				ramperc := ((memres.Total - memres.Available) * 100) / memres.Total
				ramColour = lessThanSwitch(int(ramperc), 50, 75)
			}
		}

		// Acquire a write lock for now, so we can handle the delete() case below and the read one simultaneously
		// TODO: Stop taking a write lock here if it isn't necessary
		adminStatsMutex.Lock()
		for watcher := range adminStatsWatchers {
			w, err := watcher.conn.NextWriter(websocket.TextMessage)
			if err != nil {
				// The connection is dead, so drop it from the watcher list
				delete(adminStatsWatchers, watcher)
				continue
			}

			// nolint
			// TODO: Use JSON for this to make things more portable and easier to convert to MessagePack, if need be?
			if !noStatUpdates {
				w.Write([]byte("set #dash-totonline <span>" + strconv.Itoa(totonline) + totunit + " online</span>\r"))
				w.Write([]byte("set #dash-gonline <span>" + strconv.Itoa(gonline) + gunit + " guests online</span>\r"))
				w.Write([]byte("set #dash-uonline <span>" + strconv.Itoa(uonline) + uunit + " users online</span>\r"))
				w.Write([]byte("set #dash-reqs <span>" + strconv.Itoa(reqCount) + " reqs / second</span>\r"))

				w.Write([]byte("set-class #dash-totonline grid_item grid_stat " + onlineColour + "\r"))
				w.Write([]byte("set-class #dash-gonline grid_item grid_stat " + onlineGuestsColour + "\r"))
				w.Write([]byte("set-class #dash-uonline grid_item grid_stat " + onlineUsersColour + "\r"))
				//w.Write([]byte("set-class #dash-reqs grid_item grid_stat grid_end_group \r"))
			}

			w.Write([]byte("set #dash-cpu <span>CPU: " + cpustr + "%</span>\r"))
			w.Write([]byte("set-class #dash-cpu grid_item grid_istat " + cpuColour + "\r"))

			if !noRAMUpdates {
				w.Write([]byte("set #dash-ram <span>RAM: " + ramstr + "</span>\r"))
				w.Write([]byte("set-class #dash-ram grid_item grid_istat " + ramColour + "\r"))
			}

			w.Close()
		}
		adminStatsMutex.Unlock()

		lastUonline = uonline
		lastGonline = gonline
		lastTotonline = totonline
		lastCPUPerc = int(cpuPerc[0])
		lastAvailableRAM = int64(memres.Available)

		//time.Sleep(time.Second)
	}
}