node-api/node/load.go

package node

import (
	"github.com/jinzhu/gorm"
	"github.com/shirou/gopsutil/cpu"
	"github.com/shirou/gopsutil/disk"
	"github.com/shirou/gopsutil/load"
	"github.com/shirou/gopsutil/mem"
)

// history is the number of records kept: 3 days of history at one record
// every five minutes (864 records).
const history = 72 * 3600 / 300

// Processor covers Node related methods for monitoring and calculating performance indexes.
type Processor struct {
	DB *gorm.DB
}

// Init migrates the database tables needed for performance logging.
func (p *Processor) Init() {
	p.DB.AutoMigrate(PerformanceLog{})
}
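
// Hypothetical wiring sketch (not part of this file): the processor is assumed
// to be created with an open gorm handle (db here stands for an assumed
// *gorm.DB) and initialised once at startup:
//
//	p := &Processor{DB: db}
//	p.Init()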

// GetNodeInfo returns information about this node.
func (p *Processor) GetNodeInfo() (*Node, error) {
	return p.Index()
}

// Log stores one record of the metrics (load averages, memory and disk usage)
// that Index uses to compute the node's load index, and trims old records.
func (p *Processor) Log() error {
	performanceLog := PerformanceLog{}

	// Load averages
	loadStats, err := load.Avg()
	if err != nil {
		return err
	}
	performanceLog.Load1 = loadStats.Load1
	performanceLog.Load5 = loadStats.Load5
	performanceLog.Load15 = loadStats.Load15

	// Memory usage as a fraction of total
	memoryStat, err := mem.VirtualMemory()
	if err != nil {
		return err
	}
	performanceLog.Memory = memoryStat.UsedPercent / 100.0

	// Disk space usage of /srv as a fraction of total
	diskUsage, err := disk.Usage("/srv")
	if err != nil {
		return err
	}
	performanceLog.DiskSpaceUsage = diskUsage.UsedPercent / 100.0

	// Save
	err = p.DB.Create(&performanceLog).Error
	if err != nil {
		return err
	}

	// Clean up: delete records beyond the newest `history` entries (at most 99
	// per call). We have to delete them one by one because DELETE doesn't
	// support ORDER BY and LIMIT.
	var toDeleteLogs []PerformanceLog
	err = p.DB.Order("id DESC").Limit(99).Offset(history).Find(&toDeleteLogs).Error
	if err != nil {
		return err
	}
	for _, toDeleteLog := range toDeleteLogs {
		err = p.DB.Delete(&toDeleteLog).Error
		if err != nil {
			return err
		}
	}
	return nil
}
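
// Hypothetical scheduling sketch (not part of this file): the "one record
// every five minutes" cadence assumed by the history constant implies that Log
// is called periodically elsewhere, for example from a time.Ticker loop:
//
//	ticker := time.NewTicker(5 * time.Minute)
//	for range ticker.C {
//		if err := p.Log(); err != nil {
//			// report the error; p is an assumed *Processor
//		}
//	}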

// Index returns a number from 0 to 1, where 0 means least loaded and 1 means
// maximally loaded. It is based on the recorded history of the last 72 hours.
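// Worked example (illustrative numbers, not from the source): if the 72-hour
// averages are a 5-minute load of 2.0 on a 4-CPU node (2.0/4 = 0.5), 60% memory
// usage (0.6) and 40% disk usage (0.4), the index is max(0.5, 0.6, 0.4) = 0.6.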
func (p *Processor) Index() (*Node, error) {
	node := Node{
		Index: 1.0,
	}

	// Number of CPUs, used to normalise the load averages
	cpus, err := cpu.Counts(true)
	if err != nil {
		return &node, err
	}

	logs := []PerformanceLog{}
	err = p.DB.Find(&logs).Error
	if err != nil {
		return &node, err
	}
	// If there are no records yet we have to wait until there are; until then
	// the node reports itself as fully loaded.
	if len(logs) == 0 {
		return &node, nil
	}

	var totalLoad1 float64
	var totalLoad5 float64
	var totalLoad15 float64
	var totalDiskSpaceUsage float64
	var totalMemory float64
	for _, log := range logs {
		totalLoad1 += log.Load1
		totalLoad5 += log.Load5
		totalLoad15 += log.Load15
		totalDiskSpaceUsage += log.DiskSpaceUsage
		totalMemory += log.Memory
	}

	node.Load1Index = totalLoad1 / float64(len(logs)) / float64(cpus)
	node.Load5Index = totalLoad5 / float64(len(logs)) / float64(cpus)
	node.Load15Index = totalLoad15 / float64(len(logs)) / float64(cpus)
	node.MemoryIndex = totalMemory / float64(len(logs))
	node.DiskSpaceIndex = totalDiskSpaceUsage / float64(len(logs))

	// The final index is the worst (highest) of the 5-minute load, memory and
	// disk space indexes.
	indexes := []float64{node.Load5Index, node.MemoryIndex, node.DiskSpaceIndex}
	finalIndex := 0.0
	for _, index := range indexes {
		if index > finalIndex {
			finalIndex = index
		}
	}
	node.Index = finalIndex
	return &node, nil
}