code review fixes

parent e8ea485a0d
commit 48129c03d1
@@ -36,11 +36,8 @@ func TestGetContainerHintsFromFile(t *testing.T) {
 }
 
 func TestFileNotExist(t *testing.T) {
-    cHints, err := getContainerHintsFromFile("/file_does_not_exist.json")
+    _, err := getContainerHintsFromFile("/file_does_not_exist.json")
     if err != nil {
         t.Fatalf("getContainerHintsFromFile must not error for blank file: %s", err)
     }
-    for _, container := range cHints.AllHosts {
-        t.Logf("Container: %s", container)
-    }
 }
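With the logging loop over cHints.AllHosts removed, the returned hints are no longer used anywhere, and Go rejects declared-and-unused variables at compile time, hence the blank identifier. A minimal standalone sketch of the pattern (parseHints is a hypothetical stand-in for getContainerHintsFromFile):

package main

import "fmt"

// parseHints stands in for getContainerHintsFromFile in this sketch.
func parseHints(path string) (string, error) { return "", nil }

func main() {
    // Assigning the result to _ keeps the error check while discarding
    // the value; `hints, err := parseHints(...)` without using hints
    // would fail to compile as an unused variable.
    _, err := parseHints("/file_does_not_exist.json")
    fmt.Println(err) // <nil>
}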
@@ -198,9 +198,9 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
                 fs.DiskStats.WritesMerged,
                 fs.DiskStats.SectorsWritten,
                 fs.DiskStats.WriteTime,
-                fs.DiskStats.IOInProgress,
-                fs.DiskStats.IOTime,
-                fs.DiskStats.WeightedIOTime,
+                fs.DiskStats.IoInProgress,
+                fs.DiskStats.IoTime,
+                fs.DiskStats.WeightedIoTime,
             })
         }
     } else if len(self.externalMounts) > 0 {
@@ -224,9 +224,9 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
                 fs.DiskStats.WritesMerged,
                 fs.DiskStats.SectorsWritten,
                 fs.DiskStats.WriteTime,
-                fs.DiskStats.IOInProgress,
-                fs.DiskStats.IOTime,
-                fs.DiskStats.WeightedIOTime,
+                fs.DiskStats.IoInProgress,
+                fs.DiskStats.IoTime,
+                fs.DiskStats.WeightedIoTime,
             })
         }
     }
fs/fs.go (46 lines changed)
@@ -12,6 +12,7 @@ import "C"
 import (
     "bufio"
     "fmt"
+    "path"
     "os"
     "os/exec"
     "regexp"
@@ -24,6 +25,7 @@ import (
     "github.com/golang/glog"
 )
 
+var partitionRegex = regexp.MustCompile("sd[a-z]+\\d")
 type partition struct {
     mountpoint string
     major      uint
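Two things change here: the pattern is compiled once at package init with regexp.MustCompile instead of on every getDiskStatsMap call, and the added + lets it match multi-letter device names as well as sda1. MustCompile also panics on an invalid pattern at startup, whereas the old regexp.Compile call discarded its error. A standalone sketch of the behavior (not commit code):

package main

import (
    "fmt"
    "regexp"
)

// Compiled once at package init; MustCompile panics on a bad pattern
// instead of returning an error that could be silently discarded.
var partitionRegex = regexp.MustCompile("sd[a-z]+\\d")

func main() {
    fmt.Println(partitionRegex.MatchString("sda1"))  // true
    fmt.Println(partitionRegex.MatchString("sdaa3")) // true: the + allows multi-letter names
    fmt.Println(partitionRegex.MatchString("loop0")) // false
}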
@@ -83,16 +85,20 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
 }
 
 func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
+    diskStatsMap := make(map[string]DiskStats)
     file, err := os.Open(diskStatsFile)
     if err != nil {
+        if os.IsNotExist(err) {
+            glog.Infof("not collecting filesystem statistics because file %q was not available", diskStatsFile)
+            return diskStatsMap, nil
+        }
         return nil, err
     }
+
     defer file.Close()
+
     scanner := bufio.NewScanner(file)
-    diskStatsMap := make(map[string]DiskStats)
-    partitionRegex, _ := regexp.Compile("sd[a-z]\\d")
-
 
     for scanner.Scan() {
         line := scanner.Text()
         words := strings.Fields(line)
@@ -100,28 +106,32 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
             continue
         }
         // 8      50 sdd2 40 0 280 223 7 0 22 108 0 330 330
-        deviceName := "/dev/" + words[2]
+        deviceName := path.Join("/dev", words[2])
         wordLength := len(words)
-        var stats = make([]uint64, wordLength)
+        offset := 3
+        var stats = make([]uint64, wordLength-offset)
+        if len(stats) < 11 {
+            return nil, fmt.Errorf("could not parse all 11 columns of /proc/diskstats")
+        }
         var error error
-        for i := 3; i < wordLength; i++ {
-            stats[i], error = strconv.ParseUint(words[i], 10, 64)
+        for i := offset; i < wordLength; i++ {
+            stats[i-offset], error = strconv.ParseUint(words[i], 10, 64)
             if error != nil {
                 return nil, error
             }
         }
         diskStats := DiskStats{
-            ReadsCompleted:  stats[3],
-            ReadsMerged:     stats[4],
-            SectorsRead:     stats[5],
-            ReadTime:        stats[6],
-            WritesCompleted: stats[7],
-            WritesMerged:    stats[8],
-            SectorsWritten:  stats[9],
-            WriteTime:       stats[10],
-            IOInProgress:    stats[11],
-            IOTime:          stats[12],
-            WeightedIOTime:  stats[13],
+            ReadsCompleted:  stats[0],
+            ReadsMerged:     stats[1],
+            SectorsRead:     stats[2],
+            ReadTime:        stats[3],
+            WritesCompleted: stats[4],
+            WritesMerged:    stats[5],
+            SectorsWritten:  stats[6],
+            WriteTime:       stats[7],
+            IoInProgress:    stats[8],
+            IoTime:          stats[9],
+            WeightedIoTime:  stats[10],
         }
         diskStatsMap[deviceName] = diskStats
     }
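The net effect of the offset change is that the eleven /proc/diskstats counters land at stats[0] through stats[10] instead of stats[3] through stats[13], so the slice no longer carries three wasted zero slots and the < 11 guard can reject short lines up front. A standalone sketch (my own example, reusing the sample line from the comment above):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // A /proc/diskstats line: major, minor, device name, then 11 counters.
    line := "8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330"
    words := strings.Fields(line)
    offset := 3 // skip major, minor, and device name
    stats := make([]uint64, len(words)-offset)
    for i := offset; i < len(words); i++ {
        v, err := strconv.ParseUint(words[i], 10, 64)
        if err != nil {
            panic(err)
        }
        stats[i-offset] = v
    }
    // stats[0] is now "reads completed" and stats[10] "weighted IO time",
    // matching the DiskStats field assignments in the hunk above.
    fmt.Println(stats[0], stats[10]) // 40 330
}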
@@ -45,3 +45,10 @@ func TestGetDiskStatsMap(t *testing.T) {
         t.Errorf("diskStatsMap %s contains illegal keys %s", diskStatsMap, keySet)
     }
 }
+
+func TestFileNotExist(t *testing.T) {
+    _, err := getDiskStatsMap("/file_does_not_exist")
+    if err != nil {
+        t.Fatalf("getDiskStatsMap must not error for absent file: %s", err)
+    }
+}
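Both new TestFileNotExist tests pin down the same contract: a missing input file is not an error, it simply means no stats are collected. That contract hinges on os.IsNotExist separating absence from genuine failures such as permission errors; a small standalone sketch (not commit code):

package main

import (
    "fmt"
    "os"
)

func main() {
    _, err := os.Open("/file_does_not_exist")
    if os.IsNotExist(err) {
        // Absence is expected on hosts without the stats file
        // (or in test environments); treat it as "no stats".
        fmt.Println("no stats available, continuing without error")
        return
    }
    if err != nil {
        fmt.Println("real error:", err) // permission problems, etc.
    }
}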
@@ -22,9 +22,9 @@ type DiskStats struct {
     WritesMerged   uint64
     SectorsWritten uint64
     WriteTime      uint64
-    IOInProgress   uint64
-    IOTime         uint64
-    WeightedIOTime uint64
+    IoInProgress   uint64
+    IoTime         uint64
+    WeightedIoTime uint64
 }
 
 type FsInfo interface {
@@ -241,59 +241,59 @@ type FsStats struct {
     // Number of bytes that is consumed by the container on this filesystem.
     Usage uint64 `json:"usage"`
 
-    // # of reads completed
+    // Number of reads completed
     // This is the total number of reads completed successfully.
     ReadsCompleted uint64 `json:"reads_completed"`
 
-    // # of reads merged
+    // Number of reads merged
     // Reads and writes which are adjacent to each other may be merged for
     // efficiency. Thus two 4K reads may become one 8K read before it is
     // ultimately handed to the disk, and so it will be counted (and queued)
     // as only one I/O. This field lets you know how often this was done.
     ReadsMerged uint64 `json:"reads_merged"`
 
-    // # of sectors read
+    // Number of sectors read
     // This is the total number of sectors read successfully.
     SectorsRead uint64 `json:"sectors_read"`
 
-    // # of milliseconds spent reading
+    // Number of milliseconds spent reading
     // This is the total number of milliseconds spent by all reads (as
     // measured from __make_request() to end_that_request_last()).
     ReadTime uint64 `json:"read_time"`
 
-    // # of writes completed
+    // Number of writes completed
    // This is the total number of writes completed successfully.
     WritesCompleted uint64 `json:"writes_completed"`
 
-    // # of writes merged
+    // Number of writes merged
     // See the description of reads merged.
     WritesMerged uint64 `json:"writes_merged"`
 
-    // # of sectors written
+    // Number of sectors written
     // This is the total number of sectors written successfully.
     SectorsWritten uint64 `json:"sectors_written"`
 
-    // # of milliseconds spent writing
+    // Number of milliseconds spent writing
     // This is the total number of milliseconds spent by all writes (as
     // measured from __make_request() to end_that_request_last()).
     WriteTime uint64 `json:"write_time"`
 
-    // # of I/Os currently in progress
+    // Number of I/Os currently in progress
     // The only field that should go to zero. Incremented as requests are
     // given to appropriate struct request_queue and decremented as they finish.
-    IOInProgress uint64 `json:"io_in_progress"`
+    IoInProgress uint64 `json:"io_in_progress"`
 
-    // # of milliseconds spent doing I/Os
+    // Number of milliseconds spent doing I/Os
     // This field increases so long as field 9 is nonzero.
-    IOTime uint64 `json:"io_time"`
+    IoTime uint64 `json:"io_time"`
 
-    // weighted # of milliseconds spent doing I/Os
+    // weighted number of milliseconds spent doing I/Os
     // This field is incremented at each I/O start, I/O completion, I/O
     // merge, or read of these stats by the number of I/Os in progress
     // (field 9) times the number of milliseconds spent doing I/O since the
     // last update of this field. This can provide an easy measure of both
     // I/O completion time and the backlog that may be accumulating.
-    WeightedIOTime uint64 `json:"weighted_io_time"`
+    WeightedIoTime uint64 `json:"weighted_io_time"`
 }
 
 type ContainerStats struct {
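Because every counter keeps its snake_case json tag, the IO-to-Io rename changes only the Go identifiers, not the serialized API. A quick standalone sketch (hypothetical values, struct trimmed to the renamed fields) of the marshaled output:

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copy of the struct above, just enough to show the tags.
type FsStats struct {
    IoInProgress   uint64 `json:"io_in_progress"`
    IoTime         uint64 `json:"io_time"`
    WeightedIoTime uint64 `json:"weighted_io_time"`
}

func main() {
    out, _ := json.Marshal(FsStats{IoInProgress: 0, IoTime: 330, WeightedIoTime: 330})
    fmt.Println(string(out))
    // {"io_in_progress":0,"io_time":330,"weighted_io_time":330}
}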