code review fixes

Abin Shahab 2014-10-20 02:08:54 +00:00
parent e8ea485a0d
commit 48129c03d1
6 changed files with 59 additions and 45 deletions

View File

@@ -36,11 +36,8 @@ func TestGetContainerHintsFromFile(t *testing.T) {
 }
 
 func TestFileNotExist(t *testing.T) {
-	cHints, err := getContainerHintsFromFile("/file_does_not_exist.json")
+	_, err := getContainerHintsFromFile("/file_does_not_exist.json")
 	if err != nil {
 		t.Fatalf("getContainerHintsFromFile must not error for blank file: %s", err)
 	}
-	for _, container := range cHints.AllHosts {
-		t.Logf("Container: %s", container)
-	}
 }

View File

@@ -198,9 +198,9 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 				fs.DiskStats.WritesMerged,
 				fs.DiskStats.SectorsWritten,
 				fs.DiskStats.WriteTime,
-				fs.DiskStats.IOInProgress,
-				fs.DiskStats.IOTime,
-				fs.DiskStats.WeightedIOTime,
+				fs.DiskStats.IoInProgress,
+				fs.DiskStats.IoTime,
+				fs.DiskStats.WeightedIoTime,
 			})
 		}
 	} else if len(self.externalMounts) > 0 {
@@ -224,9 +224,9 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 				fs.DiskStats.WritesMerged,
 				fs.DiskStats.SectorsWritten,
 				fs.DiskStats.WriteTime,
-				fs.DiskStats.IOInProgress,
-				fs.DiskStats.IOTime,
-				fs.DiskStats.WeightedIOTime,
+				fs.DiskStats.IoInProgress,
+				fs.DiskStats.IoTime,
+				fs.DiskStats.WeightedIoTime,
 			})
 		}
 	}

View File

@@ -12,6 +12,7 @@ import "C"
 import (
 	"bufio"
 	"fmt"
+	"path"
 	"os"
 	"os/exec"
 	"regexp"
@@ -24,6 +25,7 @@ import (
 	"github.com/golang/glog"
 )
 
+var partitionRegex = regexp.MustCompile("sd[a-z]+\\d")
 type partition struct {
 	mountpoint string
 	major      uint
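
Two things change here beyond the renames, and both are easy to miss: the expression is now compiled once at package init via regexp.MustCompile (the old code, removed in the getDiskStatsMap hunk below, recompiled it on every call and discarded the error from regexp.Compile), and the pattern grew a `+`, so multi-letter device names now match. A quick illustrative check, with made-up device names:

	package main

	import (
		"fmt"
		"regexp"
	)

	// Same pattern as the package-level var added above.
	var partitionRegex = regexp.MustCompile("sd[a-z]+\\d")

	func main() {
		for _, name := range []string{"sda1", "sdaa3", "loop0"} {
			// The old pattern "sd[a-z]\\d" would have rejected "sdaa3".
			fmt.Println(name, partitionRegex.MatchString(name)) // true, true, false
		}
	}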
@@ -83,16 +85,20 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) {
 }
 
 func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
+	diskStatsMap := make(map[string]DiskStats)
 	file, err := os.Open(diskStatsFile)
 	if err != nil {
+		if os.IsNotExist(err) {
+			glog.Infof("not collecting filesystem statistics because file %q was not available", diskStatsFile)
+			return diskStatsMap, nil
+		}
 		return nil, err
 	}
 	defer file.Close()
 
 	scanner := bufio.NewScanner(file)
-	diskStatsMap := make(map[string]DiskStats)
-	partitionRegex, _ := regexp.Compile("sd[a-z]\\d")
 	for scanner.Scan() {
 		line := scanner.Text()
 		words := strings.Fields(line)
@@ -100,28 +106,32 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 			continue
 		}
 		// 8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330
-		deviceName := "/dev/" + words[2]
+		deviceName := path.Join("/dev", words[2])
 		wordLength := len(words)
-		var stats = make([]uint64, wordLength)
+		offset := 3
+		var stats = make([]uint64, wordLength-offset)
+		if len(stats) < 11 {
+			return nil, fmt.Errorf("could not parse all 11 columns of /proc/diskstats")
+		}
 		var error error
-		for i := 3; i < wordLength; i++ {
-			stats[i], error = strconv.ParseUint(words[i], 10, 64)
+		for i := offset; i < wordLength; i++ {
+			stats[i-offset], error = strconv.ParseUint(words[i], 10, 64)
 			if error != nil {
 				return nil, error
 			}
 		}
 		diskStats := DiskStats{
-			ReadsCompleted:  stats[3],
-			ReadsMerged:     stats[4],
-			SectorsRead:     stats[5],
-			ReadTime:        stats[6],
-			WritesCompleted: stats[7],
-			WritesMerged:    stats[8],
-			SectorsWritten:  stats[9],
-			WriteTime:       stats[10],
-			IOInProgress:    stats[11],
-			IOTime:          stats[12],
-			WeightedIOTime:  stats[13],
+			ReadsCompleted:  stats[0],
+			ReadsMerged:     stats[1],
+			SectorsRead:     stats[2],
+			ReadTime:        stats[3],
+			WritesCompleted: stats[4],
+			WritesMerged:    stats[5],
+			SectorsWritten:  stats[6],
+			WriteTime:       stats[7],
+			IoInProgress:    stats[8],
+			IoTime:          stats[9],
+			WeightedIoTime:  stats[10],
 		}
 		diskStatsMap[deviceName] = diskStats
 	}
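
To make the new indexing concrete: offset skips the major, minor, and device-name columns, so stats[0] is ReadsCompleted and stats[10] is WeightedIoTime, matching the 11 documented /proc/diskstats counters. A minimal standalone sketch that walks the sample row from the comment above (error handling elided for brevity):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func main() {
		// Sample row from the comment in the diff:
		// major, minor, device name, then the 11 stat columns.
		line := "8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330"
		words := strings.Fields(line)

		offset := 3 // skip major, minor, and device name
		stats := make([]uint64, len(words)-offset)
		for i := offset; i < len(words); i++ {
			stats[i-offset], _ = strconv.ParseUint(words[i], 10, 64)
		}
		// stats[0] = ReadsCompleted, ..., stats[8] = IoInProgress,
		// stats[10] = WeightedIoTime.
		fmt.Println(stats[0], stats[8], stats[10]) // 40 0 330
	}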

View File

@@ -45,3 +45,10 @@ func TestGetDiskStatsMap(t *testing.T) {
 		t.Errorf("diskStatsMap %s contains illegal keys %s", diskStatsMap, keySet)
 	}
 }
+
+func TestFileNotExist(t *testing.T) {
+	_, err := getDiskStatsMap("/file_does_not_exist")
+	if err != nil {
+		t.Fatalf("getDiskStatsMap must not error for absent file: %s", err)
+	}
+}

View File

@@ -22,9 +22,9 @@ type DiskStats struct {
 	WritesMerged   uint64
 	SectorsWritten uint64
 	WriteTime      uint64
-	IOInProgress   uint64
-	IOTime         uint64
-	WeightedIOTime uint64
+	IoInProgress   uint64
+	IoTime         uint64
+	WeightedIoTime uint64
 }
 
 type FsInfo interface {

View File

@@ -241,59 +241,59 @@ type FsStats struct {
 	// Number of bytes that is consumed by the container on this filesystem.
 	Usage uint64 `json:"usage"`
 
-	// # of reads completed
+	// Number of reads completed
 	// This is the total number of reads completed successfully.
 	ReadsCompleted uint64 `json:"reads_completed"`
 
-	// # of reads merged
+	// Number of reads merged
 	// Reads and writes which are adjacent to each other may be merged for
 	// efficiency. Thus two 4K reads may become one 8K read before it is
 	// ultimately handed to the disk, and so it will be counted (and queued)
 	// as only one I/O. This field lets you know how often this was done.
 	ReadsMerged uint64 `json:"reads_merged"`
 
-	// # of sectors read
+	// Number of sectors read
 	// This is the total number of sectors read successfully.
 	SectorsRead uint64 `json:"sectors_read"`
 
-	// # of milliseconds spent reading
+	// Number of milliseconds spent reading
 	// This is the total number of milliseconds spent by all reads (as
 	// measured from __make_request() to end_that_request_last()).
 	ReadTime uint64 `json:"read_time"`
 
-	// # of writes completed
+	// Number of writes completed
 	// This is the total number of writes completed successfully.
 	WritesCompleted uint64 `json:"writes_completed"`
 
-	// # of writes merged
+	// Number of writes merged
 	// See the description of reads merged.
 	WritesMerged uint64 `json:"writes_merged"`
 
-	// # of sectors written
+	// Number of sectors written
 	// This is the total number of sectors written successfully.
 	SectorsWritten uint64 `json:"sectors_written"`
 
-	// # of milliseconds spent writing
+	// Number of milliseconds spent writing
 	// This is the total number of milliseconds spent by all writes (as
 	// measured from __make_request() to end_that_request_last()).
 	WriteTime uint64 `json:"write_time"`
 
-	// # of I/Os currently in progress
+	// Number of I/Os currently in progress
 	// The only field that should go to zero. Incremented as requests are
 	// given to appropriate struct request_queue and decremented as they finish.
-	IOInProgress uint64 `json:"io_in_progress"`
+	IoInProgress uint64 `json:"io_in_progress"`
 
-	// # of milliseconds spent doing I/Os
+	// Number of milliseconds spent doing I/Os
 	// This field increases so long as field 9 is nonzero.
-	IOTime uint64 `json:"io_time"`
+	IoTime uint64 `json:"io_time"`
 
-	// weighted # of milliseconds spent doing I/Os
+	// weighted number of milliseconds spent doing I/Os
 	// This field is incremented at each I/O start, I/O completion, I/O
 	// merge, or read of these stats by the number of I/Os in progress
 	// (field 9) times the number of milliseconds spent doing I/O since the
 	// last update of this field. This can provide an easy measure of both
 	// I/O completion time and the backlog that may be accumulating.
-	WeightedIOTime uint64 `json:"weighted_io_time"`
+	WeightedIoTime uint64 `json:"weighted_io_time"`
 }
 
 type ContainerStats struct {
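
All of these are raw, monotonically increasing counters (except IoInProgress); tools like iostat turn them into rates by differencing two samples. A sketch of that arithmetic under stated assumptions: the trimmed struct mirrors the fields above, and the sample values and one-second interval are invented:

	package main

	import "fmt"

	// Trimmed, illustrative copy of the stats fields documented above.
	type diskSample struct {
		ReadsCompleted, WritesCompleted uint64
		ReadTime, WriteTime             uint64 // ms spent on reads/writes
		IoTime                          uint64 // ms with any I/O in flight
		WeightedIoTime                  uint64 // ms, weighted by queue depth
	}

	func main() {
		prev := diskSample{ReadsCompleted: 100, WritesCompleted: 50,
			ReadTime: 400, WriteTime: 200, IoTime: 500, WeightedIoTime: 900}
		curr := diskSample{ReadsCompleted: 180, WritesCompleted: 70,
			ReadTime: 800, WriteTime: 300, IoTime: 900, WeightedIoTime: 2100}
		intervalMs := uint64(1000) // invented 1s sampling interval

		ios := (curr.ReadsCompleted - prev.ReadsCompleted) +
			(curr.WritesCompleted - prev.WritesCompleted) // 100 I/Os
		// Fraction of the interval the device had I/O in flight.
		util := float64(curr.IoTime-prev.IoTime) / float64(intervalMs) // 0.4
		// Average completion time per I/O over the interval.
		await := float64((curr.ReadTime-prev.ReadTime)+
			(curr.WriteTime-prev.WriteTime)) / float64(ios) // 5.0 ms
		// Average queue depth, per the WeightedIoTime comment above.
		avgQueue := float64(curr.WeightedIoTime-prev.WeightedIoTime) /
			float64(intervalMs) // 1.2

		fmt.Printf("util=%.0f%% await=%.1fms avgqu=%.1f over %d I/Os\n",
			util*100, await, avgQueue, ios)
	}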