Added /proc/diskstats

Read disk io information from /proc/diskstats.
This will allow the user who provides partition container hints to get partition-specific io (blkio provides io for the container, but at the disk device level).
This commit is contained in:
Abin Shahab 2014-10-18 22:01:33 +00:00
parent b1796da270
commit e9d6289964
6 changed files with 252 additions and 6 deletions

View File

@ -188,7 +188,20 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
return err
}
for _, fs := range filesystems {
stats.Filesystem = append(stats.Filesystem, info.FsStats{fs.Device, fs.Capacity, fs.Capacity - fs.Free})
stats.Filesystem = append(stats.Filesystem,
info.FsStats{fs.Device, fs.Capacity, fs.Capacity - fs.Free,
fs.DiskStats.ReadsCompleted,
fs.DiskStats.ReadsMerged,
fs.DiskStats.SectorsRead,
fs.DiskStats.ReadTime,
fs.DiskStats.WritesCompleted,
fs.DiskStats.WritesMerged,
fs.DiskStats.SectorsWritten,
fs.DiskStats.WriteTime,
fs.DiskStats.IOInProgress,
fs.DiskStats.IOTime,
fs.DiskStats.WeightedIOTime,
})
}
} else if len(self.externalMounts) > 0 {
var mountSet map[string]struct{}
@ -201,7 +214,20 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
return err
}
for _, fs := range filesystems {
stats.Filesystem = append(stats.Filesystem, info.FsStats{fs.Device, fs.Capacity, fs.Capacity - fs.Free})
stats.Filesystem = append(stats.Filesystem,
info.FsStats{fs.Device, fs.Capacity, fs.Capacity - fs.Free,
fs.DiskStats.ReadsCompleted,
fs.DiskStats.ReadsMerged,
fs.DiskStats.SectorsRead,
fs.DiskStats.ReadTime,
fs.DiskStats.WritesCompleted,
fs.DiskStats.WritesMerged,
fs.DiskStats.SectorsWritten,
fs.DiskStats.WriteTime,
fs.DiskStats.IOInProgress,
fs.DiskStats.IOTime,
fs.DiskStats.WeightedIOTime,
})
}
}
return nil

View File

@ -10,8 +10,11 @@ package fs
import "C"
import (
"bufio"
"fmt"
"os/exec"
"os"
"regexp"
"strconv"
"strings"
"syscall"
@ -53,6 +56,10 @@ func NewFsInfo() (FsInfo, error) {
func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) {
filesystems := make([]Fs, 0)
deviceSet := make(map[string]struct{})
diskStatsMap, err := getDiskStatsMap("/proc/diskstats")
if err != nil {
return nil, err
}
for device, partition := range self.partitions {
_, hasMount := mountSet[partition.mountpoint]
_, hasDevice := deviceSet[device]
@ -67,7 +74,7 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
Major: uint(partition.major),
Minor: uint(partition.minor),
}
fs := Fs{deviceInfo, total, free}
fs := Fs{deviceInfo, total, free, diskStatsMap[device]}
filesystems = append(filesystems, fs)
}
}
@ -75,6 +82,52 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
return filesystems, nil
}
// getDiskStatsMap parses a /proc/diskstats-formatted file and returns a map
// from partition device path (e.g. "/dev/sdd2") to its DiskStats.
// Only sd[a-z]N partitions are included; whole disks ("sdd") and other
// devices (ram*, loop*, ...) are filtered out. Malformed or short lines are
// skipped rather than causing an index panic.
func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
	file, err := os.Open(diskStatsFile)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	// Partition names carry a trailing digit ("sdd2"); bare disks do not.
	// MustCompile is safe here: the pattern is a constant.
	partitionRegex := regexp.MustCompile(`sd[a-z]\d`)

	diskStatsMap := make(map[string]DiskStats)
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Example line:
		// 8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330
		words := strings.Fields(scanner.Text())
		// A usable line has major, minor, device name plus 11 stat fields.
		if len(words) < 14 || !partitionRegex.MatchString(words[2]) {
			continue
		}
		deviceName := "/dev/" + words[2]
		// Parse only the 11 counters we consume; newer kernels may append
		// extra fields (discard stats), which are ignored.
		var stats [11]uint64
		for i := range stats {
			stats[i], err = strconv.ParseUint(words[i+3], 10, 64)
			if err != nil {
				return nil, err
			}
		}
		diskStatsMap[deviceName] = DiskStats{
			ReadsCompleted:  stats[0],
			ReadsMerged:     stats[1],
			SectorsRead:     stats[2],
			ReadTime:        stats[3],
			WritesCompleted: stats[4],
			WritesMerged:    stats[5],
			SectorsWritten:  stats[6],
			WriteTime:       stats[7],
			IOInProgress:    stats[8],
			IOTime:          stats[9],
			WeightedIOTime:  stats[10],
		}
	}
	// Surface read errors that terminated the scan early.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return diskStatsMap, nil
}
// GetGlobalFsInfo reports stats for every known partition by querying
// GetFsInfoForPath with no mount-point filter.
func (fsInfo *RealFsInfo) GetGlobalFsInfo() ([]Fs, error) {
	return fsInfo.GetFsInfoForPath(nil)
}

46
fs/fs_test.go Normal file
View File

@ -0,0 +1,46 @@
package fs
import (
"testing"
)
// TestGetDiskStatsMap checks that getDiskStatsMap parses the canned
// /proc/diskstats fixture into exactly the expected set of partition keys.
func TestGetDiskStatsMap(t *testing.T) {
	diskStatsMap, err := getDiskStatsMap("test_resources/diskstats")
	if err != nil {
		// Parsing failed outright; the assertions below would only cascade.
		t.Fatalf("Error calling getDiskStatsMap %v", err)
	}
	if len(diskStatsMap) != 20 {
		t.Errorf("diskStatsMap %v not valid", diskStatsMap)
	}
	// The fixture contains exactly these partitions; whole disks (sda, ...)
	// and ram/loop devices must have been filtered out.
	keySet := map[string]struct{}{
		"/dev/sda1": {}, "/dev/sda2": {},
		"/dev/sdb1": {}, "/dev/sdb2": {},
		"/dev/sdc1": {}, "/dev/sdc2": {}, "/dev/sdc3": {}, "/dev/sdc4": {},
		"/dev/sdd1": {}, "/dev/sdd2": {}, "/dev/sdd3": {}, "/dev/sdd4": {},
		"/dev/sde1": {}, "/dev/sde2": {},
		"/dev/sdf1": {}, "/dev/sdf2": {},
		"/dev/sdg1": {}, "/dev/sdg2": {},
		"/dev/sdh1": {}, "/dev/sdh2": {},
	}
	for device := range diskStatsMap {
		if _, ok := keySet[device]; !ok {
			t.Errorf("Cannot find device %s", device)
		}
		delete(keySet, device)
	}
	// Anything left over was expected but missing from the parsed map.
	if len(keySet) != 0 {
		t.Errorf("diskStatsMap %v contains illegal keys %v", diskStatsMap, keySet)
	}
}

View File

@ -0,0 +1,52 @@
1 0 ram0 0 0 0 0 0 0 0 0 0 0 0
1 1 ram1 0 0 0 0 0 0 0 0 0 0 0
1 2 ram2 0 0 0 0 0 0 0 0 0 0 0
1 3 ram3 0 0 0 0 0 0 0 0 0 0 0
1 4 ram4 0 0 0 0 0 0 0 0 0 0 0
1 5 ram5 0 0 0 0 0 0 0 0 0 0 0
1 6 ram6 0 0 0 0 0 0 0 0 0 0 0
1 7 ram7 0 0 0 0 0 0 0 0 0 0 0
1 8 ram8 0 0 0 0 0 0 0 0 0 0 0
1 9 ram9 0 0 0 0 0 0 0 0 0 0 0
1 10 ram10 0 0 0 0 0 0 0 0 0 0 0
1 11 ram11 0 0 0 0 0 0 0 0 0 0 0
1 12 ram12 0 0 0 0 0 0 0 0 0 0 0
1 13 ram13 0 0 0 0 0 0 0 0 0 0 0
1 14 ram14 0 0 0 0 0 0 0 0 0 0 0
1 15 ram15 0 0 0 0 0 0 0 0 0 0 0
7 0 loop0 0 0 0 0 0 0 0 0 0 0 0
7 1 loop1 0 0 0 0 0 0 0 0 0 0 0
7 2 loop2 0 0 0 0 0 0 0 0 0 0 0
7 3 loop3 0 0 0 0 0 0 0 0 0 0 0
7 4 loop4 0 0 0 0 0 0 0 0 0 0 0
7 5 loop5 0 0 0 0 0 0 0 0 0 0 0
7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
8 16 sdb 931 1157 7601 960 2 0 16 0 0 919 960
8 17 sdb1 477 1147 3895 271 1 0 8 0 0 271 271
8 18 sdb2 395 0 3154 326 1 0 8 0 0 326 326
8 0 sda 931 1157 7601 1065 2 0 16 0 0 873 1065
8 1 sda1 477 1147 3895 419 1 0 8 0 0 419 419
8 2 sda2 395 0 3154 328 1 0 8 0 0 328 328
8 32 sdc 12390 470 457965 36363 72184 244851 9824537 5359169 0 607738 5437210
8 33 sdc1 10907 221 446193 34366 72173 244851 9824499 5359063 0 606972 5435214
8 34 sdc2 650 249 5120 901 7 0 22 93 0 956 994
8 35 sdc3 264 0 2106 380 1 0 8 0 0 380 380
8 36 sdc4 392 0 3130 476 1 0 8 0 0 475 475
8 48 sdd 3371 134 58909 18327 73997 243043 9824537 4532714 0 594248 4602162
8 49 sdd1 2498 134 51977 17192 73986 243043 9824499 4532600 0 593618 4600885
8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330
8 51 sdd3 264 0 2106 328 1 0 8 0 0 328 328
8 52 sdd4 392 0 3130 373 1 0 8 1 0 374 374
8 64 sde 931 1157 7601 768 2 0 16 0 0 632 768
8 65 sde1 477 1147 3895 252 1 0 8 0 0 252 252
8 66 sde2 395 0 3154 281 1 0 8 0 0 281 281
8 80 sdf 931 1157 7601 936 2 0 16 0 0 717 936
8 81 sdf1 477 1147 3895 382 1 0 8 0 0 382 382
8 82 sdf2 395 0 3154 321 1 0 8 0 0 321 321
8 96 sdg 931 1157 7601 858 2 0 16 0 0 804 858
8 97 sdg1 477 1147 3895 244 1 0 8 0 0 244 244
8 98 sdg2 395 0 3154 299 1 0 8 0 0 299 299
8 112 sdh 931 1157 7601 895 2 0 16 0 0 841 895
8 113 sdh1 477 1147 3895 264 1 0 8 0 0 264 264
8 114 sdh2 395 0 3154 311 1 0 8 0 0 311 311

View File

@ -8,8 +8,23 @@ type DeviceInfo struct {
type Fs struct {
DeviceInfo
Capacity uint64
Free uint64
Capacity uint64
Free uint64
DiskStats DiskStats
}
// DiskStats mirrors one line of /proc/diskstats for a single partition.
// All counters are cumulative since boot; time fields are in milliseconds.
type DiskStats struct {
// Total number of reads completed successfully.
ReadsCompleted uint64
// Number of adjacent reads merged before being issued to the disk.
ReadsMerged uint64
// Total number of sectors read successfully.
SectorsRead uint64
// Milliseconds spent by all reads.
ReadTime uint64
// Total number of writes completed successfully.
WritesCompleted uint64
// Number of adjacent writes merged before being issued to the disk.
WritesMerged uint64
// Total number of sectors written successfully.
SectorsWritten uint64
// Milliseconds spent by all writes.
WriteTime uint64
// Number of I/Os currently in progress; the only field that can go to zero.
IOInProgress uint64
// Milliseconds spent doing I/Os (increases while IOInProgress is nonzero).
IOTime uint64
// Weighted milliseconds spent doing I/Os: incremented at each I/O event by
// in-flight I/Os times elapsed milliseconds, measuring completion time and
// backlog together.
WeightedIOTime uint64
}
type FsInfo interface {

View File

@ -218,7 +218,7 @@ type NetworkStats struct {
// Cumulative count of packets received.
RxPackets uint64 `json:"rx_packets"`
// Cumulative count of receive errors encountered.
RxErrors uint64 `json:"rx_errors"`
RxErrors uint64 `json:"rx_errors"`
// Cumulative count of packets dropped while receiving.
RxDropped uint64 `json:"rx_dropped"`
// Cumulative count of bytes transmitted.
@ -240,6 +240,60 @@ type FsStats struct {
// Number of bytes that is consumed by the container on this filesystem.
Usage uint64 `json:"usage"`
// # of reads completed
// This is the total number of reads completed successfully.
ReadsCompleted uint64 `json:"reads_completed"`
// # of reads merged
// Reads and writes which are adjacent to each other may be merged for
// efficiency. Thus two 4K reads may become one 8K read before it is
// ultimately handed to the disk, and so it will be counted (and queued)
// as only one I/O. This field lets you know how often this was done.
ReadsMerged uint64 `json:"reads_merged"`
// # of sectors read
// This is the total number of sectors read successfully.
SectorsRead uint64 `json:"sectors_read"`
// # of milliseconds spent reading
// This is the total number of milliseconds spent by all reads (as
// measured from __make_request() to end_that_request_last()).
ReadTime uint64 `json:"read_time"`
// # of writes completed
// This is the total number of writes completed successfully.
WritesCompleted uint64 `json:"writes_completed"`
// # of writes merged
// See the description of reads merged.
WritesMerged uint64 `json:"writes_merged"`
// # of sectors written
// This is the total number of sectors written successfully.
SectorsWritten uint64 `json:"sectors_written"`
// # of milliseconds spent writing
// This is the total number of milliseconds spent by all writes (as
// measured from __make_request() to end_that_request_last()).
WriteTime uint64 `json:"write_time"`
// # of I/Os currently in progress
// The only field that should go to zero. Incremented as requests are
// given to appropriate struct request_queue and decremented as they finish.
IOInProgress uint64 `json:"io_in_progress"`
// # of milliseconds spent doing I/Os
// This field increases so long as field 9 is nonzero.
IOTime uint64 `json:"io_time"`
// weighted # of milliseconds spent doing I/Os
// This field is incremented at each I/O start, I/O completion, I/O
// merge, or read of these stats by the number of I/Os in progress
// (field 9) times the number of milliseconds spent doing I/O since the
// last update of this field. This can provide an easy measure of both
// I/O completion time and the backlog that may be accumulating.
WeightedIOTime uint64 `json:"weighted_io_time"`
}
type ContainerStats struct {