Gofmt all files
This commit is contained in:
parent e9d6289964
commit e8ea485a0d
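Note: this commit is a pure formatting pass; the hunks below change only spacing, struct-literal alignment, and import order. Running `gofmt -w .` at the repository root applies the same rewrite, and the standard library's go/format package applies the identical rules programmatically. A minimal sketch (the mis-spaced input string below is illustrative, not code from this repository):

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// Illustrative, mis-formatted input; not taken from this repository.
	src := []byte("package fs\nvar stats = make([]uint64,wordLength)\n")
	out, err := format.Source(src) // same formatting rules as the gofmt tool
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Prints the gofmt'd form, including "make([]uint64, wordLength)".
}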
@@ -238,9 +238,9 @@ func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
 	if self.networkInterface != nil {
 		state = dockerlibcontainer.State{
 			NetworkState: network.NetworkState{
 				VethHost: self.networkInterface.VethHost,
 				VethChild: self.networkInterface.VethChild,
 				NsPath: "unknown",
 			},
 		}
 	}
fs/fs.go (32 lines changed)
@@ -12,8 +12,8 @@ import "C"
 import (
 	"bufio"
 	"fmt"
-	"os/exec"
 	"os"
+	"os/exec"
 	"regexp"
 	"strconv"
 	"strings"
@@ -63,7 +63,7 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
 	for device, partition := range self.partitions {
 		_, hasMount := mountSet[partition.mountpoint]
 		_, hasDevice := deviceSet[device]
 		if mountSet == nil || hasMount && !hasDevice {
 			total, free, err := getVfsStats(partition.mountpoint)
 			if err != nil {
 				glog.Errorf("Statvfs failed. Error: %v", err)
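For context, the hunk above reads capacity and free space through getVfsStats(partition.mountpoint), whose implementation is not part of this diff. A minimal Linux-only sketch of what such a helper could look like, assuming it wraps syscall.Statfs (the name getVfsStatsSketch and the exact field arithmetic are assumptions):

// Hypothetical sketch; the real getVfsStats is not shown in this diff.
package fs

import "syscall"

// getVfsStatsSketch returns total and free bytes for the filesystem mounted at path.
func getVfsStatsSketch(path string) (total uint64, free uint64, err error) {
	var buf syscall.Statfs_t
	if err = syscall.Statfs(path, &buf); err != nil {
		return 0, 0, err
	}
	total = buf.Blocks * uint64(buf.Bsize) // total data blocks * block size
	free = buf.Bfree * uint64(buf.Bsize)   // free blocks * block size
	return total, free, nil
}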
@@ -94,7 +94,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 	diskStatsMap := make(map[string]DiskStats)
 	partitionRegex, _ := regexp.Compile("sd[a-z]\\d")
 	for scanner.Scan() {
-		line :=scanner.Text()
+		line := scanner.Text()
 		words := strings.Fields(line)
 		if !partitionRegex.MatchString(words[2]) {
 			continue
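The partitionRegex filter above keeps only /proc/diskstats rows whose third field looks like an sdX partition name. A small illustration of what that pattern accepts and rejects (the sample device names are examples, not data from this diff):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Equivalent to the regexp.Compile("sd[a-z]\\d") call above.
	partitionRegex := regexp.MustCompile(`sd[a-z]\d`)
	// Sample device names for illustration only.
	for _, name := range []string{"sda1", "sdd2", "sda", "loop0", "nvme0n1p1"} {
		fmt.Printf("%-10s %v\n", name, partitionRegex.MatchString(name))
	}
	// sda1 and sdd2 match; whole disks (sda) and non-sd devices do not.
}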
@@ -102,7 +102,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 		// 8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330
 		deviceName := "/dev/" + words[2]
 		wordLength := len(words)
-		var stats = make([]uint64,wordLength)
+		var stats = make([]uint64, wordLength)
 		var error error
 		for i := 3; i < wordLength; i++ {
 			stats[i], error = strconv.ParseUint(words[i], 10, 64)
@@ -110,18 +110,18 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 				return nil, error
 			}
 		}
-		diskStats := DiskStats {
+		diskStats := DiskStats{
 			ReadsCompleted: stats[3],
 			ReadsMerged: stats[4],
 			SectorsRead: stats[5],
 			ReadTime: stats[6],
-			WritesCompleted:stats[7],
+			WritesCompleted: stats[7],
 			WritesMerged: stats[8],
 			SectorsWritten: stats[9],
 			WriteTime: stats[10],
 			IOInProgress: stats[11],
 			IOTime: stats[12],
 			WeightedIOTime: stats[13],
 		}
 		diskStatsMap[deviceName] = diskStats
 	}
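The indices used above follow the /proc/diskstats layout: fields 0-2 of each line are the major number, minor number, and device name, and fields 3 onward are the kernel's cumulative I/O counters, which is why the parse loop starts at i = 3. A condensed sketch of that mapping, reusing the sample line quoted in the hunk above (error handling is omitted in this sketch):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Sample /proc/diskstats line from the comment in the hunk above.
	line := "8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330"
	words := strings.Fields(line)
	stats := make([]uint64, len(words))
	for i := 3; i < len(words); i++ {
		stats[i], _ = strconv.ParseUint(words[i], 10, 64) // error handling omitted here
	}
	// stats[3]=reads completed, stats[5]=sectors read,
	// stats[7]=writes completed, stats[9]=sectors written (as in DiskStats above).
	fmt.Printf("/dev/%s reads=%d sectorsRead=%d writes=%d sectorsWritten=%d\n",
		words[2], stats[3], stats[5], stats[7], stats[9])
}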
|
@@ -1,4 +1,5 @@
 package fs
+
 import (
 	"testing"
 )
@@ -12,26 +13,26 @@ func TestGetDiskStatsMap(t *testing.T) {
 		t.Errorf("diskStatsMap %s not valid", diskStatsMap)
 	}
 	keySet := map[string]string{
 		"/dev/sdb1": "/dev/sdb1",
 		"/dev/sdb2": "/dev/sdb2",
 		"/dev/sda1": "/dev/sda1",
 		"/dev/sda2": "/dev/sda2",
 		"/dev/sdc1": "/dev/sdc1",
 		"/dev/sdc2": "/dev/sdc2",
 		"/dev/sdc3": "/dev/sdc3",
 		"/dev/sdc4": "/dev/sdc4",
 		"/dev/sdd1": "/dev/sdd1",
 		"/dev/sdd2": "/dev/sdd2",
 		"/dev/sdd3": "/dev/sdd3",
 		"/dev/sdd4": "/dev/sdd4",
 		"/dev/sde1": "/dev/sde1",
 		"/dev/sde2": "/dev/sde2",
 		"/dev/sdf1": "/dev/sdf1",
 		"/dev/sdf2": "/dev/sdf2",
 		"/dev/sdg1": "/dev/sdg1",
 		"/dev/sdg2": "/dev/sdg2",
 		"/dev/sdh1": "/dev/sdh1",
 		"/dev/sdh2": "/dev/sdh2",
 	}

 	for device := range diskStatsMap {
fs/types.go (28 lines changed)
@@ -8,23 +8,23 @@ type DeviceInfo struct {

 type Fs struct {
 	DeviceInfo
 	Capacity uint64
 	Free uint64
 	DiskStats DiskStats
 }

 type DiskStats struct {
 	ReadsCompleted uint64
 	ReadsMerged uint64
 	SectorsRead uint64
 	ReadTime uint64
 	WritesCompleted uint64
 	WritesMerged uint64
 	SectorsWritten uint64
 	WriteTime uint64
 	IOInProgress uint64
 	IOTime uint64
 	WeightedIOTime uint64
 }

 type FsInfo interface {
|
@@ -218,7 +218,7 @@ type NetworkStats struct {
 	// Cumulative count of packets received.
 	RxPackets uint64 `json:"rx_packets"`
 	// Cumulative count of receive errors encountered.
 	RxErrors uint64 `json:"rx_errors"`
 	// Cumulative count of packets dropped while receiving.
 	RxDropped uint64 `json:"rx_dropped"`
 	// Cumulative count of bytes transmitted.
@@ -243,49 +243,49 @@ type FsStats struct {

 	// # of reads completed
 	// This is the total number of reads completed successfully.
 	ReadsCompleted uint64 `json:"reads_completed"`

 	// # of reads merged
 	// Reads and writes which are adjacent to each other may be merged for
 	// efficiency. Thus two 4K reads may become one 8K read before it is
 	// ultimately handed to the disk, and so it will be counted (and queued)
 	// as only one I/O. This field lets you know how often this was done.
 	ReadsMerged uint64 `json:"reads_merged"`

 	// # of sectors read
 	// This is the total number of sectors read successfully.
 	SectorsRead uint64 `json:"sectors_read"`

 	// # of milliseconds spent reading
 	// This is the total number of milliseconds spent by all reads (as
 	// measured from __make_request() to end_that_request_last()).
 	ReadTime uint64 `json:"read_time"`

 	// # of writes completed
 	// This is the total number of writes completed successfully.
 	WritesCompleted uint64 `json:"writes_completed"`

 	// # of writes merged
 	// See the description of reads merged.
 	WritesMerged uint64 `json:"writes_merged"`

 	// # of sectors written
 	// This is the total number of sectors written successfully.
 	SectorsWritten uint64 `json:"sectors_written"`

 	// # of milliseconds spent writing
 	// This is the total number of milliseconds spent by all writes (as
 	// measured from __make_request() to end_that_request_last()).
 	WriteTime uint64 `json:"write_time"`

 	// # of I/Os currently in progress
 	// The only field that should go to zero. Incremented as requests are
 	// given to appropriate struct request_queue and decremented as they finish.
 	IOInProgress uint64 `json:"io_in_progress"`

 	// # of milliseconds spent doing I/Os
 	// This field increases so long as field 9 is nonzero.
 	IOTime uint64 `json:"io_time"`

 	// weighted # of milliseconds spent doing I/Os
 	// This field is incremented at each I/O start, I/O completion, I/O
@@ -293,7 +293,7 @@ type FsStats struct {
 	// (field 9) times the number of milliseconds spent doing I/O since the
 	// last update of this field. This can provide an easy measure of both
 	// I/O completion time and the backlog that may be accumulating.
 	WeightedIOTime uint64 `json:"weighted_io_time"`
 }

 type ContainerStats struct {
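The backtick tags on the FsStats fields above determine the key names used when these stats are serialized to JSON. A minimal sketch with a trimmed stand-in struct that carries two of the same tags (fsStatsSketch is hypothetical, not the real type):

package main

import (
	"encoding/json"
	"fmt"
)

// fsStatsSketch is a trimmed, hypothetical stand-in for FsStats.
type fsStatsSketch struct {
	ReadsCompleted  uint64 `json:"reads_completed"`
	WritesCompleted uint64 `json:"writes_completed"`
}

func main() {
	b, _ := json.Marshal(fsStatsSketch{ReadsCompleted: 40, WritesCompleted: 7})
	fmt.Println(string(b)) // {"reads_completed":40,"writes_completed":7}
}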