diff --git a/info/v1/machine.go b/info/v1/machine.go
index cf5e828d..959f65e7 100644
--- a/info/v1/machine.go
+++ b/info/v1/machine.go
@@ -38,9 +38,10 @@ type FsInfo struct {
 type Node struct {
 	Id int `json:"node_id"`
 	// Per-node memory
-	Memory uint64  `json:"memory"`
-	Cores  []Core  `json:"cores"`
-	Caches []Cache `json:"caches"`
+	Memory    uint64          `json:"memory"`
+	HugePages []HugePagesInfo `json:"hugepages"`
+	Cores     []Core          `json:"cores"`
+	Caches    []Cache         `json:"caches"`
 }
 
 type Core struct {
diff --git a/machine/info.go b/machine/info.go
index c3385cc4..cbe95bc6 100644
--- a/machine/info.go
+++ b/machine/info.go
@@ -17,10 +17,8 @@ package machine
 import (
 	"bytes"
 	"flag"
-	"fmt"
 	"io/ioutil"
 	"path/filepath"
-	"strconv"
 	"strings"
 
 	"github.com/docker/docker/pkg/parsers/operatingsystem"
@@ -54,45 +52,6 @@ func getInfoFromFiles(filePaths string) string {
 	return ""
 }
 
-// GetHugePagesInfo returns information about pre-allocated huge pages
-func GetHugePagesInfo() ([]info.HugePagesInfo, error) {
-	var hugePagesInfo []info.HugePagesInfo
-	files, err := ioutil.ReadDir(hugepagesDirectory)
-	if err != nil {
-		// treat as non-fatal since kernels and machine can be
-		// configured to disable hugepage support
-		return hugePagesInfo, nil
-	}
-	for _, st := range files {
-		nameArray := strings.Split(st.Name(), "-")
-		pageSizeArray := strings.Split(nameArray[1], "kB")
-		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-
-		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
-		val, err := ioutil.ReadFile(numFile)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-		var numPages uint64
-		// we use sscanf as the file as a new-line that trips up ParseUint
-		// it returns the number of tokens successfully parsed, so if
-		// n != 1, it means we were unable to parse a number from the file
-		n, err := fmt.Sscanf(string(val), "%d", &numPages)
-		if err != nil || n != 1 {
-			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
-		}
-
-		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
-			NumPages: numPages,
-			PageSize: pageSize,
-		})
-	}
-	return hugePagesInfo, nil
-}
-
 func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.MachineInfo, error) {
 	rootFs := "/"
 	if !inHostNamespace {
@@ -113,7 +72,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
 		return nil, err
 	}
 
-	hugePagesInfo, err := GetHugePagesInfo()
+	hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
 	if err != nil {
 		return nil, err
 	}
diff --git a/machine/machine.go b/machine/machine.go
index d85e38f1..34ed9b99 100644
--- a/machine/machine.go
+++ b/machine/machine.go
@@ -23,6 +23,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+
 	// s390/s390x changes
 	"runtime"
 
@@ -49,6 +50,7 @@ var (
 
 const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
 const cpuBusPath = "/sys/bus/cpu/devices/"
+const nodePath = "/sys/devices/system/node"
 
 // GetClockSpeed returns the CPU clock speed, given a []byte formatted as the /proc/cpuinfo file.
 func GetClockSpeed(procInfo []byte) (uint64, error) {
@@ -191,6 +193,47 @@ func getNodeIdFromCpuBus(cpuBusPath string, threadId int) (int, error) {
 	return nodeId, nil
 }
 
+// GetHugePagesInfo returns information about pre-allocated huge pages
+// hugepagesDirectory should be top directory of hugepages
+// Such as: /sys/kernel/mm/hugepages/
+func GetHugePagesInfo(hugepagesDirectory string) ([]info.HugePagesInfo, error) {
+	var hugePagesInfo []info.HugePagesInfo
+	files, err := ioutil.ReadDir(hugepagesDirectory)
+	if err != nil {
+		// treat as non-fatal since kernels and machine can be
+		// configured to disable hugepage support
+		return hugePagesInfo, nil
+	}
+	for _, st := range files {
+		nameArray := strings.Split(st.Name(), "-")
+		pageSizeArray := strings.Split(nameArray[1], "kB")
+		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
+		if err != nil {
+			return hugePagesInfo, err
+		}
+
+		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
+		val, err := ioutil.ReadFile(numFile)
+		if err != nil {
+			return hugePagesInfo, err
+		}
+		var numPages uint64
+		// we use sscanf as the file as a new-line that trips up ParseUint
+		// it returns the number of tokens successfully parsed, so if
+		// n != 1, it means we were unable to parse a number from the file
+		n, err := fmt.Sscanf(string(val), "%d", &numPages)
+		if err != nil || n != 1 {
+			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
+		}
+
+		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
+			NumPages: numPages,
+			PageSize: pageSize,
+		})
+	}
+	return hugePagesInfo, nil
+}
+
 func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
 	nodes := []info.Node{}
 
@@ -352,6 +395,15 @@ func addNode(nodes *[]info.Node, id int) (int, error) {
 			}
 			node.Memory = uint64(m)
 		}
+		// Look for per-node hugepages info using node id
+		// Such as: /sys/devices/system/node/node%d/hugepages
+		hugepagesDirectory := fmt.Sprintf("%s/node%d/hugepages/", nodePath, id)
+		hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
+		if err != nil {
+			return -1, err
+		}
+		node.HugePages = hugePagesInfo
+
 		*nodes = append(*nodes, node)
 		idx = len(*nodes) - 1
 	}
diff --git a/machine/testdata/hugepages/hugepages-1048576kB/nr_hugepages b/machine/testdata/hugepages/hugepages-1048576kB/nr_hugepages
new file mode 100644
index 00000000..d00491fd
--- /dev/null
+++ b/machine/testdata/hugepages/hugepages-1048576kB/nr_hugepages
@@ -0,0 +1 @@
+1
diff --git a/machine/testdata/hugepages/hugepages-2048kB/nr_hugepages b/machine/testdata/hugepages/hugepages-2048kB/nr_hugepages
new file mode 100644
index 00000000..0cfbf088
--- /dev/null
+++ b/machine/testdata/hugepages/hugepages-2048kB/nr_hugepages
@@ -0,0 +1 @@
+2
diff --git a/machine/topology_test.go b/machine/topology_test.go
index ca25ef7d..2f40d5e0 100644
--- a/machine/topology_test.go
+++ b/machine/topology_test.go
@@ -59,6 +59,8 @@ func TestTopology(t *testing.T) {
 		node := info.Node{Id: i}
 		// Copy over Memory from result. TODO(rjnagal): Use memory from fake.
 		node.Memory = topology[i].Memory
+		// Copy over HugePagesInfo from result. TODO(ohsewon): Use HugePagesInfo from fake.
+		node.HugePages = topology[i].HugePages
 		for j := 0; j < numCoresPerNode; j++ {
 			core := info.Core{Id: i*numCoresPerNode + j}
 			core.Caches = append(core.Caches, cache)
@@ -100,6 +102,8 @@ func TestTopologyWithSimpleCpuinfo(t *testing.T) {
 	node.Cores = append(node.Cores, core)
 	// Copy over Memory from result. TODO(rjnagal): Use memory from fake.
 	node.Memory = topology[0].Memory
+	// Copy over HugePagesInfo from result. TODO(ohsewon): Use HugePagesInfo from fake.
+	node.HugePages = topology[0].HugePages
 	expected := []info.Node{node}
 	if !reflect.DeepEqual(topology, expected) {
 		t.Errorf("Expected topology %+v, got %+v", expected, topology)
@@ -139,3 +143,26 @@ func TestTopologyNodeId(t *testing.T) {
 		t.Errorf("Expected core 1234 , found %d", val)
 	}
 }
+
+func TestGetHugePagesInfo(t *testing.T) {
+	testPath := "./testdata/hugepages/"
+	expected := []info.HugePagesInfo{
+		{
+			NumPages: 1,
+			PageSize: 1048576,
+		},
+		{
+			NumPages: 2,
+			PageSize: 2048,
+		},
+	}
+
+	val, err := GetHugePagesInfo(testPath)
+	if err != nil {
+		t.Errorf("Failed to GetHugePagesInfo() for sample path %s: %v", testPath, err)
+	}
+
+	if !reflect.DeepEqual(expected, val) {
+		t.Errorf("Expected HugePagesInfo %+v, got %+v", expected, val)
+	}
+}
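For reviewers who want to exercise the relocated helper by hand, below is a minimal sketch (not part of the change above) that drives GetHugePagesInfo against both the machine-wide pool and a single NUMA node, mirroring the two call sites the diff wires up. The import paths assume the upstream cAdvisor module layout (github.com/google/cadvisor), and node0 is only an illustrative node id. Because GetHugePagesInfo treats a missing directory as non-fatal and returns an empty slice, the sketch simply prints nothing on machines without hugepage support.

package main

import (
	"fmt"

	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/machine"
)

// printHugePages dumps one line per pre-allocated hugepage pool.
func printHugePages(scope string, pools []info.HugePagesInfo) {
	for _, hp := range pools {
		fmt.Printf("%s: %d pages of %d kB\n", scope, hp.NumPages, hp.PageSize)
	}
}

func main() {
	// Machine-wide pools, e.g. /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages.
	global, err := machine.GetHugePagesInfo("/sys/kernel/mm/hugepages/")
	if err != nil {
		fmt.Println("machine-wide hugepages lookup failed:", err)
		return
	}
	printHugePages("machine", global)

	// Per-node pools, e.g. /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages.
	perNode, err := machine.GetHugePagesInfo("/sys/devices/system/node/node0/hugepages/")
	if err != nil {
		fmt.Println("node0 hugepages lookup failed:", err)
		return
	}
	printHugePages("node0", perNode)
}

On a host with huge pages reserved, the first call reports the same machine-wide totals the existing path in Info() collects, while the second reflects only node 0's share, i.e. the data that now populates the new Node.HugePages field.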