Remove duplicated logic
Signed-off-by: sewon.oh <sewon.oh@samsung.com>
commit cb3a2bef46
parent d138b59aa2
@@ -39,7 +39,7 @@ type Node struct {
 	Id int `json:"node_id"`
 	// Per-node memory
 	Memory uint64 `json:"memory"`
-	HugePages []HugePagesInfo `json:"huge_pages"`
+	HugePages []HugePagesInfo `json:"hugepages"`
 	Cores []Core `json:"cores"`
 	Caches []Cache `json:"caches"`
 }
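For reference, a minimal standalone sketch of what the renamed tag produces when the struct is marshalled. The types below are trimmed stand-ins for the Node and HugePagesInfo definitions above; the field tags on the stand-in HugePagesInfo are illustrative and not taken from this diff.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in types, only to show the field names produced by the tags above.
type HugePagesInfo struct {
	PageSize uint64 `json:"page_size"`
	NumPages uint64 `json:"num_pages"`
}

type Node struct {
	Id        int             `json:"node_id"`
	Memory    uint64          `json:"memory"`
	HugePages []HugePagesInfo `json:"hugepages"`
}

func main() {
	n := Node{Id: 0, Memory: 1 << 30, HugePages: []HugePagesInfo{{PageSize: 2048, NumPages: 4}}}
	out, _ := json.Marshal(n)
	// Prints: {"node_id":0,"memory":1073741824,"hugepages":[{"page_size":2048,"num_pages":4}]}
	fmt.Println(string(out))
}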
@@ -17,10 +17,8 @@ package machine
 import (
 	"bytes"
 	"flag"
-	"fmt"
 	"io/ioutil"
 	"path/filepath"
-	"strconv"
 	"strings"
 
 	"github.com/docker/docker/pkg/parsers/operatingsystem"
@@ -54,45 +52,6 @@ func getInfoFromFiles(filePaths string) string {
 	return ""
 }
 
-// GetHugePagesInfo returns information about pre-allocated huge pages
-func GetHugePagesInfo() ([]info.HugePagesInfo, error) {
-	var hugePagesInfo []info.HugePagesInfo
-	files, err := ioutil.ReadDir(hugepagesDirectory)
-	if err != nil {
-		// treat as non-fatal since kernels and machine can be
-		// configured to disable hugepage support
-		return hugePagesInfo, nil
-	}
-	for _, st := range files {
-		nameArray := strings.Split(st.Name(), "-")
-		pageSizeArray := strings.Split(nameArray[1], "kB")
-		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-
-		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
-		val, err := ioutil.ReadFile(numFile)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-		var numPages uint64
-		// we use sscanf as the file as a new-line that trips up ParseUint
-		// it returns the number of tokens successfully parsed, so if
-		// n != 1, it means we were unable to parse a number from the file
-		n, err := fmt.Sscanf(string(val), "%d", &numPages)
-		if err != nil || n != 1 {
-			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
-		}
-
-		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
-			NumPages: numPages,
-			PageSize: pageSize,
-		})
-	}
-	return hugePagesInfo, nil
-}
-
 func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.MachineInfo, error) {
 	rootFs := "/"
 	if !inHostNamespace {
@@ -110,7 +69,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
 		return nil, err
 	}
 
-	hugePagesInfo, err := GetHugePagesInfo()
+	hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
 	if err != nil {
 		return nil, err
 	}
@@ -46,7 +46,6 @@ var (
 	cpuClockSpeedMHz = regexp.MustCompile(`(?:cpu MHz|clock)\s*:\s*([0-9]+\.[0-9]+)(?:MHz)?`)
 	memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)
 	swapCapacityRegexp = regexp.MustCompile(`SwapTotal:\s*([0-9]+) kB`)
-	hugePageSizeRegexp = regexp.MustCompile(`hugepages-\s*([0-9]+)kB`)
 )
 
 const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
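hugePageSizeRegexp was only used by the node-level parser removed further down; the consolidated function derives the page size by splitting the directory name instead. A small, purely illustrative sketch showing that both approaches recover the same size from an entry name such as hugepages-2048kB (the parseCapacity helper that consumed this regexp is not part of this diff, so the pattern is applied directly here):

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var hugePageSizeRegexp = regexp.MustCompile(`hugepages-\s*([0-9]+)kB`)

func main() {
	name := "hugepages-2048kB" // a typical entry under a hugepages/ sysfs directory

	// Old node-level approach: capture the size with the regexp.
	m := hugePageSizeRegexp.FindStringSubmatch(name)
	fromRegexp, _ := strconv.ParseUint(m[1], 10, 64)

	// Consolidated approach from this diff: split on "-" and drop the "kB" suffix.
	nameArray := strings.Split(name, "-")
	pageSizeArray := strings.Split(nameArray[1], "kB")
	fromSplit, _ := strconv.ParseUint(pageSizeArray[0], 10, 64)

	fmt.Println(fromRegexp, fromSplit) // 2048 2048, both in kB
}

The removed node-level code below also divided its helper's result by 1024 with a "Convert to kB" comment, so that helper apparently returned bytes; the string-splitting path works in kB directly, matching the kB suffix in the directory name.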
@@ -194,42 +193,42 @@ func getNodeIdFromCpuBus(cpuBusPath string, threadId int) (int, error) {
 	return nodeId, nil
 }
 
-/* Look for per-node hugepages info using node id */
-/* Such as: /sys/devices/system/node/node%d/hugepages */
-func getHugePagesInfoFromNode(nodePath string, nodeIndex int) ([]info.HugePagesInfo, error) {
-	hugePagesInfo := []info.HugePagesInfo{}
-	path := filepath.Join(nodePath, fmt.Sprintf("node%d/hugepages", nodeIndex))
-	files, err := ioutil.ReadDir(path)
-	// Ignore if per-node info is not available.
+// GetHugePagesInfo returns information about pre-allocated huge pages
+func GetHugePagesInfo(hugepagesDirectory string) ([]info.HugePagesInfo, error) {
+	var hugePagesInfo []info.HugePagesInfo
+	files, err := ioutil.ReadDir(hugepagesDirectory)
 	if err != nil {
-		klog.Errorf("failed to get hugepages information for node %d: %v", nodeIndex, err)
-		return nil, nil
+		// treat as non-fatal since kernels and machine can be
+		// configured to disable hugepage support
+		return hugePagesInfo, nil
 	}
-
-	for _, file := range files {
-		fileName := file.Name()
-		pageSize, err := parseCapacity([]byte(fileName), hugePageSizeRegexp)
+	for _, st := range files {
+		nameArray := strings.Split(st.Name(), "-")
+		pageSizeArray := strings.Split(nameArray[1], "kB")
+		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
 		if err != nil {
-			return nil, err
+			return hugePagesInfo, err
 		}
 
-		file := filepath.Join(path, fileName, "nr_hugepages")
-		num, err := ioutil.ReadFile(file)
+		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
+		val, err := ioutil.ReadFile(numFile)
 		if err != nil {
-			return nil, err
+			return hugePagesInfo, err
 		}
-
-		pageNum, err := strconv.ParseUint(string(bytes.TrimSpace(num)), 10, 64)
-		if err != nil {
-			return nil, err
+		var numPages uint64
+		// we use sscanf as the file as a new-line that trips up ParseUint
+		// it returns the number of tokens successfully parsed, so if
+		// n != 1, it means we were unable to parse a number from the file
+		n, err := fmt.Sscanf(string(val), "%d", &numPages)
+		if err != nil || n != 1 {
+			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
 		}
 
 		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
-			PageSize: pageSize / 1024, // Convert to kB.
-			NumPages: pageNum,
+			NumPages: numPages,
+			PageSize: pageSize,
 		})
 	}
 
 	return hugePagesInfo, nil
 }
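With the directory passed as a parameter, one parser now covers both the machine-wide pool and the per-node directories. The hugepagesDirectory value used by Info() is defined elsewhere in that file and is not shown in this diff; on Linux the machine-wide pool is normally exposed under /sys/kernel/mm/hugepages/, which is what the standalone sketch below assumes. Types are redefined locally so the snippet runs outside the repository; it mirrors the logic added above rather than importing it.

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// Local stand-in for info.HugePagesInfo, only for this sketch.
type HugePagesInfo struct {
	PageSize uint64 // in kB
	NumPages uint64
}

// getHugePagesInfo mirrors the consolidated parser added above: each entry in
// the given directory is named hugepages-<size>kB and holds an nr_hugepages file.
func getHugePagesInfo(hugepagesDirectory string) ([]HugePagesInfo, error) {
	var result []HugePagesInfo
	files, err := ioutil.ReadDir(hugepagesDirectory)
	if err != nil {
		// Treated as non-fatal, as in the diff: hugepage support may be disabled.
		return result, nil
	}
	for _, st := range files {
		parts := strings.Split(st.Name(), "-")
		if len(parts) != 2 {
			continue // small guard added for the standalone sketch
		}
		pageSize, err := strconv.ParseUint(strings.TrimSuffix(parts[1], "kB"), 10, 64)
		if err != nil {
			return result, err
		}
		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
		val, err := ioutil.ReadFile(numFile)
		if err != nil {
			return result, err
		}
		var numPages uint64
		// Sscanf tolerates the trailing newline in nr_hugepages.
		if n, err := fmt.Sscanf(string(val), "%d", &numPages); err != nil || n != 1 {
			return result, fmt.Errorf("could not parse %v contents %q", numFile, string(val))
		}
		result = append(result, HugePagesInfo{PageSize: pageSize, NumPages: numPages})
	}
	return result, nil
}

func main() {
	// Machine-wide pool (assumed path) and the per-node layout used by addNode below.
	for _, dir := range []string{"/sys/kernel/mm/hugepages/", "/sys/devices/system/node/node0/hugepages/"} {
		pages, err := getHugePagesInfo(dir)
		fmt.Println(dir, pages, err)
	}
}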
@@ -347,13 +346,6 @@ func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
 			}
 			// Ignore unknown caches.
 		}
-
-		// Add a node-level huge pages info.
-		hugePagesInfo, err := getHugePagesInfoFromNode(nodePath, node.Id)
-		if err != nil {
-			return nil, -1, err
-		}
-		nodes[idx].HugePages = hugePagesInfo
 	}
 	return nodes, numCores, nil
 }
@@ -401,6 +393,15 @@ func addNode(nodes *[]info.Node, id int) (int, error) {
 		}
 		node.Memory = uint64(m)
 	}
+	// Look for per-node hugepages info using node id
+	// Such as: /sys/devices/system/node/node%d/hugepages
+	hugepagesDirectory := nodePath + "/node" + string(id) + "/hugepages/"
+	hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
+	if err != nil {
+		return -1, err
+	}
+	node.HugePages = hugePagesInfo
+
 	*nodes = append(*nodes, node)
 	idx = len(*nodes) - 1
 }
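One detail worth noting in the added path construction: converting an int with string(id) in Go yields the UTF-8 encoding of code point id, not its decimal digits, so for id 1 the expression above builds "/node\x01/hugepages/" rather than "/node1/hugepages/". The decimal form usually comes from strconv.Itoa or fmt.Sprintf; a short illustrative sketch, not part of the commit:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	id := 1
	// string(rune(id)) is what the string(id) conversion in the hunk evaluates to.
	fmt.Printf("%q\n", "/node"+string(rune(id))+"/hugepages/") // "/node\x01/hugepages/"
	fmt.Printf("%q\n", "/node"+strconv.Itoa(id)+"/hugepages/") // "/node1/hugepages/"
}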