Add an API to support ps/top.
This commit is contained in:
parent
86dd6cc99d
commit
5e10989a78
@ -38,6 +38,7 @@ const (
|
||||
storageApi = "storage"
|
||||
attributesApi = "attributes"
|
||||
versionApi = "version"
|
||||
psApi = "ps"
|
||||
)
|
||||
|
||||
// Interface for a cAdvisor API version
|
||||
@ -304,7 +305,7 @@ func (self *version2_0) Version() string {
|
||||
}
|
||||
|
||||
func (self *version2_0) SupportedRequestTypes() []string {
|
||||
return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi}
|
||||
return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi, psApi}
|
||||
}
|
||||
|
||||
func (self *version2_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
|
||||
@ -391,6 +392,17 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
|
||||
return writeResult(fi, w)
|
||||
case eventsApi:
|
||||
return handleEventRequest(request, m, w, r)
|
||||
case psApi:
|
||||
// reuse container type from request.
|
||||
// ignore recursive.
|
||||
// TODO(rjnagal): consider count to limit ps output.
|
||||
name := getContainerName(request)
|
||||
glog.V(4).Infof("Api - Spec for container %q, options %+v", name, opt)
|
||||
ps, err := m.GetProcessList(name, opt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("process listing failed: %v", err)
|
||||
}
|
||||
return writeResult(ps, w)
|
||||
default:
|
||||
return fmt.Errorf("unknown request type %q", requestType)
|
||||
}
|
||||
|
@ -325,8 +325,7 @@ func (self *dockerContainerHandler) ListThreads(listType container.ListType) ([]
|
||||
}
|
||||
|
||||
func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
|
||||
// TODO(vmarmol): Implement.
|
||||
return nil, nil
|
||||
return containerLibcontainer.GetProcesses(self.cgroupManager)
|
||||
}
|
||||
|
||||
func (self *dockerContainerHandler) WatchSubcontainers(events chan container.SubcontainerEvent) error {
|
||||
|
@ -96,6 +96,14 @@ func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
|
||||
pids, err := cgroupManager.GetPids()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pids, nil
|
||||
}
|
||||
|
||||
// DockerStateDir returns the directory under dockerRoot where Docker keeps
// its per-container state.
func DockerStateDir(dockerRoot string) string {
	// Docker stores container state in the "containers" subdirectory.
	const containersDir = "containers"
	return path.Join(dockerRoot, containersDir)
}
|
||||
|
@ -399,8 +399,7 @@ func (self *rawContainerHandler) ListThreads(listType container.ListType) ([]int
|
||||
}
|
||||
|
||||
func (self *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
|
||||
// TODO(vmarmol): Implement
|
||||
return nil, nil
|
||||
return libcontainer.GetProcesses(self.cgroupManager)
|
||||
}
|
||||
|
||||
func (self *rawContainerHandler) watchDirectory(dir string, containerName string) error {
|
||||
|
@ -166,3 +166,16 @@ type RequestOptions struct {
|
||||
// Whether to include stats for child subcontainers.
|
||||
Recursive bool `json:"recursive"`
|
||||
}
|
||||
|
||||
// ProcessInfo describes one process inside a container. Fields mirror the
// columns of `ps -o user,pid,ppid,stime,pcpu,rss,vsz,stat,time,comm`
// (see the ps invocation in GetProcessList).
type ProcessInfo struct {
	// User that owns the process (ps "user" column).
	User string `json:"user"`
	// Process id.
	Pid int `json:"pid"`
	// Parent process id.
	Ppid int `json:"parent_pid"`
	// Start time as formatted by ps ("stime").
	StartTime string `json:"start_time"`
	// CPU usage percentage ("pcpu"), kept as the raw ps string.
	PercentCpu string `json:"percent_cpu"`
	// Resident set size ("rss"), raw ps string — presumably KiB; confirm with ps(1).
	RSS string `json:"rss"`
	// Virtual memory size ("vsz"), raw ps string.
	VirtualSize string `json:"virtual_size"`
	// Process state ("stat").
	Status string `json:"status"`
	// Cumulative CPU time ("time").
	RunningTime string `json:"running_time"`
	// Command name ("comm"); trailing whitespace-separated fields are rejoined.
	Cmd string `json:"cmd"`
}
|
||||
|
@ -18,7 +18,10 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"os/exec"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -113,6 +116,59 @@ func (c *containerData) DerivedStats() (v2.DerivedStats, error) {
|
||||
return c.summaryReader.DerivedStats()
|
||||
}
|
||||
|
||||
func (c *containerData) GetProcessList() ([]v2.ProcessInfo, error) {
|
||||
pids, err := c.handler.ListProcesses(container.ListSelf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pidMap := map[int]bool{}
|
||||
for _, pid := range pids {
|
||||
pidMap[pid] = true
|
||||
}
|
||||
// TODO(rjnagal): Take format as an option?
|
||||
format := "user,pid,ppid,stime,pcpu,rss,vsz,stat,time,comm"
|
||||
args := []string{"-e", "-o", format}
|
||||
expectedFields := 10
|
||||
out, err := exec.Command("ps", args...).Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute ps command: %v", err)
|
||||
}
|
||||
processes := []v2.ProcessInfo{}
|
||||
lines := strings.Split(string(out), "\n")
|
||||
for _, line := range lines[1:] {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < expectedFields {
|
||||
return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
|
||||
}
|
||||
pid, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid pid %q: %v", fields[1], err)
|
||||
}
|
||||
ppid, err := strconv.Atoi(fields[2])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid ppid %q: %v", fields[2], err)
|
||||
}
|
||||
if pidMap[pid] == true {
|
||||
processes = append(processes, v2.ProcessInfo{
|
||||
User: fields[0],
|
||||
Pid: pid,
|
||||
Ppid: ppid,
|
||||
StartTime: fields[3],
|
||||
PercentCpu: fields[4],
|
||||
RSS: fields[5],
|
||||
VirtualSize: fields[6],
|
||||
Status: fields[7],
|
||||
RunningTime: fields[8],
|
||||
Cmd: strings.Join(fields[9:], " "),
|
||||
})
|
||||
}
|
||||
}
|
||||
return processes, nil
|
||||
}
|
||||
|
||||
func newContainerData(containerName string, memoryStorage *memory.InMemoryStorage, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool, collectorManager collector.CollectorManager) (*containerData, error) {
|
||||
if memoryStorage == nil {
|
||||
return nil, fmt.Errorf("nil memory storage")
|
||||
|
@ -90,6 +90,9 @@ type Manager interface {
|
||||
// Returns information for all global filesystems if label is empty.
|
||||
GetFsInfo(label string) ([]v2.FsInfo, error)
|
||||
|
||||
// Get ps output for a container.
|
||||
GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error)
|
||||
|
||||
// Get events streamed through passedChannel that fit the request.
|
||||
WatchForEvents(request *events.Request) (*events.EventChannel, error)
|
||||
|
||||
@ -640,6 +643,27 @@ func (m *manager) Exists(containerName string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *manager) GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error) {
|
||||
// override recursive. Only support single container listing.
|
||||
options.Recursive = false
|
||||
conts, err := m.getRequestedContainers(containerName, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(conts) != 1 {
|
||||
return nil, fmt.Errorf("Expected the request to match only one container")
|
||||
}
|
||||
// TODO(rjnagal): handle count? Only if we can do count by type (eg. top 5 cpu users)
|
||||
ps := []v2.ProcessInfo{}
|
||||
for _, cont := range conts {
|
||||
ps, err = cont.GetProcessList()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// Create a container.
|
||||
func (m *manager) createContainer(containerName string) error {
|
||||
handler, accept, err := container.NewContainerHandler(containerName)
|
||||
|
@ -99,3 +99,8 @@ func (c *ManagerMock) GetFsInfo() ([]v2.FsInfo, error) {
|
||||
args := c.Called()
|
||||
return args.Get(0).([]v2.FsInfo), args.Error(1)
|
||||
}
|
||||
|
||||
func (c *ManagerMock) GetProcessList(name string, options v2.RequestOptions) ([]v2.ProcessInfo, error) {
|
||||
args := c.Called()
|
||||
return args.Get(0).([]v2.ProcessInfo), args.Error(1)
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user