Carefully fixing style (#2509)

* Use golangci-lint to add a lint presubmit test, and fix linter errors

Signed-off-by: Maciej "Iwan" Iwanowski <maciej.iwanowski@intel.com>
Commit 854445c010 (parent 1223982cc4), authored by iwankgb on 2020-04-23 01:26:36 +02:00 and committed via GitHub.
67 changed files with 1488 additions and 1396 deletions

View File

@@ -1,6 +1,35 @@
name: Test
on: [push, pull_request]
jobs:
  lint:
    strategy:
      matrix:
        go-versions: [1.14]
        platform: [ubuntu-latest]
        environment-variables: [build/config/plain.sh, build/config/libpfm4.sh]
    runs-on: ${{ matrix.platform }}
    timeout-minutes: 10
    steps:
      - name: Install Go
        uses: actions/setup-go@v1
        with:
          go-version: ${{ matrix.go-versions }}
      # This is not the most elegant way of installing golangci-lint but I have filed a PR
      # and I will fix it as soon as it's merged: https://github.com/golangci/golangci-lint/pull/1036
      # cAdvisor has existed for a while without golint and we need to figure out how to
      # fix issues affecting exported identifiers that are part of the public API. Let's avoid breaking Kubernetes.
      - name: Install golangci-lint
        run: |
          go get -d github.com/golangci/golangci-lint/cmd/golangci-lint &&
          cd $(go env GOPATH)/src/github.com/golangci/golangci-lint/ &&
          git remote add iwankgb https://github.com/iwankgb/golangci-lint.git &&
          git fetch iwankgb &&
          git reset --hard iwankgb/exclude-case-sensitive &&
          go install -i github.com/golangci/golangci-lint/cmd/golangci-lint
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Run golangci-lint
        run: source ${{ matrix.environment-variables }} && make lint
  test:
    strategy:
      matrix:
@@ -31,19 +60,19 @@ jobs:
    runs-on: ${{ matrix.platform }}
    timeout-minutes: 30
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
        with:
          fetch-depth: 1
          path: go/src/github.com/google/cadvisor
      - name: Run integration tests
        env:
          GOLANG_VERSION: ${{ matrix.go-versions }}
        run: |
          cd $GITHUB_WORKSPACE/go/src/github.com/google/cadvisor && source ${{ matrix.environment-variables }} && make docker-test-integration
      - name: Upload cAdvisor log file
        uses: actions/upload-artifact@v1
        if: failure()
        with:
          name: cadvisor.log
          path: ${{ github.workspace }}/go/src/github.com/google/cadvisor/cadvisor.log

.golangci.yml (new file, 60 lines)
View File

@@ -0,0 +1,60 @@
run:
linters-settings:
  govet:
    enable-all: true
  golint:
    min-confidence: 0
linters:
  disable-all: true
  enable:
    - govet
    - errcheck
    - staticcheck
    - unused
    - gosimple
    - structcheck
    - varcheck
    - ineffassign
    - deadcode
    - typecheck
    - golint
issues:
  max-issues-per-linter: 0
  max-same-issues: 0
  exclude-case-sensitive: true
  exclude:
    # integration/tests/api/event_test.go:66:6: func `waitForStaticEvent` is unused (unused)
    # Flaky test skipped.
    - waitForStaticEvent
    # Initialism or acronyms for fields, vars and types:
    - "(struct field|var|type|const) `[A-Z].*` should be `.*`"
    # Initialism or acronyms - renaming exported methods and functions can be tricky:
    - "(method|func) [A-Z].* should be .*"
    # Stuttering affects exported names:
    - "type name will be used as .*\\.[A-Z]{1}.* by other packages, and that stutters"
    # We would have to change exported function return type:
    - "exported func .* returns unexported type"
  exclude-rules:
    # container/containerd/client.go:67:4: SA1019: grpc.WithDialer is deprecated: use WithContextDialer instead. Will be supported throughout 1.x. (staticcheck)
    # There are more similar issues in following lines.
    - path: container/containerd/client.go
      text: "SA1019:"
    # utils/cpuload/netlink/netlink.go:102:15: Error return value of `binary.Write` is not checked (errcheck)
    # There are more similar issues in this file
    - path: utils/cpuload/netlink/netlink.go
      text: "Error return value of `binary.Write` is not checked"
    # utils/cloudinfo/aws/aws.go:60:28: SA1019: session.New is deprecated: Use NewSession functions to create sessions instead. NewSession has the same functionality as New except an error can be returned when the func is called instead of waiting to receive an error until a request is made. (staticcheck)
    - path: utils/cloudinfo/aws/aws.go
      text: "SA1019:"
    # events/handler.go:151:51: exported func NewEventManager returns unexported type *github.com/google/cadvisor/events.events, which can be annoying to use (golint)
    - path: events/handler.go
      text: "exported func NewEventManager returns unexported type .{1}github.com/google/cadvisor/events.events, which can be annoying to use"
    # manager/manager_test.go:277:29: Error return value of `(*github.com/google/cadvisor/container/testing.MockContainerHandler).GetSpec` is not checked (errcheck)
    - path: manager/manager_test.go
      text: "Error return value of `.{2}github.com/google/cadvisor/container/testing.MockContainerHandler.{1}.GetSpec` is not checked"
    # utils/sysinfo/sysinfo.go:208:7: ineffectual assignment to `err` (ineffassign)
    - path: utils/sysinfo/sysinfo.go
      text: "ineffectual assignment to `err`|SA4006:"
    # cache/memory/memory_test.go:81:23: Error return value of `memoryCache.AddStats` is not checked (errcheck)
    - path: cache/memory/memory_test.go
      text: "Error return value of `memoryCache.AddStats` is not checked"

View File

@@ -16,6 +16,7 @@ GO := go
pkgs = $(shell $(GO) list ./... | grep -v vendor)
cmd_pkgs = $(shell cd cmd && $(GO) list ./... | grep -v vendor)
arch ?= $(shell go env GOARCH)
go_path = $(shell go env GOPATH)
ifeq ($(arch), amd64)
  Dockerfile_tag := ''
@@ -86,6 +87,10 @@ presubmit: vet
	@echo ">> checking file boilerplate"
	@./build/check_boilerplate.sh
lint:
	@echo ">> running golangci-lint using configuration at .golangci.yml"
	@$(go_path)/bin/golangci-lint run
clean:
	@rm -f *.test cadvisor

View File

@@ -46,7 +46,7 @@ type nvidiaManager struct {
var sysFsPCIDevicesPath = "/sys/bus/pci/devices/"
const nvidiaVendorId = "0x10de"
const nvidiaVendorID = "0x10de"
func NewNvidiaManager() stats.Manager {
manager := &nvidiaManager{}
@@ -61,8 +61,8 @@ func NewNvidiaManager() stats.Manager {
// setup initializes NVML if nvidia devices are present on the node.
func (nm *nvidiaManager) setup() error {
if !detectDevices(nvidiaVendorId) {
return fmt.Errorf("No NVIDIA devices found.")
if !detectDevices(nvidiaVendorID) {
return fmt.Errorf("no NVIDIA devices found")
}
nm.devicesPresent = true
@@ -71,7 +71,7 @@ func (nm *nvidiaManager) setup() error {
}
// detectDevices returns true if a device with given pci id is present on the node.
func detectDevices(vendorId string) bool {
func detectDevices(vendorID string) bool {
devices, err := ioutil.ReadDir(sysFsPCIDevicesPath)
if err != nil {
klog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err)
@@ -85,8 +85,8 @@ func detectDevices(vendorId string) bool {
klog.V(4).Infof("Error while reading %q: %v", vendorPath, err)
continue
}
if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) {
klog.V(3).Infof("Found device with vendorId %q", vendorId)
if strings.EqualFold(strings.TrimSpace(string(content)), vendorID) {
klog.V(3).Infof("Found device with vendorID %q", vendorID)
return true
}
}
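
The error-string change above ("No NVIDIA devices found." to "no NVIDIA devices found") follows the convention checked by golint and staticcheck (ST1005): error strings should not be capitalized or end with punctuation, because callers usually wrap them. A minimal sketch of why (hypothetical wrapping, not from this commit):

package main

import (
    "fmt"
    "log"
)

func setup() error {
    // Lowercase, no trailing period: the string reads cleanly once wrapped.
    return fmt.Errorf("no NVIDIA devices found")
}

func main() {
    if err := setup(); err != nil {
        // Prints: setup failed: no NVIDIA devices found
        log.Printf("setup failed: %v", err)
    }
}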

View File

@@ -38,19 +38,19 @@ type containerCache struct {
lock sync.RWMutex
}
func (self *containerCache) AddStats(stats *info.ContainerStats) error {
self.lock.Lock()
defer self.lock.Unlock()
func (c *containerCache) AddStats(stats *info.ContainerStats) error {
c.lock.Lock()
defer c.lock.Unlock()
// Add the stat to storage.
self.recentStats.Add(stats.Timestamp, stats)
c.recentStats.Add(stats.Timestamp, stats)
return nil
}
func (self *containerCache) RecentStats(start, end time.Time, maxStats int) ([]*info.ContainerStats, error) {
self.lock.RLock()
defer self.lock.RUnlock()
result := self.recentStats.InTimeRange(start, end, maxStats)
func (c *containerCache) RecentStats(start, end time.Time, maxStats int) ([]*info.ContainerStats, error) {
c.lock.RLock()
defer c.lock.RUnlock()
result := c.recentStats.InTimeRange(start, end, maxStats)
converted := make([]*info.ContainerStats, len(result))
for i, el := range result {
converted[i] = el.(*info.ContainerStats)
@@ -73,20 +73,20 @@ type InMemoryCache struct {
backend []storage.StorageDriver
}
func (self *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
func (c *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
var cstore *containerCache
var ok bool
func() {
self.lock.Lock()
defer self.lock.Unlock()
if cstore, ok = self.containerCacheMap[cInfo.ContainerReference.Name]; !ok {
cstore = newContainerStore(cInfo.ContainerReference, self.maxAge)
self.containerCacheMap[cInfo.ContainerReference.Name] = cstore
c.lock.Lock()
defer c.lock.Unlock()
if cstore, ok = c.containerCacheMap[cInfo.ContainerReference.Name]; !ok {
cstore = newContainerStore(cInfo.ContainerReference, c.maxAge)
c.containerCacheMap[cInfo.ContainerReference.Name] = cstore
}
}()
for _, backend := range self.backend {
for _, backend := range c.backend {
// TODO(monnand): To deal with long delay write operations, we
// may want to start a pool of goroutines to do write
// operations.
@@ -97,13 +97,13 @@ func (self *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.Conta
return cstore.AddStats(stats)
}
func (self *InMemoryCache) RecentStats(name string, start, end time.Time, maxStats int) ([]*info.ContainerStats, error) {
func (c *InMemoryCache) RecentStats(name string, start, end time.Time, maxStats int) ([]*info.ContainerStats, error) {
var cstore *containerCache
var ok bool
err := func() error {
self.lock.RLock()
defer self.lock.RUnlock()
if cstore, ok = self.containerCacheMap[name]; !ok {
c.lock.RLock()
defer c.lock.RUnlock()
if cstore, ok = c.containerCacheMap[name]; !ok {
return ErrDataNotFound
}
return nil
@@ -115,17 +115,17 @@ func (self *InMemoryCache) RecentStats(name string, start, end time.Time, maxSta
return cstore.RecentStats(start, end, maxStats)
}
func (self *InMemoryCache) Close() error {
self.lock.Lock()
self.containerCacheMap = make(map[string]*containerCache, 32)
self.lock.Unlock()
func (c *InMemoryCache) Close() error {
c.lock.Lock()
c.containerCacheMap = make(map[string]*containerCache, 32)
c.lock.Unlock()
return nil
}
func (self *InMemoryCache) RemoveContainer(containerName string) error {
self.lock.Lock()
delete(self.containerCacheMap, containerName)
self.lock.Unlock()
func (c *InMemoryCache) RemoveContainer(containerName string) error {
c.lock.Lock()
delete(c.containerCacheMap, containerName)
c.lock.Unlock()
return nil
}
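
Most of this file's diff is the receiver rename from self to c: golint flags receiver names such as "self" or "this" as un-idiomatic, and Go convention is a one- or two-letter abbreviation of the type. A self-contained sketch of the resulting pattern, including the RWMutex read/write split used by containerCache above (toy type, not the cAdvisor code):

package main

import (
    "fmt"
    "sync"
)

type cache struct {
    lock sync.RWMutex
    data map[string]int
}

// Add takes the write lock; the receiver is "c", not "self".
func (c *cache) Add(k string, v int) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.data[k] = v
}

// Get takes only the read lock, so concurrent readers do not block each other.
func (c *cache) Get(k string) (int, bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()
    v, ok := c.data[k]
    return v, ok
}

func main() {
    c := &cache{data: map[string]int{}}
    c.Add("x", 1)
    fmt.Println(c.Get("x"))
}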

View File

@@ -35,7 +35,7 @@ import (
// Client represents the base URL for a cAdvisor client.
type Client struct {
baseUrl string
baseURL string
httpClient *http.Client
}
@@ -50,16 +50,16 @@ func newClient(url string, client *http.Client) (*Client, error) {
}
return &Client{
baseUrl: fmt.Sprintf("%sapi/v1.3/", url),
baseURL: fmt.Sprintf("%sapi/v1.3/", url),
httpClient: client,
}, nil
}
// Returns all past events that satisfy the request
func (self *Client) EventStaticInfo(name string) (einfo []*v1.Event, err error) {
u := self.eventsInfoUrl(name)
func (c *Client) EventStaticInfo(name string) (einfo []*v1.Event, err error) {
u := c.eventsInfoURL(name)
ret := new([]*v1.Event)
if err = self.httpGetJsonData(ret, nil, u, "event info"); err != nil {
if err = c.httpGetJSONData(ret, nil, u, "event info"); err != nil {
return
}
einfo = *ret
@@ -68,9 +68,9 @@ func (self *Client) EventStaticInfo(name string) (einfo []*v1.Event, err error)
// Streams all events that occur that satisfy the request into the channel
// that is passed
func (self *Client) EventStreamingInfo(name string, einfo chan *v1.Event) (err error) {
u := self.eventsInfoUrl(name)
if err = self.getEventStreamingData(u, einfo); err != nil {
func (c *Client) EventStreamingInfo(name string, einfo chan *v1.Event) (err error) {
u := c.eventsInfoURL(name)
if err = c.getEventStreamingData(u, einfo); err != nil {
return
}
return nil
@@ -79,10 +79,10 @@ func (self *Client) EventStreamingInfo(name string, einfo chan *v1.Event) (err e
// MachineInfo returns the JSON machine information for this client.
// A non-nil error result indicates a problem with obtaining
// the JSON machine information data.
func (self *Client) MachineInfo() (minfo *v1.MachineInfo, err error) {
u := self.machineInfoUrl()
func (c *Client) MachineInfo() (minfo *v1.MachineInfo, err error) {
u := c.machineInfoURL()
ret := new(v1.MachineInfo)
if err = self.httpGetJsonData(ret, nil, u, "machine info"); err != nil {
if err = c.httpGetJSONData(ret, nil, u, "machine info"); err != nil {
return
}
minfo = ret
@@ -91,10 +91,10 @@ func (self *Client) MachineInfo() (minfo *v1.MachineInfo, err error) {
// ContainerInfo returns the JSON container information for the specified
// container and request.
func (self *Client) ContainerInfo(name string, query *v1.ContainerInfoRequest) (cinfo *v1.ContainerInfo, err error) {
u := self.containerInfoUrl(name)
func (c *Client) ContainerInfo(name string, query *v1.ContainerInfoRequest) (cinfo *v1.ContainerInfo, err error) {
u := c.containerInfoURL(name)
ret := new(v1.ContainerInfo)
if err = self.httpGetJsonData(ret, query, u, fmt.Sprintf("container info for %q", name)); err != nil {
if err = c.httpGetJSONData(ret, query, u, fmt.Sprintf("container info for %q", name)); err != nil {
return
}
cinfo = ret
@@ -102,10 +102,10 @@ func (self *Client) ContainerInfo(name string, query *v1.ContainerInfoRequest) (
}
// Returns the information about all subcontainers (recursive) of the specified container (including itself).
func (self *Client) SubcontainersInfo(name string, query *v1.ContainerInfoRequest) ([]v1.ContainerInfo, error) {
func (c *Client) SubcontainersInfo(name string, query *v1.ContainerInfoRequest) ([]v1.ContainerInfo, error) {
var response []v1.ContainerInfo
url := self.subcontainersInfoUrl(name)
err := self.httpGetJsonData(&response, query, url, fmt.Sprintf("subcontainers container info for %q", name))
url := c.subcontainersInfoURL(name)
err := c.httpGetJSONData(&response, query, url, fmt.Sprintf("subcontainers container info for %q", name))
if err != nil {
return []v1.ContainerInfo{}, err
@@ -115,10 +115,10 @@ func (self *Client) SubcontainersInfo(name string, query *v1.ContainerInfoReques
// Returns the JSON container information for the specified
// Docker container and request.
func (self *Client) DockerContainer(name string, query *v1.ContainerInfoRequest) (cinfo v1.ContainerInfo, err error) {
u := self.dockerInfoUrl(name)
func (c *Client) DockerContainer(name string, query *v1.ContainerInfoRequest) (cinfo v1.ContainerInfo, err error) {
u := c.dockerInfoURL(name)
ret := make(map[string]v1.ContainerInfo)
if err = self.httpGetJsonData(&ret, query, u, fmt.Sprintf("Docker container info for %q", name)); err != nil {
if err = c.httpGetJSONData(&ret, query, u, fmt.Sprintf("Docker container info for %q", name)); err != nil {
return
}
if len(ret) != 1 {
@@ -132,10 +132,10 @@ func (self *Client) DockerContainer(name string, query *v1.ContainerInfoRequest)
}
// Returns the JSON container information for all Docker containers.
func (self *Client) AllDockerContainers(query *v1.ContainerInfoRequest) (cinfo []v1.ContainerInfo, err error) {
u := self.dockerInfoUrl("/")
func (c *Client) AllDockerContainers(query *v1.ContainerInfoRequest) (cinfo []v1.ContainerInfo, err error) {
u := c.dockerInfoURL("/")
ret := make(map[string]v1.ContainerInfo)
if err = self.httpGetJsonData(&ret, query, u, "all Docker containers info"); err != nil {
if err = c.httpGetJSONData(&ret, query, u, "all Docker containers info"); err != nil {
return
}
cinfo = make([]v1.ContainerInfo, 0, len(ret))
@@ -145,27 +145,27 @@ func (self *Client) AllDockerContainers(query *v1.ContainerInfoRequest) (cinfo [
return
}
func (self *Client) machineInfoUrl() string {
return self.baseUrl + path.Join("machine")
func (c *Client) machineInfoURL() string {
return c.baseURL + path.Join("machine")
}
func (self *Client) containerInfoUrl(name string) string {
return self.baseUrl + path.Join("containers", name)
func (c *Client) containerInfoURL(name string) string {
return c.baseURL + path.Join("containers", name)
}
func (self *Client) subcontainersInfoUrl(name string) string {
return self.baseUrl + path.Join("subcontainers", name)
func (c *Client) subcontainersInfoURL(name string) string {
return c.baseURL + path.Join("subcontainers", name)
}
func (self *Client) dockerInfoUrl(name string) string {
return self.baseUrl + path.Join("docker", name)
func (c *Client) dockerInfoURL(name string) string {
return c.baseURL + path.Join("docker", name)
}
func (self *Client) eventsInfoUrl(name string) string {
return self.baseUrl + path.Join("events", name)
func (c *Client) eventsInfoURL(name string) string {
return c.baseURL + path.Join("events", name)
}
func (self *Client) httpGetJsonData(data, postData interface{}, url, infoName string) error {
func (c *Client) httpGetJSONData(data, postData interface{}, url, infoName string) error {
var resp *http.Response
var err error
@@ -174,9 +174,9 @@ func (self *Client) httpGetJsonData(data, postData interface{}, url, infoName st
if marshalErr != nil {
return fmt.Errorf("unable to marshal data: %v", marshalErr)
}
resp, err = self.httpClient.Post(url, "application/json", bytes.NewBuffer(data))
resp, err = c.httpClient.Post(url, "application/json", bytes.NewBuffer(data))
} else {
resp, err = self.httpClient.Get(url)
resp, err = c.httpClient.Get(url)
}
if err != nil {
return fmt.Errorf("unable to get %q from %q: %v", infoName, url, err)
@@ -200,12 +200,12 @@ func (self *Client) httpGetJsonData(data, postData interface{}, url, infoName st
return nil
}
func (self *Client) getEventStreamingData(url string, einfo chan *v1.Event) error {
func (c *Client) getEventStreamingData(url string, einfo chan *v1.Event) error {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
resp, err := self.httpClient.Do(req)
resp, err := c.httpClient.Do(req)
if err != nil {
return err
}
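
For orientation, a hedged usage sketch of the renamed v1 client; it assumes the exported constructor client.NewClient and a cAdvisor instance on localhost:8080, neither of which appears in this diff, and the query parameters are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/google/cadvisor/client"
    v1 "github.com/google/cadvisor/info/v1"
)

func main() {
    c, err := client.NewClient("http://localhost:8080/")
    if err != nil {
        log.Fatal(err)
    }
    // EventStreamingInfo pushes events into the channel as they arrive.
    events := make(chan *v1.Event)
    go func() {
        if err := c.EventStreamingInfo("?oom_events=true&stream=true", events); err != nil {
            log.Printf("event stream ended: %v", err)
        }
    }()
    for e := range events {
        fmt.Println(e.Timestamp, e.ContainerName, e.EventType)
    }
}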

View File

@@ -32,7 +32,7 @@ import (
// Client represents the base URL for a cAdvisor client.
type Client struct {
baseUrl string
baseURL string
}
// NewClient returns a new client with the specified base URL.
@@ -42,17 +42,17 @@ func NewClient(url string) (*Client, error) {
}
return &Client{
baseUrl: fmt.Sprintf("%sapi/v2.1/", url),
baseURL: fmt.Sprintf("%sapi/v2.1/", url),
}, nil
}
// MachineInfo returns the JSON machine information for this client.
// A non-nil error result indicates a problem with obtaining
// the JSON machine information data.
func (self *Client) MachineInfo() (minfo *v1.MachineInfo, err error) {
u := self.machineInfoUrl()
func (c *Client) MachineInfo() (minfo *v1.MachineInfo, err error) {
u := c.machineInfoURL()
ret := new(v1.MachineInfo)
if err = self.httpGetJsonData(ret, nil, u, "machine info"); err != nil {
if err = c.httpGetJSONData(ret, nil, u, "machine info"); err != nil {
return
}
minfo = ret
@@ -62,25 +62,25 @@ func (self *Client) MachineInfo() (minfo *v1.MachineInfo, err error) {
// MachineStats returns the JSON machine statistics for this client.
// A non-nil error result indicates a problem with obtaining
// the JSON machine information data.
func (self *Client) MachineStats() ([]v2.MachineStats, error) {
func (c *Client) MachineStats() ([]v2.MachineStats, error) {
var ret []v2.MachineStats
u := self.machineStatsUrl()
err := self.httpGetJsonData(&ret, nil, u, "machine stats")
u := c.machineStatsURL()
err := c.httpGetJSONData(&ret, nil, u, "machine stats")
return ret, err
}
// VersionInfo returns the version info for cAdvisor.
func (self *Client) VersionInfo() (version string, err error) {
u := self.versionInfoUrl()
version, err = self.httpGetString(u, "version info")
func (c *Client) VersionInfo() (version string, err error) {
u := c.versionInfoURL()
version, err = c.httpGetString(u, "version info")
return
}
// Attributes returns hardware and software attributes of the machine.
func (self *Client) Attributes() (attr *v2.Attributes, err error) {
u := self.attributesUrl()
func (c *Client) Attributes() (attr *v2.Attributes, err error) {
u := c.attributesURL()
ret := new(v2.Attributes)
if err = self.httpGetJsonData(ret, nil, u, "attributes"); err != nil {
if err = c.httpGetJSONData(ret, nil, u, "attributes"); err != nil {
return
}
attr = ret
@@ -88,8 +88,8 @@ func (self *Client) Attributes() (attr *v2.Attributes, err error) {
}
// Stats returns stats for the requested container.
func (self *Client) Stats(name string, request *v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
u := self.statsUrl(name)
func (c *Client) Stats(name string, request *v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
u := c.statsURL(name)
ret := make(map[string]v2.ContainerInfo)
data := url.Values{
"type": []string{request.IdType},
@@ -98,33 +98,33 @@ func (self *Client) Stats(name string, request *v2.RequestOptions) (map[string]v
}
u = fmt.Sprintf("%s?%s", u, data.Encode())
if err := self.httpGetJsonData(&ret, nil, u, "stats"); err != nil {
if err := c.httpGetJSONData(&ret, nil, u, "stats"); err != nil {
return nil, err
}
return ret, nil
}
func (self *Client) machineInfoUrl() string {
return self.baseUrl + path.Join("machine")
func (c *Client) machineInfoURL() string {
return c.baseURL + path.Join("machine")
}
func (self *Client) machineStatsUrl() string {
return self.baseUrl + path.Join("machinestats")
func (c *Client) machineStatsURL() string {
return c.baseURL + path.Join("machinestats")
}
func (self *Client) versionInfoUrl() string {
return self.baseUrl + path.Join("version")
func (c *Client) versionInfoURL() string {
return c.baseURL + path.Join("version")
}
func (self *Client) attributesUrl() string {
return self.baseUrl + path.Join("attributes")
func (c *Client) attributesURL() string {
return c.baseURL + path.Join("attributes")
}
func (self *Client) statsUrl(name string) string {
return self.baseUrl + path.Join("stats", name)
func (c *Client) statsURL(name string) string {
return c.baseURL + path.Join("stats", name)
}
func (self *Client) httpGetResponse(postData interface{}, urlPath, infoName string) ([]byte, error) {
func (c *Client) httpGetResponse(postData interface{}, urlPath, infoName string) ([]byte, error) {
var resp *http.Response
var err error
@@ -155,16 +155,16 @@ func (self *Client) httpGetResponse(postData interface{}, urlPath, infoName stri
return body, nil
}
func (self *Client) httpGetString(url, infoName string) (string, error) {
body, err := self.httpGetResponse(nil, url, infoName)
func (c *Client) httpGetString(url, infoName string) (string, error) {
body, err := c.httpGetResponse(nil, url, infoName)
if err != nil {
return "", err
}
return string(body), nil
}
func (self *Client) httpGetJsonData(data, postData interface{}, url, infoName string) error {
body, err := self.httpGetResponse(postData, url, infoName)
func (c *Client) httpGetJSONData(data, postData interface{}, url, infoName string) error {
body, err := c.httpGetResponse(postData, url, infoName)
if err != nil {
return err
}
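
A similar hedged sketch for the v2 client; NewClient, MachineInfo and VersionInfo are shown in the hunks above, while the cAdvisor address and the MachineInfo field names are assumptions:

package main

import (
    "fmt"
    "log"

    clientv2 "github.com/google/cadvisor/client/v2"
)

func main() {
    c, err := clientv2.NewClient("http://localhost:8080/")
    if err != nil {
        log.Fatal(err)
    }
    mi, err := c.MachineInfo()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("cores=%d memory=%dB\n", mi.NumCores, mi.MemoryCapacity)

    version, err := c.VersionInfo()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("cAdvisor version:", version)
}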

View File

@@ -74,15 +74,15 @@ func getApiVersions() []ApiVersion {
type version1_0 struct {
}
func (self *version1_0) Version() string {
func (api *version1_0) Version() string {
return "v1.0"
}
func (self *version1_0) SupportedRequestTypes() []string {
func (api *version1_0) SupportedRequestTypes() []string {
return []string{containersApi, machineApi}
}
func (self *version1_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
func (api *version1_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
switch requestType {
case machineApi:
klog.V(4).Infof("Api - Machine")
@@ -137,15 +137,15 @@ func newVersion1_1(v *version1_0) *version1_1 {
}
}
func (self *version1_1) Version() string {
func (api *version1_1) Version() string {
return "v1.1"
}
func (self *version1_1) SupportedRequestTypes() []string {
return append(self.baseVersion.SupportedRequestTypes(), subcontainersApi)
func (api *version1_1) SupportedRequestTypes() []string {
return append(api.baseVersion.SupportedRequestTypes(), subcontainersApi)
}
func (self *version1_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
func (api *version1_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
switch requestType {
case subcontainersApi:
containerName := getContainerName(request)
@@ -170,7 +170,7 @@ func (self *version1_1) HandleRequest(requestType string, request []string, m ma
}
return nil
default:
return self.baseVersion.HandleRequest(requestType, request, m, w, r)
return api.baseVersion.HandleRequest(requestType, request, m, w, r)
}
}
@@ -187,15 +187,15 @@ func newVersion1_2(v *version1_1) *version1_2 {
}
}
func (self *version1_2) Version() string {
func (api *version1_2) Version() string {
return "v1.2"
}
func (self *version1_2) SupportedRequestTypes() []string {
return append(self.baseVersion.SupportedRequestTypes(), dockerApi)
func (api *version1_2) SupportedRequestTypes() []string {
return append(api.baseVersion.SupportedRequestTypes(), dockerApi)
}
func (self *version1_2) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
func (api *version1_2) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
switch requestType {
case dockerApi:
klog.V(4).Infof("Api - Docker(%v)", request)
@@ -239,7 +239,7 @@ func (self *version1_2) HandleRequest(requestType string, request []string, m ma
}
return nil
default:
return self.baseVersion.HandleRequest(requestType, request, m, w, r)
return api.baseVersion.HandleRequest(requestType, request, m, w, r)
}
}
@@ -256,20 +256,20 @@ func newVersion1_3(v *version1_2) *version1_3 {
}
}
func (self *version1_3) Version() string {
func (api *version1_3) Version() string {
return "v1.3"
}
func (self *version1_3) SupportedRequestTypes() []string {
return append(self.baseVersion.SupportedRequestTypes(), eventsApi)
func (api *version1_3) SupportedRequestTypes() []string {
return append(api.baseVersion.SupportedRequestTypes(), eventsApi)
}
func (self *version1_3) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
func (api *version1_3) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
switch requestType {
case eventsApi:
return handleEventRequest(request, m, w, r)
default:
return self.baseVersion.HandleRequest(requestType, request, m, w, r)
return api.baseVersion.HandleRequest(requestType, request, m, w, r)
}
}
@@ -304,15 +304,15 @@ func newVersion2_0() *version2_0 {
return &version2_0{}
}
func (self *version2_0) Version() string {
func (api *version2_0) Version() string {
return "v2.0"
}
func (self *version2_0) SupportedRequestTypes() []string {
func (api *version2_0) SupportedRequestTypes() []string {
return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi, psApi, customMetricsApi}
}
func (self *version2_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
func (api *version2_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
opt, err := getRequestOptions(r)
if err != nil {
return err
@@ -472,15 +472,15 @@ func newVersion2_1(v *version2_0) *version2_1 {
}
}
func (self *version2_1) Version() string {
func (api *version2_1) Version() string {
return "v2.1"
}
func (self *version2_1) SupportedRequestTypes() []string {
return append([]string{machineStatsApi}, self.baseVersion.SupportedRequestTypes()...)
func (api *version2_1) SupportedRequestTypes() []string {
return append([]string{machineStatsApi}, api.baseVersion.SupportedRequestTypes()...)
}
func (self *version2_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
func (api *version2_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
// Get the query request.
opt, err := getRequestOptions(r)
if err != nil {
@@ -521,7 +521,7 @@ func (self *version2_1) HandleRequest(requestType string, request []string, m ma
}
return writeResult(contStats, w)
default:
return self.baseVersion.HandleRequest(requestType, request, m, w, r)
return api.baseVersion.HandleRequest(requestType, request, m, w, r)
}
}
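
Each API version above embeds its predecessor as baseVersion and falls through to it for request types it does not recognize, so the rename from self to api touches every level of the chain. A distilled sketch of that delegation pattern (toy types, not the cAdvisor code):

package main

import "fmt"

type handler interface {
    Handle(requestType string) error
}

type v1 struct{}

func (api *v1) Handle(requestType string) error {
    if requestType == "machine" {
        fmt.Println("v1 handled", requestType)
        return nil
    }
    return fmt.Errorf("unknown request type %q", requestType)
}

// v2 handles what it knows and delegates the rest, like
// version2_1.HandleRequest falling back to api.baseVersion above.
type v2 struct {
    baseVersion handler
}

func (api *v2) Handle(requestType string) error {
    switch requestType {
    case "machinestats":
        fmt.Println("v2 handled", requestType)
        return nil
    default:
        return api.baseVersion.Handle(requestType)
    }
}

func main() {
    api := &v2{baseVersion: &v1{}}
    _ = api.Handle("machinestats") // handled by v2
    _ = api.Handle("machine")      // delegated to v1
}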

View File

@@ -79,31 +79,31 @@ func Client() (mesosAgentClient, error) {
}
// ContainerInfo returns the container information of the given container id
func (self *client) ContainerInfo(id string) (*containerInfo, error) {
c, err := self.getContainer(id)
func (c *client) ContainerInfo(id string) (*containerInfo, error) {
container, err := c.getContainer(id)
if err != nil {
return nil, err
}
// Get labels of the container
l, err := self.getLabels(c)
l, err := c.getLabels(container)
if err != nil {
return nil, err
}
return &containerInfo{
cntr: c,
cntr: container,
labels: l,
}, nil
}
// Get the Pid of the container
func (self *client) ContainerPid(id string) (int, error) {
func (c *client) ContainerPid(id string) (int, error) {
var pid int
var err error
err = retry.Retry(
func(attempt uint) error {
c, err := self.ContainerInfo(id)
c, err := c.ContainerInfo(id)
if err != nil {
return err
}
@@ -123,9 +123,9 @@ func (self *client) ContainerPid(id string) (int, error) {
return pid, err
}
func (self *client) getContainer(id string) (*mContainer, error) {
func (c *client) getContainer(id string) (*mContainer, error) {
// Get all containers
cntrs, err := self.getContainers()
cntrs, err := c.getContainers()
if err != nil {
return nil, err
}
@@ -139,9 +139,9 @@ func (self *client) getContainer(id string) (*mContainer, error) {
return nil, fmt.Errorf("can't locate container %s", id)
}
func (self *client) getVersion() (string, error) {
func (c *client) getVersion() (string, error) {
req := calls.NonStreaming(calls.GetVersion())
result, err := self.fetchAndDecode(req)
result, err := c.fetchAndDecode(req)
if err != nil {
return "", fmt.Errorf("failed to get mesos version: %v", err)
}
@@ -153,9 +153,9 @@ func (self *client) getVersion() (string, error) {
return version.VersionInfo.Version, nil
}
func (self *client) getContainers() (mContainers, error) {
func (c *client) getContainers() (mContainers, error) {
req := calls.NonStreaming(calls.GetContainers())
result, err := self.fetchAndDecode(req)
result, err := c.fetchAndDecode(req)
if err != nil {
return nil, fmt.Errorf("failed to get mesos containers: %v", err)
}
@@ -167,18 +167,18 @@ func (self *client) getContainers() (mContainers, error) {
return cntrs, nil
}
func (self *client) getLabels(c *mContainer) (map[string]string, error) {
func (c *client) getLabels(container *mContainer) (map[string]string, error) {
// Get mesos agent state which contains all containers labels
var s state
req := calls.NonStreaming(calls.GetState())
result, err := self.fetchAndDecode(req)
result, err := c.fetchAndDecode(req)
if err != nil {
return map[string]string{}, fmt.Errorf("failed to get mesos agent state: %v", err)
}
s.st = result.GetState
// Fetch labels from state object
labels, err := s.FetchLabels(c.FrameworkID.Value, c.ExecutorID.Value)
labels, err := s.FetchLabels(container.FrameworkID.Value, container.ExecutorID.Value)
if err != nil {
return labels, fmt.Errorf("error while fetching labels from executor: %v", err)
}
@@ -186,7 +186,7 @@ func (self *client) getLabels(c *mContainer) (map[string]string, error) {
return labels, nil
}
func (self *client) fetchAndDecode(req calls.RequestFunc) (*agent.Response, error) {
func (c *client) fetchAndDecode(req calls.RequestFunc) (*agent.Response, error) {
var res mesos.Response
var err error

View File

@@ -55,11 +55,11 @@ type mesosFactory struct {
client mesosAgentClient
}
func (self *mesosFactory) String() string {
func (f *mesosFactory) String() string {
return MesosNamespace
}
func (self *mesosFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
func (f *mesosFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
client, err := Client()
if err != nil {
return nil, err
@@ -67,10 +67,10 @@ func (self *mesosFactory) NewContainerHandler(name string, inHostNamespace bool)
return newMesosContainerHandler(
name,
&self.cgroupSubsystems,
self.machineInfoFactory,
self.fsInfo,
self.includedMetrics,
&f.cgroupSubsystems,
f.machineInfoFactory,
f.fsInfo,
f.includedMetrics,
inHostNamespace,
client,
)
@@ -98,7 +98,7 @@ func isContainerName(name string) bool {
}
// The mesos factory can handle any container.
func (self *mesosFactory) CanHandleAndAccept(name string) (handle bool, accept bool, err error) {
func (f *mesosFactory) CanHandleAndAccept(name string) (handle bool, accept bool, err error) {
// if the container is not associated with mesos, we can't handle it or accept it.
if !isContainerName(name) {
return false, false, nil
@@ -107,7 +107,7 @@ func (self *mesosFactory) CanHandleAndAccept(name string) (handle bool, accept b
// Check if the container is known to mesos and it is active.
id := ContainerNameToMesosId(name)
_, err = self.client.ContainerInfo(id)
_, err = f.client.ContainerInfo(id)
if err != nil {
return false, true, fmt.Errorf("error getting running container: %v", err)
}
@@ -115,7 +115,7 @@ func (self *mesosFactory) CanHandleAndAccept(name string) (handle bool, accept b
return true, true, nil
}
func (self *mesosFactory) DebugInfo() map[string][]string {
func (f *mesosFactory) DebugInfo() map[string][]string {
return map[string][]string{}
}

View File

@@ -114,56 +114,56 @@ func newMesosContainerHandler(
return handler, nil
}
func (self *mesosContainerHandler) ContainerReference() (info.ContainerReference, error) {
func (h *mesosContainerHandler) ContainerReference() (info.ContainerReference, error) {
// We only know the container by its one name.
return self.reference, nil
return h.reference, nil
}
// Nothing to start up.
func (self *mesosContainerHandler) Start() {}
func (h *mesosContainerHandler) Start() {}
// Nothing to clean up.
func (self *mesosContainerHandler) Cleanup() {}
func (h *mesosContainerHandler) Cleanup() {}
func (self *mesosContainerHandler) GetSpec() (info.ContainerSpec, error) {
func (h *mesosContainerHandler) GetSpec() (info.ContainerSpec, error) {
// TODO: Since we don't collect disk usage and network stats for mesos containers, we set
// hasFilesystem and hasNetwork to false. Revisit when we support disk usage, network
// stats for mesos containers.
hasNetwork := false
hasFilesystem := false
spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, hasNetwork, hasFilesystem)
spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, hasNetwork, hasFilesystem)
if err != nil {
return spec, err
}
spec.Labels = self.labels
spec.Labels = h.labels
return spec, nil
}
func (self *mesosContainerHandler) getFsStats(stats *info.ContainerStats) error {
func (h *mesosContainerHandler) getFsStats(stats *info.ContainerStats) error {
mi, err := self.machineInfoFactory.GetMachineInfo()
mi, err := h.machineInfoFactory.GetMachineInfo()
if err != nil {
return err
}
if self.includedMetrics.Has(container.DiskIOMetrics) {
if h.includedMetrics.Has(container.DiskIOMetrics) {
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
}
return nil
}
func (self *mesosContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := self.libcontainerHandler.GetStats()
func (h *mesosContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := h.libcontainerHandler.GetStats()
if err != nil {
return stats, err
}
// Get filesystem stats.
err = self.getFsStats(stats)
err = h.getFsStats(stats)
if err != nil {
return stats, err
}
@@ -171,35 +171,35 @@ func (self *mesosContainerHandler) GetStats() (*info.ContainerStats, error) {
return stats, nil
}
func (self *mesosContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := self.cgroupPaths[resource]
func (h *mesosContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := h.cgroupPaths[resource]
if !ok {
return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, h.name)
}
return path, nil
}
func (self *mesosContainerHandler) GetContainerLabels() map[string]string {
return self.labels
func (h *mesosContainerHandler) GetContainerLabels() map[string]string {
return h.labels
}
func (self *mesosContainerHandler) GetContainerIPAddress() string {
func (h *mesosContainerHandler) GetContainerIPAddress() string {
// the IP address for the mesos container corresponds to the system ip address.
return "127.0.0.1"
}
func (self *mesosContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
return common.ListContainers(self.name, self.cgroupPaths, listType)
func (h *mesosContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
return common.ListContainers(h.name, h.cgroupPaths, listType)
}
func (self *mesosContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return self.libcontainerHandler.GetProcesses()
func (h *mesosContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return h.libcontainerHandler.GetProcesses()
}
func (self *mesosContainerHandler) Exists() bool {
return common.CgroupExists(self.cgroupPaths)
func (h *mesosContainerHandler) Exists() bool {
return common.CgroupExists(h.cgroupPaths)
}
func (self *mesosContainerHandler) Type() container.ContainerType {
func (h *mesosContainerHandler) Type() container.ContainerType {
return container.ContainerTypeMesos
}

View File

@@ -87,7 +87,7 @@ func new() (storage.StorageDriver, error) {
}
// TODO(jnagal): Infer schema through reflection. (See bigquery/client/example)
func (self *bigqueryStorage) GetSchema() *bigquery.TableSchema {
func (s *bigqueryStorage) GetSchema() *bigquery.TableSchema {
fields := make([]*bigquery.TableFieldSchema, 19)
i := 0
fields[i] = &bigquery.TableFieldSchema{
@@ -192,7 +192,7 @@ func (self *bigqueryStorage) GetSchema() *bigquery.TableSchema {
}
}
func (self *bigqueryStorage) containerStatsToRows(
func (s *bigqueryStorage) containerStatsToRows(
cInfo *info.ContainerInfo,
stats *info.ContainerStats,
) (row map[string]interface{}) {
@@ -202,7 +202,7 @@ func (self *bigqueryStorage) containerStatsToRows(
row[colTimestamp] = stats.Timestamp
// Machine name
row[colMachineName] = self.machineName
row[colMachineName] = s.machineName
// Container name
name := cInfo.ContainerReference.Name
@@ -249,7 +249,7 @@ func (self *bigqueryStorage) containerStatsToRows(
return
}
func (self *bigqueryStorage) containerFilesystemStatsToRows(
func (s *bigqueryStorage) containerFilesystemStatsToRows(
cInfo *info.ContainerInfo,
stats *info.ContainerStats,
) (rows []map[string]interface{}) {
@@ -263,15 +263,15 @@ func (self *bigqueryStorage) containerFilesystemStatsToRows(
return rows
}
func (self *bigqueryStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
func (s *bigqueryStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
if stats == nil {
return nil
}
rows := make([]map[string]interface{}, 0)
rows = append(rows, self.containerStatsToRows(cInfo, stats))
rows = append(rows, self.containerFilesystemStatsToRows(cInfo, stats)...)
rows = append(rows, s.containerStatsToRows(cInfo, stats))
rows = append(rows, s.containerFilesystemStatsToRows(cInfo, stats)...)
for _, row := range rows {
err := self.client.InsertRow(row)
err := s.client.InsertRow(row)
if err != nil {
return err
}
@@ -279,9 +279,9 @@ func (self *bigqueryStorage) AddStats(cInfo *info.ContainerInfo, stats *info.Con
return nil
}
func (self *bigqueryStorage) Close() error {
self.client.Close()
self.client = nil
func (s *bigqueryStorage) Close() error {
s.client.Close()
s.client = nil
return nil
}

View File

@@ -67,7 +67,7 @@ func new() (storage.StorageDriver, error) {
)
}
func (self *elasticStorage) containerStatsAndDefaultValues(
func (s *elasticStorage) containerStatsAndDefaultValues(
cInfo *info.ContainerInfo, stats *info.ContainerStats) *detailSpec {
timestamp := stats.Timestamp.UnixNano() / 1e3
var containerName string
@@ -78,27 +78,27 @@ func (self *elasticStorage) containerStatsAndDefaultValues(
}
detail := &detailSpec{
Timestamp: timestamp,
MachineName: self.machineName,
MachineName: s.machineName,
ContainerName: containerName,
ContainerStats: stats,
}
return detail
}
func (self *elasticStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
func (s *elasticStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
if stats == nil {
return nil
}
func() {
// AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
self.lock.Lock()
defer self.lock.Unlock()
s.lock.Lock()
defer s.lock.Unlock()
// Add some default params based on ContainerStats
detail := self.containerStatsAndDefaultValues(cInfo, stats)
detail := s.containerStatsAndDefaultValues(cInfo, stats)
// Index a cadvisor (using JSON serialization)
_, err := self.client.Index().
Index(self.indexName).
Type(self.typeName).
_, err := s.client.Index().
Index(s.indexName).
Type(s.typeName).
BodyJson(detail).
Do()
if err != nil {
@@ -110,8 +110,8 @@ func (self *elasticStorage) AddStats(cInfo *info.ContainerInfo, stats *info.Cont
return nil
}
func (self *elasticStorage) Close() error {
self.client = nil
func (s *elasticStorage) Close() error {
s.client = nil
return nil
}

View File

@@ -105,7 +105,7 @@ const (
tagContainerName string = "container_name"
)
func (self *influxdbStorage) containerFilesystemStatsToPoints(
func (s *influxdbStorage) containerFilesystemStatsToPoints(
cInfo *info.ContainerInfo,
stats *info.ContainerStats) (points []*influxdb.Point) {
if len(stats.Filesystem) == 0 {
@@ -141,14 +141,14 @@ func (self *influxdbStorage) containerFilesystemStatsToPoints(
points = append(points, pointFsUsage, pointFsLimit)
}
self.tagPoints(cInfo, stats, points)
s.tagPoints(cInfo, stats, points)
return points
}
// Set tags and timestamp for all points of the batch.
// Points should inherit the tags that are set for BatchPoints, but that does not seem to work.
func (self *influxdbStorage) tagPoints(cInfo *info.ContainerInfo, stats *info.ContainerStats, points []*influxdb.Point) {
func (s *influxdbStorage) tagPoints(cInfo *info.ContainerInfo, stats *info.ContainerStats, points []*influxdb.Point) {
// Use container alias if possible
var containerName string
if len(cInfo.ContainerReference.Aliases) > 0 {
@@ -158,7 +158,7 @@ func (self *influxdbStorage) tagPoints(cInfo *info.ContainerInfo, stats *info.Co
}
commonTags := map[string]string{
tagMachineName: self.machineName,
tagMachineName: s.machineName,
tagContainerName: containerName,
}
for i := 0; i < len(points); i++ {
@@ -169,7 +169,7 @@ func (self *influxdbStorage) tagPoints(cInfo *info.ContainerInfo, stats *info.Co
}
}
func (self *influxdbStorage) containerStatsToPoints(
func (s *influxdbStorage) containerStatsToPoints(
cInfo *info.ContainerInfo,
stats *info.ContainerStats,
) (points []*influxdb.Point) {
@@ -206,35 +206,35 @@ func (self *influxdbStorage) containerStatsToPoints(
points = append(points, makePoint(serTxBytes, stats.Network.TxBytes))
points = append(points, makePoint(serTxErrors, stats.Network.TxErrors))
self.tagPoints(cInfo, stats, points)
s.tagPoints(cInfo, stats, points)
return points
}
func (self *influxdbStorage) OverrideReadyToFlush(readyToFlush func() bool) {
self.readyToFlush = readyToFlush
func (s *influxdbStorage) OverrideReadyToFlush(readyToFlush func() bool) {
s.readyToFlush = readyToFlush
}
func (self *influxdbStorage) defaultReadyToFlush() bool {
return time.Since(self.lastWrite) >= self.bufferDuration
func (s *influxdbStorage) defaultReadyToFlush() bool {
return time.Since(s.lastWrite) >= s.bufferDuration
}
func (self *influxdbStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
func (s *influxdbStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
if stats == nil {
return nil
}
var pointsToFlush []*influxdb.Point
func() {
// AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
self.lock.Lock()
defer self.lock.Unlock()
s.lock.Lock()
defer s.lock.Unlock()
self.points = append(self.points, self.containerStatsToPoints(cInfo, stats)...)
self.points = append(self.points, self.containerFilesystemStatsToPoints(cInfo, stats)...)
if self.readyToFlush() {
pointsToFlush = self.points
self.points = make([]*influxdb.Point, 0)
self.lastWrite = time.Now()
s.points = append(s.points, s.containerStatsToPoints(cInfo, stats)...)
s.points = append(s.points, s.containerFilesystemStatsToPoints(cInfo, stats)...)
if s.readyToFlush() {
pointsToFlush = s.points
s.points = make([]*influxdb.Point, 0)
s.lastWrite = time.Now()
}
}()
if len(pointsToFlush) > 0 {
@@ -243,15 +243,15 @@ func (self *influxdbStorage) AddStats(cInfo *info.ContainerInfo, stats *info.Con
points[i] = *p
}
batchTags := map[string]string{tagMachineName: self.machineName}
batchTags := map[string]string{tagMachineName: s.machineName}
bp := influxdb.BatchPoints{
Points: points,
Database: self.database,
RetentionPolicy: self.retentionPolicy,
Database: s.database,
RetentionPolicy: s.retentionPolicy,
Tags: batchTags,
Time: stats.Timestamp,
}
response, err := self.client.Write(bp)
response, err := s.client.Write(bp)
if err != nil || checkResponseForErrors(response) != nil {
return fmt.Errorf("failed to write stats to influxDb - %s", err)
}
@@ -259,8 +259,8 @@ func (self *influxdbStorage) AddStats(cInfo *info.ContainerInfo, stats *info.Con
return nil
}
func (self *influxdbStorage) Close() error {
self.client = nil
func (s *influxdbStorage) Close() error {
s.client = nil
return nil
}
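
The influxdb driver batches points and writes only when readyToFlush reports true; by default that means bufferDuration has elapsed since lastWrite, and OverrideReadyToFlush lets tests swap in their own predicate. A self-contained sketch of the same buffer-and-flush shape (toy type, not the driver itself):

package main

import (
    "fmt"
    "sync"
    "time"
)

type buffered struct {
    lock           sync.Mutex
    points         []string
    lastWrite      time.Time
    bufferDuration time.Duration
    readyToFlush   func() bool
}

func (b *buffered) defaultReadyToFlush() bool {
    return time.Since(b.lastWrite) >= b.bufferDuration
}

// add appends under the lock and flushes outside it, mirroring AddStats above.
func (b *buffered) add(p string) {
    var toFlush []string
    func() {
        b.lock.Lock()
        defer b.lock.Unlock()
        b.points = append(b.points, p)
        if b.readyToFlush() {
            toFlush = b.points
            b.points = nil
            b.lastWrite = time.Now()
        }
    }()
    if len(toFlush) > 0 {
        fmt.Println("flushing", len(toFlush), "points") // stand-in for client.Write
    }
}

func main() {
    b := &buffered{bufferDuration: time.Minute, lastWrite: time.Now()}
    b.readyToFlush = b.defaultReadyToFlush
    b.add("point-1") // buffered; a minute has not passed
    b.readyToFlush = func() bool { return true } // what OverrideReadyToFlush enables in tests
    b.add("point-2") // flushes both points
}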

View File

@@ -62,7 +62,7 @@ type detailSpec struct {
ContainerStats *info.ContainerStats `json:"container_stats,omitempty"`
}
func (driver *kafkaStorage) infoToDetailSpec(cInfo *info.ContainerInfo, stats *info.ContainerStats) *detailSpec {
func (s *kafkaStorage) infoToDetailSpec(cInfo *info.ContainerInfo, stats *info.ContainerStats) *detailSpec {
timestamp := time.Now()
containerID := cInfo.ContainerReference.Id
containerLabels := cInfo.Spec.Labels
@@ -70,7 +70,7 @@ func (driver *kafkaStorage) infoToDetailSpec(cInfo *info.ContainerInfo, stats *i
detail := &detailSpec{
Timestamp: timestamp,
MachineName: driver.machineName,
MachineName: s.machineName,
ContainerName: containerName,
ContainerID: containerID,
ContainerLabels: containerLabels,
@@ -79,20 +79,20 @@ func (driver *kafkaStorage) infoToDetailSpec(cInfo *info.ContainerInfo, stats *i
return detail
}
func (driver *kafkaStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
detail := driver.infoToDetailSpec(cInfo, stats)
func (s *kafkaStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
detail := s.infoToDetailSpec(cInfo, stats)
b, err := json.Marshal(detail)
driver.producer.Input() <- &kafka.ProducerMessage{
Topic: driver.topic,
s.producer.Input() <- &kafka.ProducerMessage{
Topic: s.topic,
Value: kafka.StringEncoder(b),
}
return err
}
func (self *kafkaStorage) Close() error {
return self.producer.Close()
func (s *kafkaStorage) Close() error {
return s.producer.Close()
}
func new() (storage.StorageDriver, error) {

View File

@@ -60,12 +60,12 @@ func new() (storage.StorageDriver, error) {
)
}
func (self *redisStorage) defaultReadyToFlush() bool {
return time.Since(self.lastWrite) >= self.bufferDuration
func (s *redisStorage) defaultReadyToFlush() bool {
return time.Since(s.lastWrite) >= s.bufferDuration
}
// We must add some default params (for example: MachineName, ContainerName...) because containerStats does not include them
func (self *redisStorage) containerStatsAndDefaultValues(cInfo *info.ContainerInfo, stats *info.ContainerStats) *detailSpec {
func (s *redisStorage) containerStatsAndDefaultValues(cInfo *info.ContainerInfo, stats *info.ContainerStats) *detailSpec {
timestamp := stats.Timestamp.UnixNano() / 1e3
var containerName string
if len(cInfo.ContainerReference.Aliases) > 0 {
@@ -75,7 +75,7 @@ func (self *redisStorage) containerStatsAndDefaultValues(cInfo *info.ContainerIn
}
detail := &detailSpec{
Timestamp: timestamp,
MachineName: self.machineName,
MachineName: s.machineName,
ContainerName: containerName,
ContainerStats: stats,
}
@@ -83,33 +83,33 @@ func (self *redisStorage) containerStatsAndDefaultValues(cInfo *info.ContainerIn
}
// Push the data into redis
func (self *redisStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
func (s *redisStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
if stats == nil {
return nil
}
var seriesToFlush []byte
func() {
// AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
self.lock.Lock()
defer self.lock.Unlock()
s.lock.Lock()
defer s.lock.Unlock()
// Add some default params based on containerStats
detail := self.containerStatsAndDefaultValues(cInfo, stats)
detail := s.containerStatsAndDefaultValues(cInfo, stats)
// To json
b, _ := json.Marshal(detail)
if self.readyToFlush() {
if s.readyToFlush() {
seriesToFlush = b
self.lastWrite = time.Now()
s.lastWrite = time.Now()
}
}()
if len(seriesToFlush) > 0 {
// We use redis's "LPUSH" to push the data to redis
self.conn.Send("LPUSH", self.redisKey, seriesToFlush)
s.conn.Send("LPUSH", s.redisKey, seriesToFlush)
}
return nil
}
func (self *redisStorage) Close() error {
return self.conn.Close()
func (s *redisStorage) Close() error {
return s.conn.Close()
}
// Create a new redis storage driver.

View File

@@ -27,27 +27,27 @@ type Client struct {
conn net.Conn
}
func (self *Client) Open() error {
conn, err := net.Dial("udp", self.HostPort)
func (c *Client) Open() error {
conn, err := net.Dial("udp", c.HostPort)
if err != nil {
klog.Errorf("failed to open udp connection to %q: %v", self.HostPort, err)
klog.Errorf("failed to open udp connection to %q: %v", c.HostPort, err)
return err
}
self.conn = conn
c.conn = conn
return nil
}
func (self *Client) Close() error {
self.conn.Close()
self.conn = nil
func (c *Client) Close() error {
c.conn.Close()
c.conn = nil
return nil
}
// Simple send to statsd daemon without sampling.
func (self *Client) Send(namespace, containerName, key string, value uint64) error {
func (c *Client) Send(namespace, containerName, key string, value uint64) error {
// only send counter value
formatted := fmt.Sprintf("%s.%s.%s:%d|g", namespace, containerName, key, value)
_, err := fmt.Fprintf(self.conn, formatted)
_, err := fmt.Fprintf(c.conn, formatted)
if err != nil {
return fmt.Errorf("failed to send data %q: %v", formatted, err)
}
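
Send writes the plain statsd line protocol, <namespace>.<container>.<key>:<value>|g, where the trailing |g marks the metric as a gauge. A tiny sketch of the wire format (hypothetical values):

package main

import "fmt"

func main() {
    // The same formatting Send uses above; "|g" is the statsd gauge type.
    line := fmt.Sprintf("%s.%s.%s:%d|g", "cadvisor", "mycontainer", "cpu_usage_total", 12345)
    fmt.Println(line) // cadvisor.mycontainer.cpu_usage_total:12345|g
}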

View File

@@ -65,7 +65,7 @@ func new() (storage.StorageDriver, error) {
return newStorage(*storage.ArgDbName, *storage.ArgDbHost)
}
func (self *statsdStorage) containerStatsToValues(
func (s *statsdStorage) containerStatsToValues(
stats *info.ContainerStats,
) (series map[string]uint64) {
series = make(map[string]uint64)
@@ -99,7 +99,7 @@ func (self *statsdStorage) containerStatsToValues(
return series
}
func (self *statsdStorage) containerFsStatsToValues(
func (s *statsdStorage) containerFsStatsToValues(
series *map[string]uint64,
stats *info.ContainerStats,
) {
@@ -115,7 +115,7 @@ func (self *statsdStorage) containerFsStatsToValues(
}
// Push the data into statsd
func (self *statsdStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
func (s *statsdStorage) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
if stats == nil {
return nil
}
@@ -127,10 +127,10 @@ func (self *statsdStorage) AddStats(cInfo *info.ContainerInfo, stats *info.Conta
containerName = cInfo.ContainerReference.Name
}
series := self.containerStatsToValues(stats)
self.containerFsStatsToValues(&series, stats)
series := s.containerStatsToValues(stats)
s.containerFsStatsToValues(&series, stats)
for key, value := range series {
err := self.client.Send(self.Namespace, containerName, key, value)
err := s.client.Send(s.Namespace, containerName, key, value)
if err != nil {
return err
}
@@ -138,9 +138,9 @@ func (self *statsdStorage) AddStats(cInfo *info.ContainerInfo, stats *info.Conta
return nil
}
func (self *statsdStorage) Close() error {
self.client.Close()
self.client = nil
func (s *statsdStorage) Close() error {
s.client.Close()
s.client = nil
return nil
}

View File

@@ -25,14 +25,14 @@ type MockStorageDriver struct {
MockCloseMethod bool
}
func (self *MockStorageDriver) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
args := self.Called(cInfo.ContainerReference, stats)
func (d *MockStorageDriver) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
args := d.Called(cInfo.ContainerReference, stats)
return args.Error(0)
}
func (self *MockStorageDriver) Close() error {
if self.MockCloseMethod {
args := self.Called()
func (d *MockStorageDriver) Close() error {
if d.MockCloseMethod {
args := d.Called()
return args.Error(0)
}
return nil
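
MockStorageDriver is a testify-style mock: Called records the invocation and hands back whatever was staged with On(...).Return(...). A hedged usage sketch (it assumes the struct embeds mock.Mock, which this hunk does not show):

package main

import (
    "fmt"

    "github.com/stretchr/testify/mock"
)

// toyDriver stands in for MockStorageDriver; the embedded mock.Mock provides
// Called, On and Return.
type toyDriver struct {
    mock.Mock
}

func (d *toyDriver) Close() error {
    args := d.Called()
    return args.Error(0)
}

func main() {
    d := new(toyDriver)
    d.On("Close").Return(nil) // stage the canned return value
    fmt.Println(d.Close())    // <nil>
}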

View File

@@ -87,17 +87,17 @@ metric_with_multiple_labels{label1="One", label2="Two", label3="Three"} 81
assert.NoError(errMetric)
go_gc_duration := metrics["go_gc_duration_seconds"]
assert.Equal(5.8348000000000004e-05, go_gc_duration[0].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds\xffquantile=0", go_gc_duration[0].Label)
assert.Equal(0.000499764, go_gc_duration[1].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds\xffquantile=1", go_gc_duration[1].Label)
go_gc_duration_sum := metrics["go_gc_duration_seconds_sum"]
assert.Equal(1.7560473e+07, go_gc_duration_sum[0].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds_sum", go_gc_duration_sum[0].Label)
go_gc_duration_count := metrics["go_gc_duration_seconds_count"]
assert.Equal(float64(2693), go_gc_duration_count[0].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds_count", go_gc_duration_count[0].Label)
gcDuration := metrics["go_gc_duration_seconds"]
assert.Equal(5.8348000000000004e-05, gcDuration[0].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds\xffquantile=0", gcDuration[0].Label)
assert.Equal(0.000499764, gcDuration[1].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds\xffquantile=1", gcDuration[1].Label)
gcDurationSum := metrics["go_gc_duration_seconds_sum"]
assert.Equal(1.7560473e+07, gcDurationSum[0].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds_sum", gcDurationSum[0].Label)
gcDurationCount := metrics["go_gc_duration_seconds_count"]
assert.Equal(float64(2693), gcDurationCount[0].FloatValue)
assert.Equal("__name__=go_gc_duration_seconds_count", gcDurationCount[0].Label)
goRoutines := metrics["go_goroutines"]
assert.Equal(float64(16), goRoutines[0].FloatValue)

View File

@@ -52,11 +52,11 @@ type containerdFactory struct {
includedMetrics container.MetricSet
}
func (self *containerdFactory) String() string {
func (f *containerdFactory) String() string {
return k8sContainerdNamespace
}
func (self *containerdFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
func (f *containerdFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
client, err := Client(*ArgContainerdEndpoint, *ArgContainerdNamespace)
if err != nil {
return
@@ -66,12 +66,12 @@ func (self *containerdFactory) NewContainerHandler(name string, inHostNamespace
return newContainerdContainerHandler(
client,
name,
self.machineInfoFactory,
self.fsInfo,
&self.cgroupSubsystems,
f.machineInfoFactory,
f.fsInfo,
&f.cgroupSubsystems,
inHostNamespace,
metadataEnvs,
self.includedMetrics,
f.includedMetrics,
)
}
@@ -95,7 +95,7 @@ func isContainerName(name string) bool {
}
// Containerd can handle and accept all containerd created containers
func (self *containerdFactory) CanHandleAndAccept(name string) (bool, bool, error) {
func (f *containerdFactory) CanHandleAndAccept(name string) (bool, bool, error) {
// if the container is not associated with containerd, we can't handle it or accept it.
if !isContainerName(name) {
return false, false, nil
@ -105,7 +105,7 @@ func (self *containerdFactory) CanHandleAndAccept(name string) (bool, bool, erro
// If container and task lookup in containerd fails then we assume
// that the container state is not known to containerd
ctx := context.Background()
_, err := self.client.LoadContainer(ctx, id)
_, err := f.client.LoadContainer(ctx, id)
if err != nil {
return false, false, fmt.Errorf("failed to load container: %v", err)
}
@ -113,7 +113,7 @@ func (self *containerdFactory) CanHandleAndAccept(name string) (bool, bool, erro
return true, true, nil
}
func (self *containerdFactory) DebugInfo() map[string][]string {
func (f *containerdFactory) DebugInfo() map[string][]string {
return map[string][]string{}
}

View File

@ -149,47 +149,47 @@ func newContainerdContainerHandler(
return handler, nil
}
func (self *containerdContainerHandler) ContainerReference() (info.ContainerReference, error) {
return self.reference, nil
func (h *containerdContainerHandler) ContainerReference() (info.ContainerReference, error) {
return h.reference, nil
}
func (self *containerdContainerHandler) needNet() bool {
func (h *containerdContainerHandler) needNet() bool {
// Since containerd does not handle networking, we decide based on the
// includedMetrics list. The assumption here is the presence of the
// cri-containerd label.
if self.includedMetrics.Has(container.NetworkUsageMetrics) {
if h.includedMetrics.Has(container.NetworkUsageMetrics) {
// TODO: change it to exported cri-containerd constants
return self.labels["io.cri-containerd.kind"] == "sandbox"
return h.labels["io.cri-containerd.kind"] == "sandbox"
}
return false
}
func (self *containerdContainerHandler) GetSpec() (info.ContainerSpec, error) {
func (h *containerdContainerHandler) GetSpec() (info.ContainerSpec, error) {
// TODO: Since we don't collect disk usage stats for containerd, we set hasFilesystem
// to false. Revisit when we support disk usage stats for containerd
hasFilesystem := false
spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
spec.Labels = self.labels
spec.Envs = self.envs
spec.Image = self.image
spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, h.needNet(), hasFilesystem)
spec.Labels = h.labels
spec.Envs = h.envs
spec.Image = h.image
return spec, err
}
func (self *containerdContainerHandler) getFsStats(stats *info.ContainerStats) error {
mi, err := self.machineInfoFactory.GetMachineInfo()
func (h *containerdContainerHandler) getFsStats(stats *info.ContainerStats) error {
mi, err := h.machineInfoFactory.GetMachineInfo()
if err != nil {
return err
}
if self.includedMetrics.Has(container.DiskIOMetrics) {
if h.includedMetrics.Has(container.DiskIOMetrics) {
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
}
return nil
}
func (self *containerdContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := self.libcontainerHandler.GetStats()
func (h *containerdContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := h.libcontainerHandler.GetStats()
if err != nil {
return stats, err
}
@ -197,50 +197,50 @@ func (self *containerdContainerHandler) GetStats() (*info.ContainerStats, error)
// includes containers running in Kubernetes pods that use the network of the
// infrastructure container. This stops metrics being reported multiple times
// for each container in a pod.
if !self.needNet() {
if !h.needNet() {
stats.Network = info.NetworkStats{}
}
// Get filesystem stats.
err = self.getFsStats(stats)
err = h.getFsStats(stats)
return stats, err
}
func (self *containerdContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
func (h *containerdContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
return []info.ContainerReference{}, nil
}
func (self *containerdContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := self.cgroupPaths[resource]
func (h *containerdContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := h.cgroupPaths[resource]
if !ok {
return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
}
return path, nil
}
func (self *containerdContainerHandler) GetContainerLabels() map[string]string {
return self.labels
func (h *containerdContainerHandler) GetContainerLabels() map[string]string {
return h.labels
}
func (self *containerdContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return self.libcontainerHandler.GetProcesses()
func (h *containerdContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return h.libcontainerHandler.GetProcesses()
}
func (self *containerdContainerHandler) Exists() bool {
return common.CgroupExists(self.cgroupPaths)
func (h *containerdContainerHandler) Exists() bool {
return common.CgroupExists(h.cgroupPaths)
}
func (self *containerdContainerHandler) Type() container.ContainerType {
func (h *containerdContainerHandler) Type() container.ContainerType {
return container.ContainerTypeContainerd
}
func (self *containerdContainerHandler) Start() {
func (h *containerdContainerHandler) Start() {
}
func (self *containerdContainerHandler) Cleanup() {
func (h *containerdContainerHandler) Cleanup() {
}
func (self *containerdContainerHandler) GetContainerIPAddress() string {
func (h *containerdContainerHandler) GetContainerIPAddress() string {
// containerd doesn't take care of networking, so it doesn't maintain networking states.
return ""
}

View File

@ -60,11 +60,11 @@ type crioFactory struct {
client crioClient
}
func (self *crioFactory) String() string {
func (f *crioFactory) String() string {
return CrioNamespace
}
func (self *crioFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
func (f *crioFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
client, err := Client()
if err != nil {
return
@ -74,14 +74,14 @@ func (self *crioFactory) NewContainerHandler(name string, inHostNamespace bool)
handler, err = newCrioContainerHandler(
client,
name,
self.machineInfoFactory,
self.fsInfo,
self.storageDriver,
self.storageDir,
&self.cgroupSubsystems,
f.machineInfoFactory,
f.fsInfo,
f.storageDriver,
f.storageDir,
&f.cgroupSubsystems,
inHostNamespace,
metadataEnvs,
self.includedMetrics,
f.includedMetrics,
)
return
}
@ -108,7 +108,7 @@ func isContainerName(name string) bool {
}
// crio handles all containers under /crio
func (self *crioFactory) CanHandleAndAccept(name string) (bool, bool, error) {
func (f *crioFactory) CanHandleAndAccept(name string) (bool, bool, error) {
if strings.HasPrefix(path.Base(name), "crio-conmon") {
// TODO(runcom): should we include crio-conmon cgroups?
return false, false, nil
@ -123,7 +123,7 @@ func (self *crioFactory) CanHandleAndAccept(name string) (bool, bool, error) {
return true, true, nil
}
func (self *crioFactory) DebugInfo() map[string][]string {
func (f *crioFactory) DebugInfo() map[string][]string {
return map[string][]string{}
}

View File

@ -197,59 +197,59 @@ func newCrioContainerHandler(
return handler, nil
}
func (self *crioContainerHandler) Start() {
if self.fsHandler != nil {
self.fsHandler.Start()
func (h *crioContainerHandler) Start() {
if h.fsHandler != nil {
h.fsHandler.Start()
}
}
func (self *crioContainerHandler) Cleanup() {
if self.fsHandler != nil {
self.fsHandler.Stop()
func (h *crioContainerHandler) Cleanup() {
if h.fsHandler != nil {
h.fsHandler.Stop()
}
}
func (self *crioContainerHandler) ContainerReference() (info.ContainerReference, error) {
return self.reference, nil
func (h *crioContainerHandler) ContainerReference() (info.ContainerReference, error) {
return h.reference, nil
}
func (self *crioContainerHandler) needNet() bool {
if self.includedMetrics.Has(container.NetworkUsageMetrics) {
return self.labels["io.kubernetes.container.name"] == "POD"
func (h *crioContainerHandler) needNet() bool {
if h.includedMetrics.Has(container.NetworkUsageMetrics) {
return h.labels["io.kubernetes.container.name"] == "POD"
}
return false
}
func (self *crioContainerHandler) GetSpec() (info.ContainerSpec, error) {
hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics)
spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
func (h *crioContainerHandler) GetSpec() (info.ContainerSpec, error) {
hasFilesystem := h.includedMetrics.Has(container.DiskUsageMetrics)
spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, h.needNet(), hasFilesystem)
spec.Labels = self.labels
spec.Envs = self.envs
spec.Image = self.image
spec.Labels = h.labels
spec.Envs = h.envs
spec.Image = h.image
return spec, err
}
func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
mi, err := self.machineInfoFactory.GetMachineInfo()
func (h *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
mi, err := h.machineInfoFactory.GetMachineInfo()
if err != nil {
return err
}
if self.includedMetrics.Has(container.DiskIOMetrics) {
if h.includedMetrics.Has(container.DiskIOMetrics) {
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
}
if !self.includedMetrics.Has(container.DiskUsageMetrics) {
if !h.includedMetrics.Has(container.DiskUsageMetrics) {
return nil
}
var device string
switch self.storageDriver {
switch h.storageDriver {
case overlay2StorageDriver, overlayStorageDriver:
deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
deviceInfo, err := h.fsInfo.GetDirFsDevice(h.rootfsStorageDir)
if err != nil {
return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
return fmt.Errorf("unable to determine device info for dir: %v: %v", h.rootfsStorageDir, err)
}
device = deviceInfo.Device
default:
@ -274,7 +274,7 @@ func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
return fmt.Errorf("unable to determine fs type for device: %v", device)
}
fsStat := info.FsStats{Device: device, Type: fsType, Limit: limit}
usage := self.fsHandler.Usage()
usage := h.fsHandler.Usage()
fsStat.BaseUsage = usage.BaseUsageBytes
fsStat.Usage = usage.TotalUsageBytes
fsStat.Inodes = usage.InodeUsage
@ -284,26 +284,26 @@ func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
return nil
}
func (self *crioContainerHandler) getLibcontainerHandler() *containerlibcontainer.Handler {
if self.pidKnown {
return self.libcontainerHandler
func (h *crioContainerHandler) getLibcontainerHandler() *containerlibcontainer.Handler {
if h.pidKnown {
return h.libcontainerHandler
}
id := ContainerNameToCrioId(self.name)
id := ContainerNameToCrioId(h.name)
cInfo, err := self.client.ContainerInfo(id)
cInfo, err := h.client.ContainerInfo(id)
if err != nil || cInfo.Pid == 0 {
return self.libcontainerHandler
return h.libcontainerHandler
}
self.pidKnown = true
self.libcontainerHandler = containerlibcontainer.NewHandler(self.cgroupManager, self.rootFs, cInfo.Pid, self.includedMetrics)
h.pidKnown = true
h.libcontainerHandler = containerlibcontainer.NewHandler(h.cgroupManager, h.rootFs, cInfo.Pid, h.includedMetrics)
return self.libcontainerHandler
return h.libcontainerHandler
}
func (self *crioContainerHandler) GetStats() (*info.ContainerStats, error) {
libcontainerHandler := self.getLibcontainerHandler()
func (h *crioContainerHandler) GetStats() (*info.ContainerStats, error) {
libcontainerHandler := h.getLibcontainerHandler()
stats, err := libcontainerHandler.GetStats()
if err != nil {
return stats, err
@ -312,12 +312,12 @@ func (self *crioContainerHandler) GetStats() (*info.ContainerStats, error) {
// includes containers running in Kubernetes pods that use the network of the
// infrastructure container. This stops metrics being reported multiple times
// for each container in a pod.
if !self.needNet() {
if !h.needNet() {
stats.Network = info.NetworkStats{}
}
// Get filesystem stats.
err = self.getFsStats(stats)
err = h.getFsStats(stats)
if err != nil {
return stats, err
}
@ -325,35 +325,35 @@ func (self *crioContainerHandler) GetStats() (*info.ContainerStats, error) {
return stats, nil
}
func (self *crioContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
func (h *crioContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
// No-op for Docker driver.
return []info.ContainerReference{}, nil
}
func (self *crioContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := self.cgroupPaths[resource]
func (h *crioContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := h.cgroupPaths[resource]
if !ok {
return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
}
return path, nil
}
func (self *crioContainerHandler) GetContainerLabels() map[string]string {
return self.labels
func (h *crioContainerHandler) GetContainerLabels() map[string]string {
return h.labels
}
func (self *crioContainerHandler) GetContainerIPAddress() string {
return self.ipAddress
func (h *crioContainerHandler) GetContainerIPAddress() string {
return h.ipAddress
}
func (self *crioContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return self.libcontainerHandler.GetProcesses()
func (h *crioContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return h.libcontainerHandler.GetProcesses()
}
func (self *crioContainerHandler) Exists() bool {
return common.CgroupExists(self.cgroupPaths)
func (h *crioContainerHandler) Exists() bool {
return common.CgroupExists(h.cgroupPaths)
}
func (self *crioContainerHandler) Type() container.ContainerType {
func (h *crioContainerHandler) Type() container.ContainerType {
return container.ContainerTypeCrio
}

View File

@ -134,7 +134,7 @@ func ValidateInfo() (*dockertypes.Info, error) {
}
dockerInfo.ServerVersion = version.Version
}
version, err := parseVersion(dockerInfo.ServerVersion, version_re, 3)
version, err := parseVersion(dockerInfo.ServerVersion, versionRe, 3)
if err != nil {
return nil, err
}
@ -155,46 +155,46 @@ func APIVersion() ([]int, error) {
if err != nil {
return nil, err
}
return parseVersion(ver, apiversion_re, 2)
return parseVersion(ver, apiVersionRe, 2)
}
func VersionString() (string, error) {
docker_version := "Unknown"
dockerVersion := "Unknown"
client, err := Client()
if err == nil {
version, err := client.ServerVersion(defaultContext())
if err == nil {
docker_version = version.Version
dockerVersion = version.Version
}
}
return docker_version, err
return dockerVersion, err
}
func APIVersionString() (string, error) {
docker_api_version := "Unknown"
apiVersion := "Unknown"
client, err := Client()
if err == nil {
version, err := client.ServerVersion(defaultContext())
if err == nil {
docker_api_version = version.APIVersion
apiVersion = version.APIVersion
}
}
return docker_api_version, err
return apiVersion, err
}
func parseVersion(version_string string, regex *regexp.Regexp, length int) ([]int, error) {
matches := regex.FindAllStringSubmatch(version_string, -1)
func parseVersion(versionString string, regex *regexp.Regexp, length int) ([]int, error) {
matches := regex.FindAllStringSubmatch(versionString, -1)
if len(matches) != 1 {
return nil, fmt.Errorf("version string \"%v\" doesn't match expected regular expression: \"%v\"", version_string, regex.String())
return nil, fmt.Errorf("version string \"%v\" doesn't match expected regular expression: \"%v\"", versionString, regex.String())
}
version_string_array := matches[0][1:]
version_array := make([]int, length)
for index, version_str := range version_string_array {
version, err := strconv.Atoi(version_str)
versionStringArray := matches[0][1:]
versionArray := make([]int, length)
for index, versionStr := range versionStringArray {
version, err := strconv.Atoi(versionStr)
if err != nil {
return nil, fmt.Errorf("error while parsing \"%v\" in \"%v\"", version_str, version_string)
return nil, fmt.Errorf("error while parsing \"%v\" in \"%v\"", versionStr, versionString)
}
version_array[index] = version
versionArray[index] = version
}
return version_array, nil
return versionArray, nil
}
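As a usage note, a self-contained sketch of the parsing shown above; parseVersion is reproduced in simplified form and the error text is abbreviated:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var versionRe = regexp.MustCompile(`(\d+)\.(\d+)\.(\d+)`)

// parseVersion captures each numeric component and converts it with Atoi,
// mirroring the renamed function above.
func parseVersion(versionString string, regex *regexp.Regexp, length int) ([]int, error) {
	matches := regex.FindAllStringSubmatch(versionString, -1)
	if len(matches) != 1 {
		return nil, fmt.Errorf("version string %q doesn't match %q", versionString, regex.String())
	}
	versionArray := make([]int, length)
	for i, s := range matches[0][1:] {
		v, err := strconv.Atoi(s)
		if err != nil {
			return nil, fmt.Errorf("error while parsing %q in %q", s, versionString)
		}
		versionArray[i] = v
	}
	return versionArray, nil
}

func main() {
	fmt.Println(parseVersion("17.03.0", versionRe, 3)) // [17 3 0] <nil>
}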

View File

@ -28,10 +28,10 @@ func TestParseDockerAPIVersion(t *testing.T) {
expected []int
expectedError string
}{
{"17.03.0", version_re, 3, []int{17, 03, 0}, ""},
{"17.a3.0", version_re, 3, []int{}, `version string "17.a3.0" doesn't match expected regular expression: "(\d+)\.(\d+)\.(\d+)"`},
{"1.20", apiversion_re, 2, []int{1, 20}, ""},
{"1.a", apiversion_re, 2, []int{}, `version string "1.a" doesn't match expected regular expression: "(\d+)\.(\d+)"`},
{"17.03.0", versionRe, 3, []int{17, 03, 0}, ""},
{"17.a3.0", versionRe, 3, []int{}, `version string "17.a3.0" doesn't match expected regular expression: "(\d+)\.(\d+)\.(\d+)"`},
{"1.20", apiVersionRe, 2, []int{1, 20}, ""},
{"1.a", apiVersionRe, 2, []int{}, `version string "1.a" doesn't match expected regular expression: "(\d+)\.(\d+)"`},
}
for _, test := range tests {

View File

@ -132,11 +132,11 @@ type dockerFactory struct {
zfsWatcher *zfs.ZfsWatcher
}
func (self *dockerFactory) String() string {
func (f *dockerFactory) String() string {
return DockerNamespace
}
func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
func (f *dockerFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
client, err := Client()
if err != nil {
return
@ -147,18 +147,18 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
handler, err = newDockerContainerHandler(
client,
name,
self.machineInfoFactory,
self.fsInfo,
self.storageDriver,
self.storageDir,
&self.cgroupSubsystems,
f.machineInfoFactory,
f.fsInfo,
f.storageDriver,
f.storageDir,
&f.cgroupSubsystems,
inHostNamespace,
metadataEnvs,
self.dockerVersion,
self.includedMetrics,
self.thinPoolName,
self.thinPoolWatcher,
self.zfsWatcher,
f.dockerVersion,
f.includedMetrics,
f.thinPoolName,
f.thinPoolWatcher,
f.zfsWatcher,
)
return
}
@ -185,7 +185,7 @@ func isContainerName(name string) bool {
}
// Docker handles all containers under /docker
func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
func (f *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
// if the container is not associated with docker, we can't handle it or accept it.
if !isContainerName(name) {
return false, false, nil
@ -195,7 +195,7 @@ func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
id := ContainerNameToDockerId(name)
// We assume that if Inspect fails then the container is not known to docker.
ctnr, err := self.client.ContainerInspect(context.Background(), id)
ctnr, err := f.client.ContainerInspect(context.Background(), id)
if err != nil || !ctnr.State.Running {
return false, true, fmt.Errorf("error inspecting container: %v", err)
}
@ -203,15 +203,15 @@ func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
return true, true, nil
}
func (self *dockerFactory) DebugInfo() map[string][]string {
func (f *dockerFactory) DebugInfo() map[string][]string {
return map[string][]string{}
}
var (
version_regexp_string = `(\d+)\.(\d+)\.(\d+)`
version_re = regexp.MustCompile(version_regexp_string)
apiversion_regexp_string = `(\d+)\.(\d+)`
apiversion_re = regexp.MustCompile(apiversion_regexp_string)
versionRegexpString = `(\d+)\.(\d+)\.(\d+)`
versionRe = regexp.MustCompile(versionRegexpString)
apiVersionRegexpString = `(\d+)\.(\d+)`
apiVersionRe = regexp.MustCompile(apiVersionRegexpString)
)
func startThinPoolWatcher(dockerInfo *dockertypes.Info) (*devicemapper.ThinPoolWatcher, error) {
@ -269,7 +269,7 @@ func ensureThinLsKernelVersion(kernelVersion string) error {
// thin_ls to work without corrupting the thin pool
minRhel7KernelVersion := semver.MustParse("3.10.0")
matches := version_re.FindStringSubmatch(kernelVersion)
matches := versionRe.FindStringSubmatch(kernelVersion)
if len(matches) < 4 {
return fmt.Errorf("error parsing kernel version: %q is not a semver", kernelVersion)
}
@ -335,7 +335,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
}
// Version already validated above, assume no error here.
dockerVersion, _ := parseVersion(dockerInfo.ServerVersion, version_re, 3)
dockerVersion, _ := parseVersion(dockerInfo.ServerVersion, versionRe, 3)
dockerAPIVersion, _ := APIVersion()

View File

@ -232,8 +232,8 @@ func newDockerContainerHandler(
ipAddress := ctnr.NetworkSettings.IPAddress
networkMode := string(ctnr.HostConfig.NetworkMode)
if ipAddress == "" && strings.HasPrefix(networkMode, "container:") {
containerId := strings.TrimPrefix(networkMode, "container:")
c, err := client.ContainerInspect(context.Background(), containerId)
containerID := strings.TrimPrefix(networkMode, "container:")
c, err := client.ContainerInspect(context.Background(), containerID)
if err != nil {
return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
}
@ -331,68 +331,68 @@ func (h *dockerFsHandler) Usage() common.FsUsage {
return usage
}
func (self *dockerContainerHandler) Start() {
if self.fsHandler != nil {
self.fsHandler.Start()
func (h *dockerContainerHandler) Start() {
if h.fsHandler != nil {
h.fsHandler.Start()
}
}
func (self *dockerContainerHandler) Cleanup() {
if self.fsHandler != nil {
self.fsHandler.Stop()
func (h *dockerContainerHandler) Cleanup() {
if h.fsHandler != nil {
h.fsHandler.Stop()
}
}
func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {
return self.reference, nil
func (h *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {
return h.reference, nil
}
func (self *dockerContainerHandler) needNet() bool {
if self.includedMetrics.Has(container.NetworkUsageMetrics) {
return !self.networkMode.IsContainer()
func (h *dockerContainerHandler) needNet() bool {
if h.includedMetrics.Has(container.NetworkUsageMetrics) {
return !h.networkMode.IsContainer()
}
return false
}
func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics)
spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
func (h *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
hasFilesystem := h.includedMetrics.Has(container.DiskUsageMetrics)
spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, h.needNet(), hasFilesystem)
spec.Labels = self.labels
spec.Envs = self.envs
spec.Image = self.image
spec.CreationTime = self.creationTime
spec.Labels = h.labels
spec.Envs = h.envs
spec.Image = h.image
spec.CreationTime = h.creationTime
return spec, err
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
mi, err := self.machineInfoFactory.GetMachineInfo()
func (h *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
mi, err := h.machineInfoFactory.GetMachineInfo()
if err != nil {
return err
}
if self.includedMetrics.Has(container.DiskIOMetrics) {
if h.includedMetrics.Has(container.DiskIOMetrics) {
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
}
if !self.includedMetrics.Has(container.DiskUsageMetrics) {
if !h.includedMetrics.Has(container.DiskUsageMetrics) {
return nil
}
var device string
switch self.storageDriver {
switch h.storageDriver {
case devicemapperStorageDriver:
// Device has to be the pool name to correlate with the device name as
// set in the machine info filesystems.
device = self.poolName
device = h.poolName
case aufsStorageDriver, overlayStorageDriver, overlay2StorageDriver, vfsStorageDriver:
deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
deviceInfo, err := h.fsInfo.GetDirFsDevice(h.rootfsStorageDir)
if err != nil {
return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
return fmt.Errorf("unable to determine device info for dir: %v: %v", h.rootfsStorageDir, err)
}
device = deviceInfo.Device
case zfsStorageDriver:
device = self.zfsParent
device = h.zfsParent
default:
return nil
}
@ -412,7 +412,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
}
fsStat := info.FsStats{Device: device, Type: fsType, Limit: limit}
usage := self.fsHandler.Usage()
usage := h.fsHandler.Usage()
fsStat.BaseUsage = usage.BaseUsageBytes
fsStat.Usage = usage.TotalUsageBytes
fsStat.Inodes = usage.InodeUsage
@ -423,8 +423,8 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
}
// TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := self.libcontainerHandler.GetStats()
func (h *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := h.libcontainerHandler.GetStats()
if err != nil {
return stats, err
}
@ -432,12 +432,12 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
// includes containers running in Kubernetes pods that use the network of the
// infrastructure container. This stops metrics being reported multiple times
// for each container in a pod.
if !self.needNet() {
if !h.needNet() {
stats.Network = info.NetworkStats{}
}
// Get filesystem stats.
err = self.getFsStats(stats)
err = h.getFsStats(stats)
if err != nil {
return stats, err
}
@ -445,35 +445,35 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
return stats, nil
}
func (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
func (h *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
// No-op for Docker driver.
return []info.ContainerReference{}, nil
}
func (self *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := self.cgroupPaths[resource]
func (h *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := h.cgroupPaths[resource]
if !ok {
return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
}
return path, nil
}
func (self *dockerContainerHandler) GetContainerLabels() map[string]string {
return self.labels
func (h *dockerContainerHandler) GetContainerLabels() map[string]string {
return h.labels
}
func (self *dockerContainerHandler) GetContainerIPAddress() string {
return self.ipAddress
func (h *dockerContainerHandler) GetContainerIPAddress() string {
return h.ipAddress
}
func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return self.libcontainerHandler.GetProcesses()
func (h *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return h.libcontainerHandler.GetProcesses()
}
func (self *dockerContainerHandler) Exists() bool {
return common.CgroupExists(self.cgroupPaths)
func (h *dockerContainerHandler) Exists() bool {
return common.CgroupExists(h.cgroupPaths)
}
func (self *dockerContainerHandler) Type() container.ContainerType {
func (h *dockerContainerHandler) Type() container.ContainerType {
return container.ContainerTypeDocker
}

View File

@ -208,9 +208,8 @@ func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, in
klog.V(3).Infof("Using factory %q for container %q", factory, name)
handle, err := factory.NewContainerHandler(name, inHostNamespace)
return handle, canAccept, err
} else {
klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
}
klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
}
return nil, false, fmt.Errorf("no known factory can handle creation of container")
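For context, a minimal sketch of the selection loop this hunk simplifies: each registered factory is asked whether it can handle and accept the container, and the first match wins. The prefixFactory type is an illustrative stand-in, not cAdvisor's API.

package main

import "fmt"

type factory interface {
	String() string
	CanHandleAndAccept(name string) (bool, bool, error)
}

// prefixFactory accepts any container whose name starts with its prefix.
type prefixFactory struct{ name, prefix string }

func (f prefixFactory) String() string { return f.name }
func (f prefixFactory) CanHandleAndAccept(name string) (bool, bool, error) {
	ok := len(name) >= len(f.prefix) && name[:len(f.prefix)] == f.prefix
	return ok, ok, nil
}

func pick(factories []factory, name string) (factory, error) {
	for _, f := range factories {
		canHandle, canAccept, err := f.CanHandleAndAccept(name)
		if err == nil && canHandle && canAccept {
			return f, nil
		}
	}
	return nil, fmt.Errorf("no known factory can handle creation of container")
}

func main() {
	fs := []factory{prefixFactory{"docker", "/docker/"}, prefixFactory{"raw", "/"}}
	f, _ := pick(fs, "/docker/abc")
	fmt.Println(f) // docker
}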

View File

@ -31,20 +31,20 @@ type mockContainerHandlerFactory struct {
CanAcceptValue bool
}
func (self *mockContainerHandlerFactory) String() string {
return self.Name
func (f *mockContainerHandlerFactory) String() string {
return f.Name
}
func (self *mockContainerHandlerFactory) DebugInfo() map[string][]string {
func (f *mockContainerHandlerFactory) DebugInfo() map[string][]string {
return map[string][]string{}
}
func (self *mockContainerHandlerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
return self.CanHandleValue, self.CanAcceptValue, nil
func (f *mockContainerHandlerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
return f.CanHandleValue, f.CanAcceptValue, nil
}
func (self *mockContainerHandlerFactory) NewContainerHandler(name string, isHostNamespace bool) (container.ContainerHandler, error) {
args := self.Called(name)
func (f *mockContainerHandlerFactory) NewContainerHandler(name string, isHostNamespace bool) (container.ContainerHandler, error) {
args := f.Called(name)
return args.Get(0).(container.ContainerHandler), args.Error(1)
}

View File

@ -48,7 +48,6 @@ var (
clearRefsFilePathPattern = "/proc/%d/clear_refs"
referencedRegexp = regexp.MustCompile(`Referenced:\s*([0-9]+)\s*kB`)
isDigitRegExp = regexp.MustCompile("\\d+")
)
type Handler struct {
@ -135,7 +134,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
}
if h.includedMetrics.Has(container.NetworkAdvancedTcpUsageMetrics) {
ta, err := advancedTcpStatsFromProc(h.rootFs, h.pid, "net/netstat", "net/snmp")
ta, err := advancedTCPStatsFromProc(h.rootFs, h.pid, "net/netstat", "net/snmp")
if err != nil {
klog.V(4).Infof("Unable to get advanced tcp stats from pid %d: %v", h.pid, err)
} else {
@ -221,17 +220,17 @@ func processLimitsFile(fileData string) []info.UlimitSpec {
}
soft := strings.TrimSpace(fields[1])
soft_num, soft_err := parseUlimit(soft)
softNum, softErr := parseUlimit(soft)
hard := strings.TrimSpace(fields[2])
hard_num, hard_err := parseUlimit(hard)
hardNum, hardErr := parseUlimit(hard)
// Omit metric if there were any parsing errors
if soft_err == nil && hard_err == nil {
if softErr == nil && hardErr == nil {
ulimitSpec := info.UlimitSpec{
Name: name,
SoftLimit: int64(soft_num),
HardLimit: int64(hard_num),
SoftLimit: int64(softNum),
HardLimit: int64(hardNum),
}
ulimits = append(ulimits, ulimitSpec)
}
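For reference, a runnable sketch of the soft/hard ulimit parsing above; the parseUlimit stand-in assumes "unlimited" maps to -1, which is an assumption here, not taken from this commit.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUlimit is a hypothetical stand-in: "unlimited" becomes -1, anything
// else is parsed as a base-10 integer.
func parseUlimit(s string) (int64, error) {
	if s == "unlimited" {
		return -1, nil
	}
	return strconv.ParseInt(s, 10, 64)
}

func main() {
	// One row of /proc/<pid>/limits, simplified to three whitespace-split
	// fields: name, soft limit, hard limit.
	fields := []string{"max_open_files", "1024", "unlimited"}
	softNum, softErr := parseUlimit(strings.TrimSpace(fields[1]))
	hardNum, hardErr := parseUlimit(strings.TrimSpace(fields[2]))
	// Omit the metric if there were any parsing errors, as above.
	if softErr == nil && hardErr == nil {
		fmt.Println(softNum, hardNum) // 1024 -1
	}
}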
@ -521,7 +520,7 @@ func setInterfaceStatValues(fields []string, pointers []*uint64) error {
func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
tcpStats, err := scanTcpStats(tcpStatsFile)
tcpStats, err := scanTCPStats(tcpStatsFile)
if err != nil {
return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
}
@ -529,18 +528,18 @@ func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error)
return tcpStats, nil
}
func advancedTcpStatsFromProc(rootFs string, pid int, file1, file2 string) (info.TcpAdvancedStat, error) {
func advancedTCPStatsFromProc(rootFs string, pid int, file1, file2 string) (info.TcpAdvancedStat, error) {
var advancedStats info.TcpAdvancedStat
var err error
netstatFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file1)
err = scanAdvancedTcpStats(&advancedStats, netstatFile)
err = scanAdvancedTCPStats(&advancedStats, netstatFile)
if err != nil {
return advancedStats, err
}
snmpFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file2)
err = scanAdvancedTcpStats(&advancedStats, snmpFile)
err = scanAdvancedTCPStats(&advancedStats, snmpFile)
if err != nil {
return advancedStats, err
}
@ -548,17 +547,17 @@ func advancedTcpStatsFromProc(rootFs string, pid int, file1, file2 string) (info
return advancedStats, nil
}
func scanAdvancedTcpStats(advancedStats *info.TcpAdvancedStat, advancedTcpStatsFile string) error {
data, err := ioutil.ReadFile(advancedTcpStatsFile)
func scanAdvancedTCPStats(advancedStats *info.TcpAdvancedStat, advancedTCPStatsFile string) error {
data, err := ioutil.ReadFile(advancedTCPStatsFile)
if err != nil {
return fmt.Errorf("failure opening %s: %v", advancedTcpStatsFile, err)
return fmt.Errorf("failure opening %s: %v", advancedTCPStatsFile, err)
}
reader := strings.NewReader(string(data))
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
advancedTcpStats := make(map[string]interface{})
advancedTCPStats := make(map[string]interface{})
for scanner.Scan() {
nameParts := strings.Split(scanner.Text(), " ")
scanner.Scan()
@ -570,7 +569,7 @@ func scanAdvancedTcpStats(advancedStats *info.TcpAdvancedStat, advancedTcpStatsF
}
if len(nameParts) != len(valueParts) {
return fmt.Errorf("mismatch field count mismatch in %s: %s",
advancedTcpStatsFile, protocol)
advancedTCPStatsFile, protocol)
}
for i := 1; i < len(nameParts); i++ {
if strings.Contains(valueParts[i], "-") {
@ -578,18 +577,18 @@ func scanAdvancedTcpStats(advancedStats *info.TcpAdvancedStat, advancedTcpStatsF
if err != nil {
return fmt.Errorf("decode value: %s to int64 error: %s", valueParts[i], err)
}
advancedTcpStats[nameParts[i]] = vInt64
advancedTCPStats[nameParts[i]] = vInt64
} else {
vUint64, err := strconv.ParseUint(valueParts[i], 10, 64)
if err != nil {
return fmt.Errorf("decode value: %s to uint64 error: %s", valueParts[i], err)
}
advancedTcpStats[nameParts[i]] = vUint64
advancedTCPStats[nameParts[i]] = vUint64
}
}
}
b, err := json.Marshal(advancedTcpStats)
b, err := json.Marshal(advancedTCPStats)
if err != nil {
return err
}
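For context, a minimal sketch of the two-line /proc format scanAdvancedTCPStats walks: a header line of field names followed by a line of values under the same protocol prefix; values containing "-" are decoded as int64, the rest as uint64.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	names := strings.Split("TcpExt: SyncookiesSent DelayedACKs", " ")
	values := strings.Split("TcpExt: 0 1337", " ")
	stats := make(map[string]interface{})
	for i := 1; i < len(names); i++ {
		if strings.Contains(values[i], "-") {
			v, _ := strconv.ParseInt(values[i], 10, 64)
			stats[names[i]] = v
		} else {
			v, _ := strconv.ParseUint(values[i], 10, 64)
			stats[names[i]] = v
		}
	}
	fmt.Println(stats) // map[DelayedACKs:1337 SyncookiesSent:0]
}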
@ -603,7 +602,7 @@ func scanAdvancedTcpStats(advancedStats *info.TcpAdvancedStat, advancedTcpStatsF
}
func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {
func scanTCPStats(tcpStatsFile string) (info.TcpStat, error) {
var stats info.TcpStat
@ -678,7 +677,7 @@ func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error)
return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
}
udpStats, err = scanUdpStats(r)
udpStats, err = scanUDPStats(r)
if err != nil {
return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
}
@ -686,7 +685,7 @@ func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error)
return udpStats, nil
}
func scanUdpStats(r io.Reader) (info.UdpStat, error) {
func scanUDPStats(r io.Reader) (info.UdpStat, error) {
var stats info.UdpStat
scanner := bufio.NewScanner(r)
@ -754,7 +753,7 @@ func minUint32(x, y uint32) uint32 {
var numCpusFunc = getNumberOnlineCPUs
// Convert libcontainer stats to info.ContainerStats.
func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
func setCPUStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
ret.Cpu.Usage.Total = s.CpuStats.CpuUsage.TotalUsage
@ -898,7 +897,7 @@ func newContainerStats(libcontainerStats *libcontainer.Stats, includedMetrics co
}
if s := libcontainerStats.CgroupStats; s != nil {
setCpuStats(s, ret, includedMetrics.Has(container.PerCpuUsageMetrics))
setCPUStats(s, ret, includedMetrics.Has(container.PerCpuUsageMetrics))
if includedMetrics.Has(container.DiskIOMetrics) {
setDiskIoStats(s, ret)
}
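A small sketch of the per-CPU trimming behaviour tested further below: the kernel reports one slot per possible CPU, so setCPUStats cuts the slice down to the online count. The trimPerCPU name is illustrative only.

package main

import "fmt"

// trimPerCPU keeps only the slots for CPUs that are actually online.
func trimPerCPU(perCPU []uint64, online uint32) []uint64 {
	if uint32(len(perCPU)) > online {
		return perCPU[0:online]
	}
	return perCPU
}

func main() {
	possible := make([]uint64, 31) // one slot per possible CPU
	possible[0], possible[1] = 8562955455524, 8562955455524
	fmt.Println(len(trimPerCPU(possible, 2))) // 2
}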

View File

@ -74,7 +74,7 @@ func TestScanUDPStats(t *testing.T) {
t.Errorf("failure opening %s: %v", udpStatsFile, err)
}
stats, err := scanUdpStats(r)
stats, err := scanUDPStats(r)
if err != nil {
t.Error(err)
}
@ -103,15 +103,15 @@ func TestMorePossibleCPUs(t *testing.T) {
}
possibleCPUs := uint32(31)
perCpuUsage := make([]uint64, possibleCPUs)
perCPUUsage := make([]uint64, possibleCPUs)
for i := uint32(0); i < realNumCPUs; i++ {
perCpuUsage[i] = 8562955455524
perCPUUsage[i] = 8562955455524
}
s := &cgroups.Stats{
CpuStats: cgroups.CpuStats{
CpuUsage: cgroups.CpuUsage{
PercpuUsage: perCpuUsage,
PercpuUsage: perCPUUsage,
TotalUsage: 33802947350272,
UsageInKernelmode: 734746 * nanosecondsInSeconds / clockTicks,
UsageInUsermode: 2767637 * nanosecondsInSeconds / clockTicks,
@ -119,12 +119,12 @@ func TestMorePossibleCPUs(t *testing.T) {
},
}
var ret info.ContainerStats
setCpuStats(s, &ret, true)
setCPUStats(s, &ret, true)
expected := info.ContainerStats{
Cpu: info.CpuStats{
Usage: info.CpuUsage{
PerCpu: perCpuUsage[0:realNumCPUs],
PerCpu: perCPUUsage[0:realNumCPUs],
User: s.CpuStats.CpuUsage.UsageInUsermode,
System: s.CpuStats.CpuUsage.UsageInKernelmode,
Total: 33802947350272,

View File

@ -130,38 +130,38 @@ type DiskKey struct {
Minor uint64
}
func DiskStatsCopy1(disk_stat map[DiskKey]*info.PerDiskStats) []info.PerDiskStats {
func DiskStatsCopy1(diskStat map[DiskKey]*info.PerDiskStats) []info.PerDiskStats {
i := 0
stat := make([]info.PerDiskStats, len(disk_stat))
for _, disk := range disk_stat {
stat := make([]info.PerDiskStats, len(diskStat))
for _, disk := range diskStat {
stat[i] = *disk
i++
}
return stat
}
func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) {
if len(blkio_stats) == 0 {
func DiskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) {
if len(blkioStats) == 0 {
return
}
disk_stat := make(map[DiskKey]*info.PerDiskStats)
for i := range blkio_stats {
major := blkio_stats[i].Major
minor := blkio_stats[i].Minor
disk_key := DiskKey{
diskStat := make(map[DiskKey]*info.PerDiskStats)
for i := range blkioStats {
major := blkioStats[i].Major
minor := blkioStats[i].Minor
key := DiskKey{
Major: major,
Minor: minor,
}
diskp, ok := disk_stat[disk_key]
diskp, ok := diskStat[key]
if !ok {
diskp = DiskStatsCopy0(major, minor)
disk_stat[disk_key] = diskp
diskStat[key] = diskp
}
op := blkio_stats[i].Op
op := blkioStats[i].Op
if op == "" {
op = "Count"
}
diskp.Stats[op] = blkio_stats[i].Value
diskp.Stats[op] = blkioStats[i].Value
}
return DiskStatsCopy1(disk_stat)
return DiskStatsCopy1(diskStat)
}
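For reference, a self-contained sketch of the aggregation DiskStatsCopy performs: blkio entries sharing a (major, minor) pair fold into one per-disk map keyed by Op, with an empty Op stored as "Count". Types are simplified stand-ins.

package main

import "fmt"

type diskKey struct{ major, minor uint64 }

type blkioEntry struct {
	major, minor uint64
	op           string
	value        uint64
}

func main() {
	entries := []blkioEntry{
		{8, 0, "Read", 10},
		{8, 0, "Write", 20},
		{8, 16, "", 5}, // empty Op becomes "Count"
	}
	perDisk := map[diskKey]map[string]uint64{}
	for _, e := range entries {
		k := diskKey{e.major, e.minor}
		if perDisk[k] == nil {
			perDisk[k] = map[string]uint64{}
		}
		op := e.op
		if op == "" {
			op = "Count"
		}
		perDisk[k][op] = e.value
	}
	fmt.Println(len(perDisk), perDisk[diskKey{8, 0}]) // 2 map[Read:10 Write:20]
}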

View File

@ -52,27 +52,27 @@ type rawFactory struct {
rawPrefixWhiteList []string
}
func (self *rawFactory) String() string {
func (f *rawFactory) String() string {
return "raw"
}
func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
func (f *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
rootFs := "/"
if !inHostNamespace {
rootFs = "/rootfs"
}
return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.includedMetrics)
return newRawContainerHandler(name, f.cgroupSubsystems, f.machineInfoFactory, f.fsInfo, f.watcher, rootFs, f.includedMetrics)
}
// The raw factory can handle any container. If --docker_only is set to true, non-docker containers are ignored except for "/" and those whitelisted by the raw_cgroup_prefix_whitelist flag.
func (self *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) {
func (f *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) {
if name == "/" {
return true, true, nil
}
if *dockerOnly && self.rawPrefixWhiteList[0] == "" {
if *dockerOnly && f.rawPrefixWhiteList[0] == "" {
return true, false, nil
}
for _, prefix := range self.rawPrefixWhiteList {
for _, prefix := range f.rawPrefixWhiteList {
if strings.HasPrefix(name, prefix) {
return true, true, nil
}
@ -80,8 +80,8 @@ func (self *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) {
return true, false, nil
}
func (self *rawFactory) DebugInfo() map[string][]string {
return common.DebugInfo(self.watcher.GetWatches())
func (f *rawFactory) DebugInfo() map[string][]string {
return common.DebugInfo(f.watcher.GetWatches())
}
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics map[container.MetricKind]struct{}, rawPrefixWhiteList []string) error {

View File

@ -95,17 +95,17 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
}, nil
}
func (self *rawContainerHandler) ContainerReference() (info.ContainerReference, error) {
func (h *rawContainerHandler) ContainerReference() (info.ContainerReference, error) {
// We only know the container by its one name.
return info.ContainerReference{
Name: self.name,
Name: h.name,
}, nil
}
func (self *rawContainerHandler) GetRootNetworkDevices() ([]info.NetInfo, error) {
func (h *rawContainerHandler) GetRootNetworkDevices() ([]info.NetInfo, error) {
nd := []info.NetInfo{}
if isRootCgroup(self.name) {
mi, err := self.machineInfoFactory.GetMachineInfo()
if isRootCgroup(h.name) {
mi, err := h.machineInfoFactory.GetMachineInfo()
if err != nil {
return nd, err
}
@ -115,22 +115,22 @@ func (self *rawContainerHandler) GetRootNetworkDevices() ([]info.NetInfo, error)
}
// Nothing to start up.
func (self *rawContainerHandler) Start() {}
func (h *rawContainerHandler) Start() {}
// Nothing to clean up.
func (self *rawContainerHandler) Cleanup() {}
func (h *rawContainerHandler) Cleanup() {}
func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
func (h *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
const hasNetwork = false
hasFilesystem := isRootCgroup(self.name) || len(self.externalMounts) > 0
spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, hasNetwork, hasFilesystem)
hasFilesystem := isRootCgroup(h.name) || len(h.externalMounts) > 0
spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, hasNetwork, hasFilesystem)
if err != nil {
return spec, err
}
if isRootCgroup(self.name) {
if isRootCgroup(h.name) {
// Check physical network devices for root container.
nd, err := self.GetRootNetworkDevices()
nd, err := h.GetRootNetworkDevices()
if err != nil {
return spec, err
}
@ -189,53 +189,53 @@ func fsToFsStats(fs *fs.Fs) info.FsStats {
}
}
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
func (h *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
var filesystems []fs.Fs
var err error
// Get Filesystem information only for the root cgroup.
if isRootCgroup(self.name) {
filesystems, err = self.fsInfo.GetGlobalFsInfo()
if isRootCgroup(h.name) {
filesystems, err = h.fsInfo.GetGlobalFsInfo()
if err != nil {
return err
}
} else if self.includedMetrics.Has(container.DiskUsageMetrics) || self.includedMetrics.Has(container.DiskIOMetrics) {
if len(self.externalMounts) > 0 {
} else if h.includedMetrics.Has(container.DiskUsageMetrics) || h.includedMetrics.Has(container.DiskIOMetrics) {
if len(h.externalMounts) > 0 {
mountSet := make(map[string]struct{})
for _, mount := range self.externalMounts {
for _, mount := range h.externalMounts {
mountSet[mount.HostDir] = struct{}{}
}
filesystems, err = self.fsInfo.GetFsInfoForPath(mountSet)
filesystems, err = h.fsInfo.GetFsInfoForPath(mountSet)
if err != nil {
return err
}
}
}
if isRootCgroup(self.name) || self.includedMetrics.Has(container.DiskUsageMetrics) {
if isRootCgroup(h.name) || h.includedMetrics.Has(container.DiskUsageMetrics) {
for i := range filesystems {
fs := filesystems[i]
stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
}
}
if isRootCgroup(self.name) || self.includedMetrics.Has(container.DiskIOMetrics) {
common.AssignDeviceNamesToDiskStats(&fsNamer{fs: filesystems, factory: self.machineInfoFactory}, &stats.DiskIo)
if isRootCgroup(h.name) || h.includedMetrics.Has(container.DiskIOMetrics) {
common.AssignDeviceNamesToDiskStats(&fsNamer{fs: filesystems, factory: h.machineInfoFactory}, &stats.DiskIo)
}
return nil
}
func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
if *disableRootCgroupStats && isRootCgroup(self.name) {
func (h *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
if *disableRootCgroupStats && isRootCgroup(h.name) {
return nil, nil
}
stats, err := self.libcontainerHandler.GetStats()
stats, err := h.libcontainerHandler.GetStats()
if err != nil {
return stats, err
}
// Get filesystem stats.
err = self.getFsStats(stats)
err = h.getFsStats(stats)
if err != nil {
return stats, err
}
@ -243,36 +243,36 @@ func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
return stats, nil
}
func (self *rawContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := self.cgroupPaths[resource]
func (h *rawContainerHandler) GetCgroupPath(resource string) (string, error) {
path, ok := h.cgroupPaths[resource]
if !ok {
return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.name)
}
return path, nil
}
func (self *rawContainerHandler) GetContainerLabels() map[string]string {
func (h *rawContainerHandler) GetContainerLabels() map[string]string {
return map[string]string{}
}
func (self *rawContainerHandler) GetContainerIPAddress() string {
func (h *rawContainerHandler) GetContainerIPAddress() string {
// The IP address for the raw container corresponds to the system IP address.
return "127.0.0.1"
}
func (self *rawContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
return common.ListContainers(self.name, self.cgroupPaths, listType)
func (h *rawContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
return common.ListContainers(h.name, h.cgroupPaths, listType)
}
func (self *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return self.libcontainerHandler.GetProcesses()
func (h *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
return h.libcontainerHandler.GetProcesses()
}
func (self *rawContainerHandler) Exists() bool {
return common.CgroupExists(self.cgroupPaths)
func (h *rawContainerHandler) Exists() bool {
return common.CgroupExists(h.cgroupPaths)
}
func (self *rawContainerHandler) Type() container.ContainerType {
func (h *rawContainerHandler) Type() container.ContainerType {
return container.ContainerTypeRaw
}

View File

@ -68,10 +68,10 @@ func NewRawContainerWatcher() (watcher.ContainerWatcher, error) {
return rawWatcher, nil
}
func (self *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error {
func (w *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error {
// Watch this container (all its cgroups) and all subdirectories.
for _, cgroupPath := range self.cgroupPaths {
_, err := self.watchDirectory(events, cgroupPath, "/")
for _, cgroupPath := range w.cgroupPaths {
_, err := w.watchDirectory(events, cgroupPath, "/")
if err != nil {
return err
}
@ -81,17 +81,17 @@ func (self *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error
go func() {
for {
select {
case event := <-self.watcher.Event():
err := self.processEvent(event, events)
case event := <-w.watcher.Event():
err := w.processEvent(event, events)
if err != nil {
klog.Warningf("Error while processing event (%+v): %v", event, err)
}
case err := <-self.watcher.Error():
case err := <-w.watcher.Error():
klog.Warningf("Error while watching %q: %v", "/", err)
case <-self.stopWatcher:
err := self.watcher.Close()
case <-w.stopWatcher:
err := w.watcher.Close()
if err == nil {
self.stopWatcher <- err
w.stopWatcher <- err
return
}
}
@ -101,21 +101,21 @@ func (self *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error
return nil
}
func (self *rawContainerWatcher) Stop() error {
func (w *rawContainerWatcher) Stop() error {
// Rendezvous with the watcher thread.
self.stopWatcher <- nil
return <-self.stopWatcher
w.stopWatcher <- nil
return <-w.stopWatcher
}
// Watches the specified directory and all subdirectories. Returns whether the path was
// already being watched and an error (if any).
func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEvent, dir string, containerName string) (bool, error) {
func (w *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEvent, dir string, containerName string) (bool, error) {
// Don't watch .mount cgroups because they never have containers as sub-cgroups. A single container
// can have many .mount cgroups associated with it, which can quickly exhaust the inotify watches on a node.
if strings.HasSuffix(containerName, ".mount") {
return false, nil
}
alreadyWatching, err := self.watcher.AddWatch(containerName, dir)
alreadyWatching, err := w.watcher.AddWatch(containerName, dir)
if err != nil {
return alreadyWatching, err
}
@ -124,7 +124,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
cleanup := true
defer func() {
if cleanup {
_, err := self.watcher.RemoveWatch(containerName, dir)
_, err := w.watcher.RemoveWatch(containerName, dir)
if err != nil {
klog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
}
@ -141,7 +141,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
if entry.IsDir() {
entryPath := path.Join(dir, entry.Name())
subcontainerName := path.Join(containerName, entry.Name())
alreadyWatchingSubDir, err := self.watchDirectory(events, entryPath, subcontainerName)
alreadyWatchingSubDir, err := w.watchDirectory(events, entryPath, subcontainerName)
if err != nil {
klog.Errorf("Failed to watch directory %q: %v", entryPath, err)
if os.IsNotExist(err) {
@ -168,7 +168,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
return alreadyWatching, nil
}
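For context, a runnable sketch of the recursion above: watch the directory, then descend into subdirectories, skipping ".mount" cgroups so a single container cannot exhaust the node's inotify watch budget. The addWatch callback stands in for the real inotify wrapper.

package main

import (
	"fmt"
	"io/ioutil"
	"path"
	"strings"
)

func watchTree(dir string, addWatch func(string) error) error {
	if strings.HasSuffix(dir, ".mount") {
		return nil // .mount cgroups never contain sub-containers
	}
	if err := addWatch(dir); err != nil {
		return err
	}
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if entry.IsDir() {
			if err := watchTree(path.Join(dir, entry.Name()), addWatch); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	_ = watchTree("/sys/fs/cgroup/memory", func(d string) error {
		fmt.Println("watching", d)
		return nil
	})
}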
func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan watcher.ContainerEvent) error {
func (w *rawContainerWatcher) processEvent(event *inotify.Event, events chan watcher.ContainerEvent) error {
// Convert the inotify event type to a container create or delete.
var eventType watcher.ContainerEventType
switch {
@ -187,7 +187,7 @@ func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan
// Derive the container name from the path name.
var containerName string
for _, mount := range self.cgroupSubsystems.Mounts {
for _, mount := range w.cgroupSubsystems.Mounts {
mountLocation := path.Clean(mount.Mountpoint) + "/"
if strings.HasPrefix(event.Name, mountLocation) {
containerName = event.Name[len(mountLocation)-1:]
@ -202,7 +202,7 @@ func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan
switch eventType {
case watcher.ContainerAdd:
// New container was created, watch it.
alreadyWatched, err := self.watchDirectory(events, event.Name, containerName)
alreadyWatched, err := w.watchDirectory(events, event.Name, containerName)
if err != nil {
return err
}
@ -213,7 +213,7 @@ func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan
}
case watcher.ContainerDelete:
// Container was deleted, stop watching for it.
lastWatched, err := self.watcher.RemoveWatch(containerName, event.Name)
lastWatched, err := w.watcher.RemoveWatch(containerName, event.Name)
if err != nil {
return err
}

View File

@ -36,68 +36,68 @@ func NewMockContainerHandler(containerName string) *MockContainerHandler {
// If self.Name is not empty, then ContainerReference() will return self.Name and self.Aliases.
// Otherwise, it will use the value provided by .On().Return().
func (self *MockContainerHandler) ContainerReference() (info.ContainerReference, error) {
if len(self.Name) > 0 {
func (h *MockContainerHandler) ContainerReference() (info.ContainerReference, error) {
if len(h.Name) > 0 {
var aliases []string
if len(self.Aliases) > 0 {
aliases = make([]string, len(self.Aliases))
copy(aliases, self.Aliases)
if len(h.Aliases) > 0 {
aliases = make([]string, len(h.Aliases))
copy(aliases, h.Aliases)
}
return info.ContainerReference{
Name: self.Name,
Name: h.Name,
Aliases: aliases,
}, nil
}
args := self.Called()
args := h.Called()
return args.Get(0).(info.ContainerReference), args.Error(1)
}
func (self *MockContainerHandler) Start() {}
func (h *MockContainerHandler) Start() {}
func (self *MockContainerHandler) Cleanup() {}
func (h *MockContainerHandler) Cleanup() {}
func (self *MockContainerHandler) GetSpec() (info.ContainerSpec, error) {
args := self.Called()
func (h *MockContainerHandler) GetSpec() (info.ContainerSpec, error) {
args := h.Called()
return args.Get(0).(info.ContainerSpec), args.Error(1)
}
func (self *MockContainerHandler) GetStats() (*info.ContainerStats, error) {
args := self.Called()
func (h *MockContainerHandler) GetStats() (*info.ContainerStats, error) {
args := h.Called()
return args.Get(0).(*info.ContainerStats), args.Error(1)
}
func (self *MockContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
args := self.Called(listType)
func (h *MockContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
args := h.Called(listType)
return args.Get(0).([]info.ContainerReference), args.Error(1)
}
func (self *MockContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
args := self.Called(listType)
func (h *MockContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
args := h.Called(listType)
return args.Get(0).([]int), args.Error(1)
}
func (self *MockContainerHandler) Exists() bool {
args := self.Called()
func (h *MockContainerHandler) Exists() bool {
args := h.Called()
return args.Get(0).(bool)
}
func (self *MockContainerHandler) GetCgroupPath(path string) (string, error) {
args := self.Called(path)
func (h *MockContainerHandler) GetCgroupPath(path string) (string, error) {
args := h.Called(path)
return args.Get(0).(string), args.Error(1)
}
func (self *MockContainerHandler) GetContainerLabels() map[string]string {
args := self.Called()
func (h *MockContainerHandler) GetContainerLabels() map[string]string {
args := h.Called()
return args.Get(0).(map[string]string)
}
func (self *MockContainerHandler) Type() container.ContainerType {
args := self.Called()
func (h *MockContainerHandler) Type() container.ContainerType {
args := h.Called()
return args.Get(0).(container.ContainerType)
}
func (self *MockContainerHandler) GetContainerIPAddress() string {
args := self.Called()
func (h *MockContainerHandler) GetContainerIPAddress() string {
args := h.Called()
return args.Get(0).(string)
}
@ -106,18 +106,18 @@ type FactoryForMockContainerHandler struct {
PrepareContainerHandlerFunc func(name string, handler *MockContainerHandler)
}
func (self *FactoryForMockContainerHandler) String() string {
return self.Name
func (h *FactoryForMockContainerHandler) String() string {
return h.Name
}
func (self *FactoryForMockContainerHandler) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
func (h *FactoryForMockContainerHandler) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
handler := &MockContainerHandler{}
if self.PrepareContainerHandlerFunc != nil {
self.PrepareContainerHandlerFunc(name, handler)
if h.PrepareContainerHandlerFunc != nil {
h.PrepareContainerHandlerFunc(name, handler)
}
return handler, nil
}
func (self *FactoryForMockContainerHandler) CanHandle(name string) bool {
func (h *FactoryForMockContainerHandler) CanHandle(name string) bool {
return true
}

View File

@ -85,13 +85,13 @@ func (w *ThinPoolWatcher) Stop() {
}
// GetUsage gets the cached usage value of the given device.
func (w *ThinPoolWatcher) GetUsage(deviceId string) (uint64, error) {
func (w *ThinPoolWatcher) GetUsage(deviceID string) (uint64, error) {
w.lock.RLock()
defer w.lock.RUnlock()
v, ok := w.cache[deviceId]
v, ok := w.cache[deviceID]
if !ok {
return 0, fmt.Errorf("no cached value for usage of device %v", deviceId)
return 0, fmt.Errorf("no cached value for usage of device %v", deviceID)
}
return v, nil
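As a usage note, a minimal sketch of the read-locked cache lookup GetUsage performs; the Refresh loop writes under the write lock while readers only take the RLock. The usageCache type is illustrative.

package main

import (
	"fmt"
	"sync"
)

type usageCache struct {
	lock  sync.RWMutex
	cache map[string]uint64
}

func (c *usageCache) GetUsage(deviceID string) (uint64, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	v, ok := c.cache[deviceID]
	if !ok {
		return 0, fmt.Errorf("no cached value for usage of device %v", deviceID)
	}
	return v, nil
}

func main() {
	c := &usageCache{cache: map[string]uint64{"2": 23456}}
	fmt.Println(c.GetUsage("2")) // 23456 <nil>
}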
@ -129,9 +129,8 @@ func (w *ThinPoolWatcher) Refresh() error {
if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {
err = fmt.Errorf("error reserving metadata for thin-pool %v: %v output: %v", w.poolName, err, string(output))
return err
} else {
klog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName)
}
klog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName)
defer func() {
klog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName)

View File

@ -35,7 +35,7 @@ func TestRefresh(t *testing.T) {
thinLsOutput map[string]uint64
thinLsErr error
expectedError bool
deviceId string
deviceID string
expectedUsage uint64
}{
{
@ -54,7 +54,7 @@ func TestRefresh(t *testing.T) {
},
thinLsOutput: usage,
expectedError: false,
deviceId: "2",
deviceID: "2",
expectedUsage: 23456,
},
{
@ -66,7 +66,7 @@ func TestRefresh(t *testing.T) {
},
thinLsOutput: usage,
expectedError: false,
deviceId: "2",
deviceID: "2",
expectedUsage: 23456,
},
{
@ -83,7 +83,7 @@ func TestRefresh(t *testing.T) {
},
thinLsOutput: usage,
expectedError: false,
deviceId: "3",
deviceID: "3",
expectedUsage: 34567,
},
{
@ -149,9 +149,9 @@ func TestRefresh(t *testing.T) {
continue
}
actualUsage, err := watcher.GetUsage(tc.deviceId)
actualUsage, err := watcher.GetUsage(tc.deviceID)
if err != nil {
t.Errorf("%v: device ID not found: %v", tc.deviceId, err)
t.Errorf("%v: device ID not found: %v", tc.deviceID, err)
continue
}

View File

@ -44,7 +44,7 @@ func (e byTimestamp) Less(i, j int) bool {
type EventChannel struct {
// Watch ID. Can be used by the caller to request cancellation of watch events.
watchId int
watchID int
// Channel on which the caller can receive watch events.
channel chan *info.Event
}
@ -84,9 +84,9 @@ type EventManager interface {
GetEvents(request *Request) ([]*info.Event, error)
// AddEvent allows the caller to add an event to an EventManager
// object
AddEvent(e *info.Event) error
AddEvent(event *info.Event) error
// Cancels a previously requested watch event.
StopWatch(watch_id int)
StopWatch(watchID int)
}
// events provides an implementation for the EventManager interface.
@ -100,7 +100,7 @@ type events struct {
// lock guarding watchers.
watcherLock sync.RWMutex
// last allocated watch id.
lastId int
lastID int
// Event storage policy.
storagePolicy StoragePolicy
}
@ -118,9 +118,9 @@ type watch struct {
eventChannel *EventChannel
}
func NewEventChannel(watchId int) *EventChannel {
func NewEventChannel(watchID int) *EventChannel {
return &EventChannel{
watchId: watchId,
watchID: watchID,
channel: make(chan *info.Event, 10),
}
}
@ -173,12 +173,12 @@ func newWatch(request *Request, eventChannel *EventChannel) *watch {
}
}
func (self *EventChannel) GetChannel() chan *info.Event {
return self.channel
func (ch *EventChannel) GetChannel() chan *info.Event {
return ch.channel
}
func (self *EventChannel) GetWatchId() int {
return self.watchId
func (ch *EventChannel) GetWatchId() int {
return ch.watchID
}
// sorts and returns up to the last MaxEventsReturned chronological elements
@ -231,15 +231,15 @@ func checkIfEventSatisfiesRequest(request *Request, event *info.Event) bool {
// adds it to a slice of *Event objects that is returned. If both MaxEventsReturned
// and StartTime/EndTime are specified in the request object, then only
// up to the most recent MaxEventsReturned events in that time range are returned.
func (self *events) GetEvents(request *Request) ([]*info.Event, error) {
func (e *events) GetEvents(request *Request) ([]*info.Event, error) {
returnEventList := []*info.Event{}
self.eventsLock.RLock()
defer self.eventsLock.RUnlock()
e.eventsLock.RLock()
defer e.eventsLock.RUnlock()
for eventType, fetch := range request.EventType {
if !fetch {
continue
}
evs, ok := self.eventStore[eventType]
evs, ok := e.eventStore[eventType]
if !ok {
continue
}
@ -261,50 +261,50 @@ func (self *events) GetEvents(request *Request) ([]*info.Event, error) {
// Request object it is fed to the channel. The StartTime and EndTime of the watch
// request should be uninitialized because the purpose is to watch indefinitely
// for events that will happen in the future
func (self *events) WatchEvents(request *Request) (*EventChannel, error) {
func (e *events) WatchEvents(request *Request) (*EventChannel, error) {
if !request.StartTime.IsZero() || !request.EndTime.IsZero() {
return nil, errors.New(
"for a call to watch, request.StartTime and request.EndTime must be uninitialized")
}
self.watcherLock.Lock()
defer self.watcherLock.Unlock()
new_id := self.lastId + 1
returnEventChannel := NewEventChannel(new_id)
e.watcherLock.Lock()
defer e.watcherLock.Unlock()
newID := e.lastID + 1
returnEventChannel := NewEventChannel(newID)
newWatcher := newWatch(request, returnEventChannel)
self.watchers[new_id] = newWatcher
self.lastId = new_id
e.watchers[newID] = newWatcher
e.lastID = newID
return returnEventChannel, nil
}
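The write lock above makes the read-increment-store on lastID atomic, so concurrent WatchEvents calls always receive distinct watch IDs. A stripped-down sketch of that allocation, with hypothetical names:
package main

import (
	"fmt"
	"sync"
)

type watcherRegistry struct {
	mu      sync.Mutex
	lastID  int
	watches map[int]struct{}
}

func (r *watcherRegistry) register() int {
	r.mu.Lock() // exclusive: no two callers can observe the same lastID
	defer r.mu.Unlock()
	newID := r.lastID + 1
	r.watches[newID] = struct{}{}
	r.lastID = newID
	return newID
}

func main() {
	r := &watcherRegistry{watches: map[int]struct{}{}}
	fmt.Println(r.register(), r.register()) // 1 2
}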
// helper function to update the event manager's eventStore
func (self *events) updateEventStore(e *info.Event) {
self.eventsLock.Lock()
defer self.eventsLock.Unlock()
if _, ok := self.eventStore[e.EventType]; !ok {
maxNumEvents := self.storagePolicy.DefaultMaxNumEvents
if numEvents, ok := self.storagePolicy.PerTypeMaxNumEvents[e.EventType]; ok {
func (e *events) updateEventStore(event *info.Event) {
e.eventsLock.Lock()
defer e.eventsLock.Unlock()
if _, ok := e.eventStore[event.EventType]; !ok {
maxNumEvents := e.storagePolicy.DefaultMaxNumEvents
if numEvents, ok := e.storagePolicy.PerTypeMaxNumEvents[event.EventType]; ok {
maxNumEvents = numEvents
}
if maxNumEvents == 0 {
// Event storage is disabled for e.EventType
// Event storage is disabled for event.EventType
return
}
maxAge := self.storagePolicy.DefaultMaxAge
if age, ok := self.storagePolicy.PerTypeMaxAge[e.EventType]; ok {
maxAge := e.storagePolicy.DefaultMaxAge
if age, ok := e.storagePolicy.PerTypeMaxAge[event.EventType]; ok {
maxAge = age
}
self.eventStore[e.EventType] = utils.NewTimedStore(maxAge, maxNumEvents)
e.eventStore[event.EventType] = utils.NewTimedStore(maxAge, maxNumEvents)
}
self.eventStore[e.EventType].Add(e.Timestamp, e)
e.eventStore[event.EventType].Add(event.Timestamp, event)
}
func (self *events) findValidWatchers(e *info.Event) []*watch {
func (e *events) findValidWatchers(event *info.Event) []*watch {
watchesToSend := make([]*watch, 0)
for _, watcher := range self.watchers {
for _, watcher := range e.watchers {
watchRequest := watcher.request
if checkIfEventSatisfiesRequest(watchRequest, e) {
if checkIfEventSatisfiesRequest(watchRequest, event) {
watchesToSend = append(watchesToSend, watcher)
}
}
@ -314,26 +314,26 @@ func (self *events) findValidWatchers(e *info.Event) []*watch {
// method of Events object that adds the argument Event object to the
// eventStore. It also feeds the event to a set of watch channels
// held by the manager if it satisfies the request keys of the channels
func (self *events) AddEvent(e *info.Event) error {
self.updateEventStore(e)
self.watcherLock.RLock()
defer self.watcherLock.RUnlock()
watchesToSend := self.findValidWatchers(e)
func (e *events) AddEvent(event *info.Event) error {
e.updateEventStore(event)
e.watcherLock.RLock()
defer e.watcherLock.RUnlock()
watchesToSend := e.findValidWatchers(event)
for _, watchObject := range watchesToSend {
watchObject.eventChannel.GetChannel() <- e
watchObject.eventChannel.GetChannel() <- event
}
klog.V(4).Infof("Added event %v", e)
klog.V(4).Infof("Added event %v", event)
return nil
}
// Removes a watch instance from the EventManager's watchers map
func (self *events) StopWatch(watchId int) {
self.watcherLock.Lock()
defer self.watcherLock.Unlock()
_, ok := self.watchers[watchId]
func (e *events) StopWatch(watchID int) {
e.watcherLock.Lock()
defer e.watcherLock.Unlock()
_, ok := e.watchers[watchID]
if !ok {
klog.Errorf("Could not find watcher instance %v", watchId)
klog.Errorf("Could not find watcher instance %v", watchID)
}
close(self.watchers[watchId].eventChannel.GetChannel())
delete(self.watchers, watchId)
close(e.watchers[watchID].eventChannel.GetChannel())
delete(e.watchers, watchID)
}
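updateEventStore sizes each per-type TimedStore from the storage policy: a per-type override wins over the default, and a maximum of zero disables storage for that type. A minimal sketch of just that selection step (the policy type here is a stand-in, not the real StoragePolicy):
package main

import (
	"fmt"
	"time"
)

type policy struct {
	defaultMax int
	defaultAge time.Duration
	perTypeMax map[string]int
	perTypeAge map[string]time.Duration
}

func (p policy) limitsFor(eventType string) (int, time.Duration) {
	max, age := p.defaultMax, p.defaultAge
	if m, ok := p.perTypeMax[eventType]; ok {
		max = m
	}
	if a, ok := p.perTypeAge[eventType]; ok {
		age = a
	}
	return max, age // max == 0 would mean storage disabled for this type
}

func main() {
	p := policy{defaultMax: 100, defaultAge: time.Hour,
		perTypeMax: map[string]int{"oom": 1000}}
	fmt.Println(p.limitsFor("oom")) // 1000 1h0m0s
}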

View File

@ -137,7 +137,7 @@ func TestWatchEventsDetectsNewEvents(t *testing.T) {
eventsFound := 0
go func() {
for event := range returnEventChannel.GetChannel() {
eventsFound += 1
eventsFound++
if eventsFound == 1 {
ensureProperEventReturned(t, fakeEvent, event)
} else if eventsFound == 2 {

View File

@ -232,7 +232,7 @@ func processMounts(mounts []mount.MountInfo, excludedMountpointPrefixes []string
// docker is using devicemapper for its storage driver. If a loopback device is being used, don't
// return any information or error, as we want to report based on the actual partition where the
// loopback file resides, inside of the loopback file itself.
func (self *RealFsInfo) getDockerDeviceMapperInfo(context DockerContext) (string, *partition, error) {
func (i *RealFsInfo) getDockerDeviceMapperInfo(context DockerContext) (string, *partition, error) {
if context.Driver != DeviceMapper.String() {
return "", nil, nil
}
@ -242,7 +242,7 @@ func (self *RealFsInfo) getDockerDeviceMapperInfo(context DockerContext) (string
return "", nil, nil
}
dev, major, minor, blockSize, err := dockerDMDevice(context.DriverStatus, self.dmsetup)
dev, major, minor, blockSize, err := dockerDMDevice(context.DriverStatus, i.dmsetup)
if err != nil {
return "", nil, err
}
@ -256,36 +256,36 @@ func (self *RealFsInfo) getDockerDeviceMapperInfo(context DockerContext) (string
}
// addSystemRootLabel attempts to determine which device contains the mount for /.
func (self *RealFsInfo) addSystemRootLabel(mounts []mount.MountInfo) {
func (i *RealFsInfo) addSystemRootLabel(mounts []mount.MountInfo) {
for _, m := range mounts {
if m.MountPoint == "/" {
self.partitions[m.Source] = partition{
i.partitions[m.Source] = partition{
fsType: m.FsType,
mountpoint: m.MountPoint,
major: uint(m.Major),
minor: uint(m.Minor),
}
self.labels[LabelSystemRoot] = m.Source
i.labels[LabelSystemRoot] = m.Source
return
}
}
}
// addDockerImagesLabel attempts to determine which device contains the mount for docker images.
func (self *RealFsInfo) addDockerImagesLabel(context Context, mounts []mount.MountInfo) {
dockerDev, dockerPartition, err := self.getDockerDeviceMapperInfo(context.Docker)
func (i *RealFsInfo) addDockerImagesLabel(context Context, mounts []mount.MountInfo) {
dockerDev, dockerPartition, err := i.getDockerDeviceMapperInfo(context.Docker)
if err != nil {
klog.Warningf("Could not get Docker devicemapper device: %v", err)
}
if len(dockerDev) > 0 && dockerPartition != nil {
self.partitions[dockerDev] = *dockerPartition
self.labels[LabelDockerImages] = dockerDev
i.partitions[dockerDev] = *dockerPartition
i.labels[LabelDockerImages] = dockerDev
} else {
self.updateContainerImagesPath(LabelDockerImages, mounts, getDockerImagePaths(context))
i.updateContainerImagesPath(LabelDockerImages, mounts, getDockerImagePaths(context))
}
}
func (self *RealFsInfo) addCrioImagesLabel(context Context, mounts []mount.MountInfo) {
func (i *RealFsInfo) addCrioImagesLabel(context Context, mounts []mount.MountInfo) {
if context.Crio.Root != "" {
crioPath := context.Crio.Root
crioImagePaths := map[string]struct{}{
@ -298,7 +298,7 @@ func (self *RealFsInfo) addCrioImagesLabel(context Context, mounts []mount.Mount
crioImagePaths[crioPath] = struct{}{}
crioPath = filepath.Dir(crioPath)
}
self.updateContainerImagesPath(LabelCrioImages, mounts, crioImagePaths)
i.updateContainerImagesPath(LabelCrioImages, mounts, crioImagePaths)
}
}
@ -324,7 +324,7 @@ func getDockerImagePaths(context Context) map[string]struct{} {
// This method compares the mountpoints with possible container image mount points. If a match is found,
// the label is added to the partition.
func (self *RealFsInfo) updateContainerImagesPath(label string, mounts []mount.MountInfo, containerImagePaths map[string]struct{}) {
func (i *RealFsInfo) updateContainerImagesPath(label string, mounts []mount.MountInfo, containerImagePaths map[string]struct{}) {
var useMount *mount.MountInfo
for _, m := range mounts {
if _, ok := containerImagePaths[m.MountPoint]; ok {
@ -334,27 +334,27 @@ func (self *RealFsInfo) updateContainerImagesPath(label string, mounts []mount.M
}
}
if useMount != nil {
self.partitions[useMount.Source] = partition{
i.partitions[useMount.Source] = partition{
fsType: useMount.FsType,
mountpoint: useMount.MountPoint,
major: uint(useMount.Major),
minor: uint(useMount.Minor),
}
self.labels[label] = useMount.Source
i.labels[label] = useMount.Source
}
}
func (self *RealFsInfo) GetDeviceForLabel(label string) (string, error) {
dev, ok := self.labels[label]
func (i *RealFsInfo) GetDeviceForLabel(label string) (string, error) {
dev, ok := i.labels[label]
if !ok {
return "", fmt.Errorf("non-existent label %q", label)
}
return dev, nil
}
func (self *RealFsInfo) GetLabelsForDevice(device string) ([]string, error) {
func (i *RealFsInfo) GetLabelsForDevice(device string) ([]string, error) {
labels := []string{}
for label, dev := range self.labels {
for label, dev := range i.labels {
if dev == device {
labels = append(labels, label)
}
@ -362,22 +362,22 @@ func (self *RealFsInfo) GetLabelsForDevice(device string) ([]string, error) {
return labels, nil
}
func (self *RealFsInfo) GetMountpointForDevice(dev string) (string, error) {
p, ok := self.partitions[dev]
func (i *RealFsInfo) GetMountpointForDevice(dev string) (string, error) {
p, ok := i.partitions[dev]
if !ok {
return "", fmt.Errorf("no partition info for device %q", dev)
}
return p.mountpoint, nil
}
func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) {
func (i *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) {
filesystems := make([]Fs, 0)
deviceSet := make(map[string]struct{})
diskStatsMap, err := getDiskStatsMap("/proc/diskstats")
if err != nil {
return nil, err
}
for device, partition := range self.partitions {
for device, partition := range i.partitions {
_, hasMount := mountSet[partition.mountpoint]
_, hasDevice := deviceSet[device]
if mountSet == nil || (hasMount && !hasDevice) {
@ -481,8 +481,8 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
return diskStatsMap, nil
}
func (self *RealFsInfo) GetGlobalFsInfo() ([]Fs, error) {
return self.GetFsInfoForPath(nil)
func (i *RealFsInfo) GetGlobalFsInfo() ([]Fs, error) {
return i.GetFsInfoForPath(nil)
}
func major(devNumber uint64) uint {
@ -493,19 +493,19 @@ func minor(devNumber uint64) uint {
return uint((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
}
func (self *RealFsInfo) GetDeviceInfoByFsUUID(uuid string) (*DeviceInfo, error) {
deviceName, found := self.fsUUIDToDeviceName[uuid]
func (i *RealFsInfo) GetDeviceInfoByFsUUID(uuid string) (*DeviceInfo, error) {
deviceName, found := i.fsUUIDToDeviceName[uuid]
if !found {
return nil, ErrNoSuchDevice
}
p, found := self.partitions[deviceName]
p, found := i.partitions[deviceName]
if !found {
return nil, fmt.Errorf("cannot find device %q in partitions", deviceName)
}
return &DeviceInfo{deviceName, p.major, p.minor}, nil
}
func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
func (i *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
buf := new(syscall.Stat_t)
err := syscall.Stat(dir, buf)
if err != nil {
@ -515,13 +515,13 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
// The type Dev in Stat_t is 32bit on mips.
major := major(uint64(buf.Dev)) // nolint: unconvert
minor := minor(uint64(buf.Dev)) // nolint: unconvert
for device, partition := range self.partitions {
for device, partition := range i.partitions {
if partition.major == major && partition.minor == minor {
return &DeviceInfo{device, major, minor}, nil
}
}
mount, found := self.mounts[dir]
mount, found := i.mounts[dir]
// try the parent dir if not found until we reach the root dir
// this is an issue on btrfs systems where the directory is not
// the subvolume
@ -534,7 +534,7 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
// trim "/" from the new parent path otherwise the next possible
// filepath.Split in the loop will not split the string any further
dir = strings.TrimSuffix(pathdir, "/")
mount, found = self.mounts[dir]
mount, found = i.mounts[dir]
}
if found && mount.FsType == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
@ -565,7 +565,7 @@ func GetDirUsage(dir string) (UsageInfo, error) {
return usage, fmt.Errorf("unsuported fileinfo for getting inode usage of %q", dir)
}
rootDevId := rootStat.Dev
rootDevID := rootStat.Dev
// dedupedInode stores inodes that could be duplicates (nlink > 1)
dedupedInodes := make(map[uint64]struct{})
@ -589,7 +589,7 @@ func GetDirUsage(dir string) (UsageInfo, error) {
return fmt.Errorf("unsupported fileinfo; could not convert to stat_t")
}
if s.Dev != rootDevId {
if s.Dev != rootDevID {
// don't descend into directories on other devices
return filepath.SkipDir
}
@ -611,7 +611,7 @@ func GetDirUsage(dir string) (UsageInfo, error) {
return usage, err
}
func (self *RealFsInfo) GetDirUsage(dir string) (UsageInfo, error) {
func (i *RealFsInfo) GetDirUsage(dir string) (UsageInfo, error) {
claimToken()
defer releaseToken()
return GetDirUsage(dir)
@ -752,7 +752,6 @@ func getBtrfsMajorMinorIds(mount *mount.MountInfo) (int, int, error) {
klog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(uint64(buf.Rdev))), int(minor(uint64(buf.Rdev)))) // nolint: unconvert
return int(major(uint64(buf.Dev))), int(minor(uint64(buf.Dev))), nil // nolint: unconvert
} else {
return 0, 0, fmt.Errorf("%s is not a block device", mount.Source)
}
return 0, 0, fmt.Errorf("%s is not a block device", mount.Source)
}
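GetDirFsDevice matches partitions by the (major, minor) pair decoded from Stat_t.Dev. The minor() above reads bits 0-7 plus bits 20-31; major() pairs with it by reading bits 8-19 in the usual glibc encoding. A worked example, assuming that standard layout:
package main

import "fmt"

func major(dev uint64) uint { return uint((dev >> 8) & 0xfff) }
func minor(dev uint64) uint { return uint((dev & 0xff) | ((dev >> 12) & 0xfff00)) }

func main() {
	// 8:1 (typically sda1) packs into 0x0801 in the classic 16-bit layout.
	fmt.Println(major(0x0801), minor(0x0801)) // 8 1
}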

View File

@ -99,9 +99,9 @@ type ContainerReference struct {
// Sorts by container name.
type ContainerReferenceSlice []ContainerReference
func (self ContainerReferenceSlice) Len() int { return len(self) }
func (self ContainerReferenceSlice) Swap(i, j int) { self[i], self[j] = self[j], self[i] }
func (self ContainerReferenceSlice) Less(i, j int) bool { return self[i].Name < self[j].Name }
func (s ContainerReferenceSlice) Len() int { return len(s) }
func (s ContainerReferenceSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ContainerReferenceSlice) Less(i, j int) bool { return s[i].Name < s[j].Name }
// ContainerInfoRequest is used when users check a container info from the REST API.
// It specifies how much data users want to get about a container
@ -126,10 +126,10 @@ func DefaultContainerInfoRequest() ContainerInfoRequest {
}
}
func (self *ContainerInfoRequest) Equals(other ContainerInfoRequest) bool {
return self.NumStats == other.NumStats &&
self.Start.Equal(other.Start) &&
self.End.Equal(other.End)
func (r *ContainerInfoRequest) Equals(other ContainerInfoRequest) bool {
return r.NumStats == other.NumStats &&
r.Start.Equal(other.Start) &&
r.End.Equal(other.End)
}
type ContainerInfo struct {
@ -151,30 +151,30 @@ type ContainerInfo struct {
// en/decoded. This will lead to small but acceptable differences between a
// ContainerInfo and its encode-then-decode version. Eq() is used to compare
// two ContainerInfo accepting small difference (<10ms) of Time fields.
func (self *ContainerInfo) Eq(b *ContainerInfo) bool {
func (ci *ContainerInfo) Eq(b *ContainerInfo) bool {
// If both self and b are nil, then Eq() returns true
if self == nil {
// If both ci and b are nil, then Eq() returns true
if ci == nil {
return b == nil
}
if b == nil {
return self == nil
return ci == nil
}
// For fields other than time.Time, we will compare them precisely.
// This would require that any slice should have same order.
if !reflect.DeepEqual(self.ContainerReference, b.ContainerReference) {
if !reflect.DeepEqual(ci.ContainerReference, b.ContainerReference) {
return false
}
if !reflect.DeepEqual(self.Subcontainers, b.Subcontainers) {
if !reflect.DeepEqual(ci.Subcontainers, b.Subcontainers) {
return false
}
if !self.Spec.Eq(&b.Spec) {
if !ci.Spec.Eq(&b.Spec) {
return false
}
for i, expectedStats := range b.Stats {
selfStats := self.Stats[i]
selfStats := ci.Stats[i]
if !expectedStats.Eq(selfStats) {
return false
}
@ -183,66 +183,66 @@ func (self *ContainerInfo) Eq(b *ContainerInfo) bool {
return true
}
func (self *ContainerSpec) Eq(b *ContainerSpec) bool {
func (s *ContainerSpec) Eq(b *ContainerSpec) bool {
// Creation within 1s of each other.
diff := self.CreationTime.Sub(b.CreationTime)
diff := s.CreationTime.Sub(b.CreationTime)
if (diff > time.Second) || (diff < -time.Second) {
return false
}
if self.HasCpu != b.HasCpu {
if s.HasCpu != b.HasCpu {
return false
}
if !reflect.DeepEqual(self.Cpu, b.Cpu) {
if !reflect.DeepEqual(s.Cpu, b.Cpu) {
return false
}
if self.HasMemory != b.HasMemory {
if s.HasMemory != b.HasMemory {
return false
}
if !reflect.DeepEqual(self.Memory, b.Memory) {
if !reflect.DeepEqual(s.Memory, b.Memory) {
return false
}
if self.HasHugetlb != b.HasHugetlb {
if s.HasHugetlb != b.HasHugetlb {
return false
}
if self.HasNetwork != b.HasNetwork {
if s.HasNetwork != b.HasNetwork {
return false
}
if self.HasProcesses != b.HasProcesses {
if s.HasProcesses != b.HasProcesses {
return false
}
if self.HasFilesystem != b.HasFilesystem {
if s.HasFilesystem != b.HasFilesystem {
return false
}
if self.HasDiskIo != b.HasDiskIo {
if s.HasDiskIo != b.HasDiskIo {
return false
}
if self.HasCustomMetrics != b.HasCustomMetrics {
if s.HasCustomMetrics != b.HasCustomMetrics {
return false
}
if self.Image != b.Image {
if s.Image != b.Image {
return false
}
return true
}
func (self *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats {
n := len(self.Stats) + 1
for i, s := range self.Stats {
func (ci *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats {
n := len(ci.Stats) + 1
for i, s := range ci.Stats {
if s.Timestamp.After(ref) {
n = i
break
}
}
if n > len(self.Stats) {
if n > len(ci.Stats) {
return nil
}
return self.Stats[n:]
return ci.Stats[n:]
}
func (self *ContainerInfo) StatsStartTime() time.Time {
func (ci *ContainerInfo) StatsStartTime() time.Time {
var ret time.Time
for _, s := range self.Stats {
for _, s := range ci.Stats {
if s.Timestamp.Before(ret) || ret.IsZero() {
ret = s.Timestamp
}
@ -250,10 +250,10 @@ func (self *ContainerInfo) StatsStartTime() time.Time {
return ret
}
func (self *ContainerInfo) StatsEndTime() time.Time {
func (ci *ContainerInfo) StatsEndTime() time.Time {
var ret time.Time
for i := len(self.Stats) - 1; i >= 0; i-- {
s := self.Stats[i]
for i := len(ci.Stats) - 1; i >= 0; i-- {
s := ci.Stats[i]
if s.Timestamp.After(ret) {
ret = s.Timestamp
}
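ContainerReferenceSlice above implements sort.Interface keyed on Name, so callers order references with the standard library. A quick self-contained sketch of the same three-method pattern on a stand-in type:
package main

import (
	"fmt"
	"sort"
)

type ref struct{ Name string }
type refSlice []ref

func (s refSlice) Len() int           { return len(s) }
func (s refSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s refSlice) Less(i, j int) bool { return s[i].Name < s[j].Name }

func main() {
	refs := refSlice{{"/docker/b"}, {"/docker/a"}}
	sort.Sort(refs)
	fmt.Println(refs[0].Name) // /docker/a
}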

View File

@ -59,8 +59,8 @@ type Cache struct {
Level int `json:"level"`
}
func (self *Node) FindCore(id int) (bool, int) {
for i, n := range self.Cores {
func (n *Node) FindCore(id int) (bool, int) {
for i, n := range n.Cores {
if n.Id == id {
return true, i
}
@ -68,30 +68,30 @@ func (self *Node) FindCore(id int) (bool, int) {
return false, -1
}
func (self *Node) AddThread(thread int, core int) {
func (n *Node) AddThread(thread int, core int) {
var coreIdx int
if core == -1 {
// Assume one hyperthread per core when topology data is missing.
core = thread
}
ok, coreIdx := self.FindCore(core)
ok, coreIdx := n.FindCore(core)
if !ok {
// New core
core := Core{Id: core}
self.Cores = append(self.Cores, core)
coreIdx = len(self.Cores) - 1
n.Cores = append(n.Cores, core)
coreIdx = len(n.Cores) - 1
}
self.Cores[coreIdx].Threads = append(self.Cores[coreIdx].Threads, thread)
n.Cores[coreIdx].Threads = append(n.Cores[coreIdx].Threads, thread)
}
func (self *Node) AddNodeCache(c Cache) {
self.Caches = append(self.Caches, c)
func (n *Node) AddNodeCache(c Cache) {
n.Caches = append(n.Caches, c)
}
func (self *Node) AddPerCoreCache(c Cache) {
for idx := range self.Cores {
self.Cores[idx].Caches = append(self.Cores[idx].Caches, c)
func (n *Node) AddPerCoreCache(c Cache) {
for idx := range n.Cores {
n.Cores[idx].Caches = append(n.Cores[idx].Caches, c)
}
}
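AddThread looks up the core by ID, creates it on first sight, and falls back to one hyperthread per core when topology data is missing (core == -1). A usage sketch, assuming these types live at the usual info/v1 import path:
package main

import (
	"fmt"

	info "github.com/google/cadvisor/info/v1"
)

func main() {
	n := info.Node{Id: 0}
	n.AddThread(0, 0)  // first thread creates core 0
	n.AddThread(6, 0)  // second hyperthread lands on the same core
	n.AddThread(1, -1) // no topology data: core ID defaults to the thread ID
	fmt.Println(len(n.Cores)) // 2
}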

View File

@ -155,70 +155,70 @@ type HostnameInfo struct {
}
// Returns: http://<host>:<port>/
func (self HostnameInfo) FullHostname() string {
return fmt.Sprintf("http://%s:%d/", self.Host, self.Port)
func (h HostnameInfo) FullHostname() string {
return fmt.Sprintf("http://%s:%d/", h.Host, h.Port)
}
func (self *realFramework) T() *testing.T {
return self.t
func (f *realFramework) T() *testing.T {
return f.t
}
func (self *realFramework) Hostname() HostnameInfo {
return self.hostname
func (f *realFramework) Hostname() HostnameInfo {
return f.hostname
}
func (self *realFramework) Shell() ShellActions {
return self.shellActions
func (f *realFramework) Shell() ShellActions {
return f.shellActions
}
func (self *realFramework) Docker() DockerActions {
return self.dockerActions
func (f *realFramework) Docker() DockerActions {
return f.dockerActions
}
func (self *realFramework) Cadvisor() CadvisorActions {
return self
func (f *realFramework) Cadvisor() CadvisorActions {
return f
}
// Call all cleanup functions.
func (self *realFramework) Cleanup() {
for _, cleanupFunc := range self.cleanups {
func (f *realFramework) Cleanup() {
for _, cleanupFunc := range f.cleanups {
cleanupFunc()
}
}
// Gets a client to the cAdvisor being tested.
func (self *realFramework) Client() *client.Client {
if self.cadvisorClient == nil {
cadvisorClient, err := client.NewClient(self.Hostname().FullHostname())
func (f *realFramework) Client() *client.Client {
if f.cadvisorClient == nil {
cadvisorClient, err := client.NewClient(f.Hostname().FullHostname())
if err != nil {
self.t.Fatalf("Failed to instantiate the cAdvisor client: %v", err)
f.t.Fatalf("Failed to instantiate the cAdvisor client: %v", err)
}
self.cadvisorClient = cadvisorClient
f.cadvisorClient = cadvisorClient
}
return self.cadvisorClient
return f.cadvisorClient
}
// Gets a v2 client to the cAdvisor being tested.
func (self *realFramework) ClientV2() *v2.Client {
if self.cadvisorClientV2 == nil {
cadvisorClientV2, err := v2.NewClient(self.Hostname().FullHostname())
func (f *realFramework) ClientV2() *v2.Client {
if f.cadvisorClientV2 == nil {
cadvisorClientV2, err := v2.NewClient(f.Hostname().FullHostname())
if err != nil {
self.t.Fatalf("Failed to instantiate the cAdvisor client: %v", err)
f.t.Fatalf("Failed to instantiate the cAdvisor client: %v", err)
}
self.cadvisorClientV2 = cadvisorClientV2
f.cadvisorClientV2 = cadvisorClientV2
}
return self.cadvisorClientV2
return f.cadvisorClientV2
}
func (self dockerActions) RunPause() string {
return self.Run(DockerRunArgs{
func (a dockerActions) RunPause() string {
return a.Run(DockerRunArgs{
Image: "kubernetes/pause",
})
}
// Run the specified command in a Docker busybox container.
func (self dockerActions) RunBusybox(cmd ...string) string {
return self.Run(DockerRunArgs{
func (a dockerActions) RunBusybox(cmd ...string) string {
return a.Run(DockerRunArgs{
Image: "busybox",
}, cmd...)
}
@ -240,36 +240,36 @@ type DockerRunArgs struct {
// e.g.:
// RunDockerContainer(DockerRunArgs{Image: "busybox"}, "ping", "www.google.com")
// -> docker run busybox ping www.google.com
func (self dockerActions) Run(args DockerRunArgs, cmd ...string) string {
func (a dockerActions) Run(args DockerRunArgs, cmd ...string) string {
dockerCommand := append(append([]string{"docker", "run", "-d"}, args.Args...), args.Image)
dockerCommand = append(dockerCommand, cmd...)
output, _ := self.fm.Shell().Run("sudo", dockerCommand...)
output, _ := a.fm.Shell().Run("sudo", dockerCommand...)
// The last line is the container ID.
elements := strings.Fields(output)
containerId := elements[len(elements)-1]
containerID := elements[len(elements)-1]
self.fm.cleanups = append(self.fm.cleanups, func() {
self.fm.Shell().Run("sudo", "docker", "rm", "-f", containerId)
a.fm.cleanups = append(a.fm.cleanups, func() {
a.fm.Shell().Run("sudo", "docker", "rm", "-f", containerID)
})
return containerId
return containerID
}
func (self dockerActions) Version() []string {
func (a dockerActions) Version() []string {
dockerCommand := []string{"docker", "version", "-f", "'{{.Server.Version}}'"}
output, _ := self.fm.Shell().Run("sudo", dockerCommand...)
output, _ := a.fm.Shell().Run("sudo", dockerCommand...)
output = strings.TrimSpace(output)
ret := strings.Split(output, ".")
if len(ret) != 3 {
self.fm.T().Fatalf("invalid version %v", output)
a.fm.T().Fatalf("invalid version %v", output)
}
return ret
}
func (self dockerActions) StorageDriver() string {
func (a dockerActions) StorageDriver() string {
dockerCommand := []string{"docker", "info"}
output, _ := self.fm.Shell().Run("sudo", dockerCommand...)
output, _ := a.fm.Shell().Run("sudo", dockerCommand...)
if len(output) < 1 {
self.fm.T().Fatalf("failed to find docker storage driver - %v", output)
a.fm.T().Fatalf("failed to find docker storage driver - %v", output)
}
for _, line := range strings.Split(output, "\n") {
line = strings.TrimSpace(line)
@ -284,30 +284,30 @@ func (self dockerActions) StorageDriver() string {
}
}
}
self.fm.T().Fatalf("failed to find docker storage driver from info - %v", output)
a.fm.T().Fatalf("failed to find docker storage driver from info - %v", output)
return Unknown
}
func (self dockerActions) RunStress(args DockerRunArgs, cmd ...string) string {
func (a dockerActions) RunStress(args DockerRunArgs, cmd ...string) string {
dockerCommand := append(append(append(append([]string{"docker", "run", "-m=4M", "-d", "-t", "-i"}, args.Args...), args.Image), args.InnerArgs...), cmd...)
output, _ := self.fm.Shell().RunStress("sudo", dockerCommand...)
output, _ := a.fm.Shell().RunStress("sudo", dockerCommand...)
// The last line is the container ID.
if len(output) < 1 {
self.fm.T().Fatalf("need 1 arguments in output %v to get the name but have %v", output, len(output))
a.fm.T().Fatalf("need 1 arguments in output %v to get the name but have %v", output, len(output))
}
elements := strings.Fields(output)
containerId := elements[len(elements)-1]
containerID := elements[len(elements)-1]
self.fm.cleanups = append(self.fm.cleanups, func() {
self.fm.Shell().Run("sudo", "docker", "rm", "-f", containerId)
a.fm.cleanups = append(a.fm.cleanups, func() {
a.fm.Shell().Run("sudo", "docker", "rm", "-f", containerID)
})
return containerId
return containerID
}
func (self shellActions) wrapSsh(command string, args ...string) *exec.Cmd {
cmd := []string{self.fm.Hostname().Host, "--", "sh", "-c", "\"", command}
func (a shellActions) wrapSSH(command string, args ...string) *exec.Cmd {
cmd := []string{a.fm.Hostname().Host, "--", "sh", "-c", "\"", command}
cmd = append(cmd, args...)
cmd = append(cmd, "\"")
if *sshOptions != "" {
@ -316,14 +316,14 @@ func (self shellActions) wrapSsh(command string, args ...string) *exec.Cmd {
return exec.Command("ssh", cmd...)
}
func (self shellActions) Run(command string, args ...string) (string, string) {
func (a shellActions) Run(command string, args ...string) (string, string) {
var cmd *exec.Cmd
if self.fm.Hostname().Host == "localhost" {
if a.fm.Hostname().Host == "localhost" {
// Just run locally.
cmd = exec.Command(command, args...)
} else {
// We must SSH to the remote machine and run the command.
cmd = self.wrapSsh(command, args...)
cmd = a.wrapSSH(command, args...)
}
var stdout bytes.Buffer
var stderr bytes.Buffer
@ -332,20 +332,20 @@ func (self shellActions) Run(command string, args ...string) (string, string) {
klog.Infof("About to run - %v", cmd.Args)
err := cmd.Run()
if err != nil {
self.fm.T().Fatalf("Failed to run %q %v in %q with error: %q. Stdout: %q, Stderr: %s", command, args, self.fm.Hostname().Host, err, stdout.String(), stderr.String())
a.fm.T().Fatalf("Failed to run %q %v in %q with error: %q. Stdout: %q, Stderr: %s", command, args, a.fm.Hostname().Host, err, stdout.String(), stderr.String())
return "", ""
}
return stdout.String(), stderr.String()
}
func (self shellActions) RunStress(command string, args ...string) (string, string) {
func (a shellActions) RunStress(command string, args ...string) (string, string) {
var cmd *exec.Cmd
if self.fm.Hostname().Host == "localhost" {
if a.fm.Hostname().Host == "localhost" {
// Just run locally.
cmd = exec.Command(command, args...)
} else {
// We must SSH to the remote machine and run the command.
cmd = self.wrapSsh(command, args...)
cmd = a.wrapSSH(command, args...)
}
var stdout bytes.Buffer
var stderr bytes.Buffer
@ -353,7 +353,7 @@ func (self shellActions) RunStress(command string, args ...string) (string, stri
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
self.fm.T().Logf("Ran %q %v in %q and received error: %q. Stdout: %q, Stderr: %s", command, args, self.fm.Hostname().Host, err, stdout.String(), stderr.String())
a.fm.T().Logf("Ran %q %v in %q and received error: %q. Stdout: %q, Stderr: %s", command, args, a.fm.Hostname().Host, err, stdout.String(), stderr.String())
return stdout.String(), stderr.String()
}
return stdout.String(), stderr.String()

View File

@ -68,18 +68,18 @@ func TestDockerContainerById(t *testing.T) {
fm := framework.New(t)
defer fm.Cleanup()
containerId := fm.Docker().RunPause()
containerID := fm.Docker().RunPause()
// Wait for the container to show up.
waitForContainer(containerId, fm)
waitForContainer(containerID, fm)
request := &info.ContainerInfoRequest{
NumStats: 1,
}
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerID, request)
require.NoError(t, err)
sanityCheck(containerId, containerInfo, t)
sanityCheck(containerID, containerInfo, t)
}
// A Docker container in /docker/<name>
@ -124,10 +124,10 @@ func TestGetAllDockerContainers(t *testing.T) {
defer fm.Cleanup()
// Wait for the containers to show up.
containerId1 := fm.Docker().RunPause()
containerId2 := fm.Docker().RunPause()
waitForContainer(containerId1, fm)
waitForContainer(containerId2, fm)
containerID1 := fm.Docker().RunPause()
containerID2 := fm.Docker().RunPause()
waitForContainer(containerID1, fm)
waitForContainer(containerID2, fm)
request := &info.ContainerInfoRequest{
NumStats: 1,
@ -138,8 +138,8 @@ func TestGetAllDockerContainers(t *testing.T) {
if len(containersInfo) < 2 {
t.Fatalf("At least 2 Docker containers should exist, received %d: %+v", len(containersInfo), containersInfo)
}
sanityCheck(containerId1, findContainer(containerId1, containersInfo, t), t)
sanityCheck(containerId2, findContainer(containerId2, containersInfo, t), t)
sanityCheck(containerID1, findContainer(containerID1, containersInfo, t), t)
sanityCheck(containerID2, findContainer(containerID2, containersInfo, t), t)
}
// Check expected properties of a Docker container.
@ -148,7 +148,7 @@ func TestBasicDockerContainer(t *testing.T) {
defer fm.Cleanup()
containerName := fmt.Sprintf("test-basic-docker-container-%d", os.Getpid())
containerId := fm.Docker().Run(framework.DockerRunArgs{
containerID := fm.Docker().Run(framework.DockerRunArgs{
Image: "kubernetes/pause",
Args: []string{
"--name", containerName,
@ -156,16 +156,16 @@ func TestBasicDockerContainer(t *testing.T) {
})
// Wait for the container to show up.
waitForContainer(containerId, fm)
waitForContainer(containerID, fm)
request := &info.ContainerInfoRequest{
NumStats: 1,
}
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerID, request)
require.NoError(t, err)
// Check that the container is known by both its name and ID.
sanityCheck(containerId, containerInfo, t)
sanityCheck(containerID, containerInfo, t)
sanityCheck(containerName, containerInfo, t)
assert.Empty(t, containerInfo.Subcontainers, "Should not have subcontainers")
@ -187,7 +187,7 @@ func TestDockerContainerSpec(t *testing.T) {
labels = map[string]string{"bar": "baz"}
)
containerId := fm.Docker().Run(framework.DockerRunArgs{
containerID := fm.Docker().Run(framework.DockerRunArgs{
Image: image,
Args: []string{
"--cpu-shares", strconv.FormatUint(cpuShares, 10),
@ -199,14 +199,14 @@ func TestDockerContainerSpec(t *testing.T) {
})
// Wait for the container to show up.
waitForContainer(containerId, fm)
waitForContainer(containerID, fm)
request := &info.ContainerInfoRequest{
NumStats: 1,
}
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerID, request)
require.NoError(t, err)
sanityCheck(containerId, containerInfo, t)
sanityCheck(containerID, containerInfo, t)
assert := assert.New(t)
@ -229,20 +229,20 @@ func TestDockerContainerCpuStats(t *testing.T) {
defer fm.Cleanup()
// Wait for the container to show up.
containerId := fm.Docker().RunBusybox("ping", "www.google.com")
waitForContainer(containerId, fm)
containerID := fm.Docker().RunBusybox("ping", "www.google.com")
waitForContainer(containerID, fm)
request := &info.ContainerInfoRequest{
NumStats: 1,
}
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerID, request)
if err != nil {
t.Fatal(err)
}
sanityCheck(containerId, containerInfo, t)
sanityCheck(containerID, containerInfo, t)
// Checks for CpuStats.
checkCpuStats(t, containerInfo.Stats[0].Cpu)
checkCPUStats(t, containerInfo.Stats[0].Cpu)
}
// Check the memory ContainerStats.
@ -251,15 +251,15 @@ func TestDockerContainerMemoryStats(t *testing.T) {
defer fm.Cleanup()
// Wait for the container to show up.
containerId := fm.Docker().RunBusybox("ping", "www.google.com")
waitForContainer(containerId, fm)
containerID := fm.Docker().RunBusybox("ping", "www.google.com")
waitForContainer(containerID, fm)
request := &info.ContainerInfoRequest{
NumStats: 1,
}
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerID, request)
require.NoError(t, err)
sanityCheck(containerId, containerInfo, t)
sanityCheck(containerID, containerInfo, t)
// Checks for MemoryStats.
checkMemoryStats(t, containerInfo.Stats[0].Memory)
@ -271,16 +271,16 @@ func TestDockerContainerNetworkStats(t *testing.T) {
defer fm.Cleanup()
// Wait for the container to show up.
containerId := fm.Docker().RunBusybox("watch", "-n1", "wget", "http://www.google.com/")
waitForContainer(containerId, fm)
containerID := fm.Docker().RunBusybox("watch", "-n1", "wget", "http://www.google.com/")
waitForContainer(containerID, fm)
time.Sleep(10 * time.Second)
request := &info.ContainerInfoRequest{
NumStats: 1,
}
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerID, request)
require.NoError(t, err)
sanityCheck(containerId, containerInfo, t)
sanityCheck(containerID, containerInfo, t)
stat := containerInfo.Stats[0]
ifaceStats := stat.Network.InterfaceStats
@ -326,8 +326,8 @@ func TestDockerFilesystemStats(t *testing.T) {
if fm.Hostname().Host != "localhost" {
dockerCmd = fmt.Sprintf("'%s'", dockerCmd)
}
containerId := fm.Docker().RunBusybox("/bin/sh", "-c", dockerCmd)
waitForContainer(containerId, fm)
containerID := fm.Docker().RunBusybox("/bin/sh", "-c", dockerCmd)
waitForContainer(containerID, fm)
request := &v2.RequestOptions{
IdType: v2.TypeDocker,
Count: 1,
@ -340,7 +340,7 @@ func TestDockerFilesystemStats(t *testing.T) {
pass := false
// We need to wait for the `dd` operation to complete.
for i := 0; i < 10; i++ {
containerInfo, err := fm.Cadvisor().ClientV2().Stats(containerId, request)
containerInfo, err := fm.Cadvisor().ClientV2().Stats(containerID, request)
if err != nil {
t.Logf("%v stats unavailable - %v", time.Now().String(), err)
t.Logf("retrying after %s...", sleepDuration.String())
@ -355,7 +355,7 @@ func TestDockerFilesystemStats(t *testing.T) {
for _, cInfo := range containerInfo {
info = cInfo
}
sanityCheckV2(containerId, info, t)
sanityCheckV2(containerID, info, t)
require.NotNil(t, info.Stats[0], "got info: %+v", info)
require.NotNil(t, info.Stats[0].Filesystem, "got info: %+v", info)

View File

@ -39,7 +39,7 @@ func TestStreamingEventInformationIsReturned(t *testing.T) {
}()
// Create a short-lived container.
containerId := fm.Docker().RunBusybox("sleep", "2")
containerID := fm.Docker().RunBusybox("sleep", "2")
// Wait for the deletion event.
timeout := time.After(30 * time.Second)
@ -48,28 +48,28 @@ func TestStreamingEventInformationIsReturned(t *testing.T) {
select {
case ev := <-einfo:
if ev.EventType == info.EventContainerDeletion {
if strings.Contains(ev.ContainerName, containerId) {
if strings.Contains(ev.ContainerName, containerID) {
done = true
}
}
case <-timeout:
t.Errorf(
"timeout happened before destruction event was detected for container %q", containerId)
"timeout happened before destruction event was detected for container %q", containerID)
done = true
}
}
// We should have already received a creation event.
waitForStaticEvent(containerId, "?creation_events=true&subcontainers=true", t, fm, info.EventContainerCreation)
waitForStaticEvent(containerID, "?creation_events=true&subcontainers=true", t, fm, info.EventContainerCreation)
}
func waitForStaticEvent(containerId string, urlRequest string, t *testing.T, fm framework.Framework, typeEvent info.EventType) {
func waitForStaticEvent(containerID string, urlRequest string, t *testing.T, fm framework.Framework, typeEvent info.EventType) {
einfo, err := fm.Cadvisor().Client().EventStaticInfo(urlRequest)
require.NoError(t, err)
found := false
for _, ev := range einfo {
if ev.EventType == typeEvent {
if strings.Contains(ev.ContainerName, containerId) {
if strings.Contains(ev.ContainerName, containerID) {
found = true
break
}

View File

@ -42,7 +42,7 @@ func inDelta(t *testing.T, expected, actual, delta uint64, description string) {
}
// Checks that CPU stats are valid.
func checkCpuStats(t *testing.T, stat info.CpuStats) {
func checkCPUStats(t *testing.T, stat info.CpuStats) {
assert := assert.New(t)
assert.NotEqual(0, stat.Usage.Total, "Total CPU usage should not be zero")

View File

@ -36,8 +36,8 @@ import (
const hugepagesDirectory = "/sys/kernel/mm/hugepages/"
const memoryControllerPath = "/sys/devices/system/edac/mc/"
var machineIdFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.")
var bootIdFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. Use the first one that exists.")
var machineIDFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.")
var bootIDFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. Use the first one that exists.")
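Both flags take a comma-separated list, and getInfoFromFiles (below) returns the contents of the first file that exists. A standalone sketch of that first-match lookup; the helper name and the trimming are illustrative, not the exact implementation:
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func firstExisting(filePaths string) string {
	for _, p := range strings.Split(filePaths, ",") {
		if data, err := ioutil.ReadFile(p); err == nil {
			return strings.TrimSpace(string(data)) // first readable file wins
		}
	}
	return ""
}

func main() {
	fmt.Println(firstExisting("/etc/machine-id,/var/lib/dbus/machine-id"))
}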
func getInfoFromFiles(filePaths string) string {
if len(filePaths) == 0 {
@ -130,9 +130,9 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
DiskMap: diskMap,
NetworkDevices: netDevices,
Topology: topology,
MachineID: getInfoFromFiles(filepath.Join(rootFs, *machineIdFilePath)),
MachineID: getInfoFromFiles(filepath.Join(rootFs, *machineIDFilePath)),
SystemUUID: systemUUID,
BootID: getInfoFromFiles(filepath.Join(rootFs, *bootIdFilePath)),
BootID: getInfoFromFiles(filepath.Join(rootFs, *bootIDFilePath)),
CloudProvider: cloudProvider,
InstanceType: instanceType,
InstanceID: instanceID,

View File

@ -37,19 +37,18 @@ func getOperatingSystem() (string, error) {
return "", err
}
return string(osName), nil
} else {
bytes, err := ioutil.ReadFile("/etc/os-release")
if err != nil && os.IsNotExist(err) {
// /usr/lib/os-release in stateless systems like Clear Linux
bytes, err = ioutil.ReadFile("/usr/lib/os-release")
}
if err != nil {
return "", fmt.Errorf("error opening file : %v", err)
}
line := rex.FindAllStringSubmatch(string(bytes), -1)
if len(line) > 0 {
return strings.Trim(line[0][2], "\""), nil
}
return "Linux", nil
}
bytes, err := ioutil.ReadFile("/etc/os-release")
if err != nil && os.IsNotExist(err) {
// /usr/lib/os-release in stateless systems like Clear Linux
bytes, err = ioutil.ReadFile("/usr/lib/os-release")
}
if err != nil {
return "", fmt.Errorf("error opening file : %v", err)
}
line := rex.FindAllStringSubmatch(string(bytes), -1)
if len(line) > 0 {
return strings.Trim(line[0][2], "\""), nil
}
return "Linux", nil
}

View File

@ -171,7 +171,7 @@ func TestTopology(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, 12, numCores)
expected_topology := []info.Node{}
expectedTopology := []info.Node{}
numNodes := 2
numCoresPerNode := 3
numThreads := 2
@ -194,10 +194,10 @@ func TestTopology(t *testing.T) {
}
node.Cores = append(node.Cores, core)
}
expected_topology = append(expected_topology, node)
expectedTopology = append(expectedTopology, node)
}
assert.NotNil(t, reflect.DeepEqual(topology, expected_topology))
assert.NotNil(t, reflect.DeepEqual(topology, expectedTopology))
}
func TestTopologyEmptySysFs(t *testing.T) {

View File

@ -107,24 +107,24 @@ func jitter(duration time.Duration, maxFactor float64) time.Duration {
return wait
}
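The jitter body is elided by this hunk; the conventional implementation (as in Kubernetes' wait.Jitter, which this sketch assumes) scales the interval by a random factor in [1, 1+maxFactor) so that containers' housekeeping wake-ups don't synchronize:
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func jitter(duration time.Duration, maxFactor float64) time.Duration {
	if maxFactor <= 0.0 {
		maxFactor = 1.0
	}
	// Random point in [duration, duration*(1+maxFactor)).
	return duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
}

func main() {
	fmt.Println(jitter(time.Second, 1.0)) // somewhere in [1s, 2s)
}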
func (c *containerData) Start() error {
go c.housekeeping()
func (cd *containerData) Start() error {
go cd.housekeeping()
return nil
}
func (c *containerData) Stop() error {
err := c.memoryCache.RemoveContainer(c.info.Name)
func (cd *containerData) Stop() error {
err := cd.memoryCache.RemoveContainer(cd.info.Name)
if err != nil {
return err
}
close(c.stop)
c.perfCollector.Destroy()
close(cd.stop)
cd.perfCollector.Destroy()
return nil
}
func (c *containerData) allowErrorLogging() bool {
if c.clock.Since(c.lastErrorTime) > time.Minute {
c.lastErrorTime = c.clock.Now()
func (cd *containerData) allowErrorLogging() bool {
if cd.clock.Since(cd.lastErrorTime) > time.Minute {
cd.lastErrorTime = cd.clock.Now()
return true
}
return false
@ -134,22 +134,22 @@ func (c *containerData) allowErrorLogging() bool {
// It is designed to be used in conjunction with periodic housekeeping, and will cause the timer for
// periodic housekeeping to reset. This should be used sparingly, as calling OnDemandHousekeeping frequently
// can have serious performance costs.
func (c *containerData) OnDemandHousekeeping(maxAge time.Duration) {
if c.clock.Since(c.statsLastUpdatedTime) > maxAge {
func (cd *containerData) OnDemandHousekeeping(maxAge time.Duration) {
if cd.clock.Since(cd.statsLastUpdatedTime) > maxAge {
housekeepingFinishedChan := make(chan struct{})
c.onDemandChan <- housekeepingFinishedChan
cd.onDemandChan <- housekeepingFinishedChan
select {
case <-c.stop:
case <-cd.stop:
case <-housekeepingFinishedChan:
}
}
}
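The mechanism above is a per-request rendezvous: the caller pushes a fresh channel into onDemandChan and blocks until housekeeping closes it (or the container stops). A stripped-down, runnable sketch of the same handshake:
package main

import "fmt"

func main() {
	onDemand := make(chan chan struct{})
	stop := make(chan struct{})

	// Housekeeping side: take one request, do the work, then close
	// the per-request channel to release the waiting caller.
	go func() {
		done := <-onDemand
		// ... update stats here ...
		close(done)
	}()

	// Caller side, as in OnDemandHousekeeping.
	done := make(chan struct{})
	onDemand <- done
	select {
	case <-stop:
	case <-done:
		fmt.Println("housekeeping finished")
	}
}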
// notifyOnDemand notifies all calls to OnDemandHousekeeping that housekeeping is finished
func (c *containerData) notifyOnDemand() {
func (cd *containerData) notifyOnDemand() {
for {
select {
case finishedChan := <-c.onDemandChan:
case finishedChan := <-cd.onDemandChan:
close(finishedChan)
default:
return
@ -157,42 +157,42 @@ func (c *containerData) notifyOnDemand() {
}
}
func (c *containerData) GetInfo(shouldUpdateSubcontainers bool) (*containerInfo, error) {
func (cd *containerData) GetInfo(shouldUpdateSubcontainers bool) (*containerInfo, error) {
// Get spec and subcontainers.
if c.clock.Since(c.infoLastUpdatedTime) > 5*time.Second {
err := c.updateSpec()
if cd.clock.Since(cd.infoLastUpdatedTime) > 5*time.Second {
err := cd.updateSpec()
if err != nil {
return nil, err
}
if shouldUpdateSubcontainers {
err = c.updateSubcontainers()
err = cd.updateSubcontainers()
if err != nil {
return nil, err
}
}
c.infoLastUpdatedTime = c.clock.Now()
cd.infoLastUpdatedTime = cd.clock.Now()
}
c.lock.Lock()
defer c.lock.Unlock()
cd.lock.Lock()
defer cd.lock.Unlock()
cInfo := containerInfo{
Subcontainers: c.info.Subcontainers,
Spec: c.info.Spec,
Subcontainers: cd.info.Subcontainers,
Spec: cd.info.Spec,
}
cInfo.Id = c.info.Id
cInfo.Name = c.info.Name
cInfo.Aliases = c.info.Aliases
cInfo.Namespace = c.info.Namespace
cInfo.Id = cd.info.Id
cInfo.Name = cd.info.Name
cInfo.Aliases = cd.info.Aliases
cInfo.Namespace = cd.info.Namespace
return &cInfo, nil
}
func (c *containerData) DerivedStats() (v2.DerivedStats, error) {
if c.summaryReader == nil {
return v2.DerivedStats{}, fmt.Errorf("derived stats not enabled for container %q", c.info.Name)
func (cd *containerData) DerivedStats() (v2.DerivedStats, error) {
if cd.summaryReader == nil {
return v2.DerivedStats{}, fmt.Errorf("derived stats not enabled for container %q", cd.info.Name)
}
return c.summaryReader.DerivedStats()
return cd.summaryReader.DerivedStats()
}
func (c *containerData) getCgroupPath(cgroups string) (string, error) {
func (cd *containerData) getCgroupPath(cgroups string) (string, error) {
if cgroups == "-" {
return "/", nil
}
@ -210,8 +210,8 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) {
// Returns contents of a file inside the container root.
// Takes in a path relative to container root.
func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte, error) {
pids, err := c.getContainerPids(inHostNamespace)
func (cd *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte, error) {
pids, err := cd.getContainerPids(inHostNamespace)
if err != nil {
return nil, err
}
@ -229,11 +229,11 @@ func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte,
}
}
// No process paths could be found. Declare config non-existent.
return nil, fmt.Errorf("file %q does not exist.", filepath)
return nil, fmt.Errorf("file %q does not exist", filepath)
}
// Return output for ps command in host /proc with specified format
func (c *containerData) getPsOutput(inHostNamespace bool, format string) ([]byte, error) {
func (cd *containerData) getPsOutput(inHostNamespace bool, format string) ([]byte, error) {
args := []string{}
command := "ps"
if !inHostNamespace {
@ -250,9 +250,9 @@ func (c *containerData) getPsOutput(inHostNamespace bool, format string) ([]byte
// Get pids of processes in this container.
// A slightly lighter-weight call than GetProcessList if other details are not required.
func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error) {
func (cd *containerData) getContainerPids(inHostNamespace bool) ([]string, error) {
format := "pid,cgroup"
out, err := c.getPsOutput(inHostNamespace, format)
out, err := cd.getPsOutput(inHostNamespace, format)
if err != nil {
return nil, err
}
@ -268,26 +268,26 @@ func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error)
return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
}
pid := fields[0]
cgroup, err := c.getCgroupPath(fields[1])
cgroup, err := cd.getCgroupPath(fields[1])
if err != nil {
return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[1], err)
}
if c.info.Name == cgroup {
if cd.info.Name == cgroup {
pids = append(pids, pid)
}
}
return pids, nil
}
func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
func (cd *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
// report all processes for root.
isRoot := c.info.Name == "/"
isRoot := cd.info.Name == "/"
rootfs := "/"
if !inHostNamespace {
rootfs = "/rootfs"
}
format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup"
out, err := c.getPsOutput(inHostNamespace, format)
out, err := cd.getPsOutput(inHostNamespace, format)
if err != nil {
return nil, err
}
@ -310,7 +310,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
if err != nil {
return nil, fmt.Errorf("invalid ppid %q: %v", fields[2], err)
}
percentCpu, err := strconv.ParseFloat(fields[4], 32)
percentCPU, err := strconv.ParseFloat(fields[4], 32)
if err != nil {
return nil, fmt.Errorf("invalid cpu percent %q: %v", fields[4], err)
}
@ -330,7 +330,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
}
// convert to bytes
vs *= 1024
cgroup, err := c.getCgroupPath(fields[11])
cgroup, err := cd.getCgroupPath(fields[11])
if err != nil {
return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[11], err)
}
@ -353,13 +353,13 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
}
fdCount = len(fds)
if isRoot || c.info.Name == cgroup {
if isRoot || cd.info.Name == cgroup {
processes = append(processes, v2.ProcessInfo{
User: fields[0],
Pid: pid,
Ppid: ppid,
StartTime: fields[3],
PercentCpu: float32(percentCpu),
PercentCpu: float32(percentCPU),
PercentMemory: float32(percentMem),
RSS: rss,
VirtualSize: vs,
@ -429,45 +429,45 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
}
// Determine when the next housekeeping should occur.
func (self *containerData) nextHousekeepingInterval() time.Duration {
if self.allowDynamicHousekeeping {
func (cd *containerData) nextHousekeepingInterval() time.Duration {
if cd.allowDynamicHousekeeping {
var empty time.Time
stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2)
stats, err := cd.memoryCache.RecentStats(cd.info.Name, empty, empty, 2)
if err != nil {
if self.allowErrorLogging() {
klog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err)
if cd.allowErrorLogging() {
klog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", cd.info.Name, err)
}
} else if len(stats) == 2 {
// TODO(vishnuk): Use no processes as a signal.
// Raise the interval if usage hasn't changed in the last housekeeping.
if stats[0].StatsEq(stats[1]) && (self.housekeepingInterval < self.maxHousekeepingInterval) {
self.housekeepingInterval *= 2
if self.housekeepingInterval > self.maxHousekeepingInterval {
self.housekeepingInterval = self.maxHousekeepingInterval
if stats[0].StatsEq(stats[1]) && (cd.housekeepingInterval < cd.maxHousekeepingInterval) {
cd.housekeepingInterval *= 2
if cd.housekeepingInterval > cd.maxHousekeepingInterval {
cd.housekeepingInterval = cd.maxHousekeepingInterval
}
} else if self.housekeepingInterval != *HousekeepingInterval {
} else if cd.housekeepingInterval != *HousekeepingInterval {
// Lower interval back to the baseline.
self.housekeepingInterval = *HousekeepingInterval
cd.housekeepingInterval = *HousekeepingInterval
}
}
}
return jitter(self.housekeepingInterval, 1.0)
return jitter(cd.housekeepingInterval, 1.0)
}
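So with dynamic housekeeping enabled, the interval doubles whenever the last two samples are equal, is capped at the maximum, and snaps back to the baseline as soon as usage changes. A compact sketch of that backoff (names are illustrative):
package main

import (
	"fmt"
	"time"
)

func nextInterval(cur, base, max time.Duration, unchanged bool) time.Duration {
	if unchanged && cur < max {
		cur *= 2
		if cur > max {
			cur = max // never exceed the configured maximum
		}
	} else if !unchanged {
		cur = base // usage moved: drop back to the baseline
	}
	return cur
}

func main() {
	cur := time.Second
	for i := 0; i < 4; i++ {
		cur = nextInterval(cur, time.Second, 15*time.Second, true)
		fmt.Println(cur) // 2s, 4s, 8s, 15s
	}
}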
// TODO(vmarmol): Implement stats collecting as a custom collector.
func (c *containerData) housekeeping() {
// Start any background goroutines - must be cleaned up in c.handler.Cleanup().
c.handler.Start()
defer c.handler.Cleanup()
func (cd *containerData) housekeeping() {
// Start any background goroutines - must be cleaned up in cd.handler.Cleanup().
cd.handler.Start()
defer cd.handler.Cleanup()
// Initialize cpuload reader - must be cleaned up in c.loadReader.Stop()
if c.loadReader != nil {
err := c.loadReader.Start()
// Initialize cpuload reader - must be cleaned up in cd.loadReader.Stop()
if cd.loadReader != nil {
err := cd.loadReader.Start()
if err != nil {
klog.Warningf("Could not start cpu load stat collector for %q: %s", c.info.Name, err)
klog.Warningf("Could not start cpu load stat collector for %q: %s", cd.info.Name, err)
}
defer c.loadReader.Stop()
defer cd.loadReader.Stop()
}
// Long housekeeping is either 100ms or half of the housekeeping interval.
@ -477,11 +477,11 @@ func (c *containerData) housekeeping() {
}
// Housekeep every second.
klog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
houseKeepingTimer := c.clock.NewTimer(0 * time.Second)
klog.V(3).Infof("Start housekeeping for container %q\n", cd.info.Name)
houseKeepingTimer := cd.clock.NewTimer(0 * time.Second)
defer houseKeepingTimer.Stop()
for {
if !c.housekeepingTick(houseKeepingTimer.C(), longHousekeeping) {
if !cd.housekeepingTick(houseKeepingTimer.C(), longHousekeeping) {
return
}
// Stop and drain the timer so that it is safe to reset it
@ -492,74 +492,74 @@ func (c *containerData) housekeeping() {
}
}
// Log usage if asked to do so.
if c.logUsage {
if cd.logUsage {
const numSamples = 60
var empty time.Time
stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples)
stats, err := cd.memoryCache.RecentStats(cd.info.Name, empty, empty, numSamples)
if err != nil {
if c.allowErrorLogging() {
klog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
if cd.allowErrorLogging() {
klog.Warningf("[%s] Failed to get recent stats for logging usage: %v", cd.info.Name, err)
}
} else if len(stats) < numSamples {
// Ignore, not enough stats yet.
} else {
usageCpuNs := uint64(0)
usageCPUNs := uint64(0)
for i := range stats {
if i > 0 {
usageCpuNs += (stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total)
usageCPUNs += (stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total)
}
}
usageMemory := stats[numSamples-1].Memory.Usage
instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds())
usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
usageInCores := float64(usageCPUNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
usageInHuman := units.HumanSize(float64(usageMemory))
// Don't set verbosity since this is already protected by the logUsage flag.
klog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
klog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", cd.info.Name, instantUsageInCores, usageInCores, usageInHuman)
}
}
houseKeepingTimer.Reset(c.nextHousekeepingInterval())
houseKeepingTimer.Reset(cd.nextHousekeepingInterval())
}
}
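In the usage-logging branch above, usageInCores is just CPU time consumed divided by wall-clock time elapsed over the sample window. For example, if the 60-sample window spans 60 s of wall time (6.0e10 ns) during which cumulative CPU usage grew by 30 s (3.0e10 ns), the container averaged 3.0e10 / 6.0e10 = 0.5 cores; instantUsageInCores applies the same ratio to just the last two samples.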
func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeeping time.Duration) bool {
func (cd *containerData) housekeepingTick(timer <-chan time.Time, longHousekeeping time.Duration) bool {
select {
case <-c.stop:
case <-cd.stop:
// Stop housekeeping when signaled.
return false
case finishedChan := <-c.onDemandChan:
case finishedChan := <-cd.onDemandChan:
// notify the calling function once housekeeping has completed
defer close(finishedChan)
case <-timer:
}
start := c.clock.Now()
err := c.updateStats()
start := cd.clock.Now()
err := cd.updateStats()
if err != nil {
if c.allowErrorLogging() {
klog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err)
if cd.allowErrorLogging() {
klog.Warningf("Failed to update stats for container \"%s\": %s", cd.info.Name, err)
}
}
// Log if housekeeping took too long.
duration := c.clock.Since(start)
duration := cd.clock.Since(start)
if duration >= longHousekeeping {
klog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
klog.V(3).Infof("[%s] Housekeeping took %s", cd.info.Name, duration)
}
c.notifyOnDemand()
c.statsLastUpdatedTime = c.clock.Now()
cd.notifyOnDemand()
cd.statsLastUpdatedTime = cd.clock.Now()
return true
}
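The onDemandChan handshake above lets a caller force an immediate housekeeping pass and block until it finishes: the caller hands over a channel, and housekeepingTick closes it once stats are updated. A stripped-down sketch of the pattern, with placeholder names rather than the manager's real API:

package main

import "fmt"

// tick stands in for one housekeepingTick iteration: it receives the
// caller's channel and closes it once the (simulated) work is done.
func tick(onDemand chan chan struct{}) {
	finished := <-onDemand
	defer close(finished) // notify the calling function once housekeeping has completed
	fmt.Println("housekeeping ran")
}

func main() {
	onDemand := make(chan chan struct{})
	go tick(onDemand)

	done := make(chan struct{})
	onDemand <- done // request an immediate housekeeping pass
	<-done           // returns only after the pass has completed
}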
func (c *containerData) updateSpec() error {
spec, err := c.handler.GetSpec()
func (cd *containerData) updateSpec() error {
spec, err := cd.handler.GetSpec()
if err != nil {
// Ignore errors if the container is dead.
if !c.handler.Exists() {
if !cd.handler.Exists() {
return nil
}
return err
}
customMetrics, err := c.collectorManager.GetSpec()
customMetrics, err := cd.collectorManager.GetSpec()
if err != nil {
return err
}
@ -567,28 +567,28 @@ func (c *containerData) updateSpec() error {
spec.HasCustomMetrics = true
spec.CustomMetrics = customMetrics
}
c.lock.Lock()
defer c.lock.Unlock()
c.info.Spec = spec
cd.lock.Lock()
defer cd.lock.Unlock()
cd.info.Spec = spec
return nil
}
// Calculate new smoothed load average using the new sample of runnable threads.
// The decay used ensures that the load will stabilize on a new constant value within
// 10 seconds.
func (c *containerData) updateLoad(newLoad uint64) {
if c.loadAvg < 0 {
c.loadAvg = float64(newLoad) // initialize to the first seen sample for faster stabilization.
func (cd *containerData) updateLoad(newLoad uint64) {
if cd.loadAvg < 0 {
cd.loadAvg = float64(newLoad) // initialize to the first seen sample for faster stabilization.
} else {
c.loadAvg = c.loadAvg*c.loadDecay + float64(newLoad)*(1.0-c.loadDecay)
cd.loadAvg = cd.loadAvg*cd.loadDecay + float64(newLoad)*(1.0-cd.loadDecay)
}
}
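The smoothing in updateLoad is an exponentially weighted moving average. A small sketch of the convergence the comment describes; the one-second sample interval and ten-second time constant here are assumptions chosen for the demo, not values read from this code:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Per-sample decay factor for an assumed 1 s interval and 10 s time constant.
	decay := math.Exp(-1.0 / 10.0)
	loadAvg := -1.0 // sentinel: no samples seen yet, as in containerData

	// A step input: the number of runnable threads jumps from 0 to 4.
	samples := []float64{0, 4, 4, 4, 4, 4, 4, 4, 4, 4}
	for i, s := range samples {
		if loadAvg < 0 {
			loadAvg = s // initialize to the first sample for faster stabilization
		} else {
			loadAvg = loadAvg*decay + s*(1.0-decay)
		}
		fmt.Printf("t=%ds loadAvg=%.2f\n", i, loadAvg)
	}
}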
func (c *containerData) updateStats() error {
stats, statsErr := c.handler.GetStats()
func (cd *containerData) updateStats() error {
stats, statsErr := cd.handler.GetStats()
if statsErr != nil {
// Ignore errors if the container is dead.
if !c.handler.Exists() {
if !cd.handler.Exists() {
return nil
}
@ -598,32 +598,32 @@ func (c *containerData) updateStats() error {
if stats == nil {
return statsErr
}
if c.loadReader != nil {
if cd.loadReader != nil {
// TODO(vmarmol): Cache this path.
path, err := c.handler.GetCgroupPath("cpu")
path, err := cd.handler.GetCgroupPath("cpu")
if err == nil {
loadStats, err := c.loadReader.GetCpuLoad(c.info.Name, path)
loadStats, err := cd.loadReader.GetCpuLoad(cd.info.Name, path)
if err != nil {
return fmt.Errorf("failed to get load stat for %q - path %q, error %s", c.info.Name, path, err)
return fmt.Errorf("failed to get load stat for %q - path %q, error %s", cd.info.Name, path, err)
}
stats.TaskStats = loadStats
c.updateLoad(loadStats.NrRunning)
cd.updateLoad(loadStats.NrRunning)
// convert to 'milliLoad' to avoid floats and preserve precision.
stats.Cpu.LoadAverage = int32(c.loadAvg * 1000)
stats.Cpu.LoadAverage = int32(cd.loadAvg * 1000)
}
}
if c.summaryReader != nil {
err := c.summaryReader.AddSample(*stats)
if cd.summaryReader != nil {
err := cd.summaryReader.AddSample(*stats)
if err != nil {
// Ignore summary errors for now.
klog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
klog.V(2).Infof("Failed to add summary stats for %q: %v", cd.info.Name, err)
}
}
var customStatsErr error
cm := c.collectorManager.(*collector.GenericCollectorManager)
cm := cd.collectorManager.(*collector.GenericCollectorManager)
if len(cm.Collectors) > 0 {
if cm.NextCollectionTime.Before(c.clock.Now()) {
customStats, err := c.updateCustomStats()
if cm.NextCollectionTime.Before(cd.clock.Now()) {
customStats, err := cd.updateCustomStats()
if customStats != nil {
stats.CustomMetrics = customStats
}
@ -634,17 +634,17 @@ func (c *containerData) updateStats() error {
}
var nvidiaStatsErr error
if c.nvidiaCollector != nil {
if cd.nvidiaCollector != nil {
// This updates the Accelerators field of the stats struct
nvidiaStatsErr = c.nvidiaCollector.UpdateStats(stats)
nvidiaStatsErr = cd.nvidiaCollector.UpdateStats(stats)
}
perfStatsErr := c.perfCollector.UpdateStats(stats)
perfStatsErr := cd.perfCollector.UpdateStats(stats)
ref, err := c.handler.ContainerReference()
ref, err := cd.handler.ContainerReference()
if err != nil {
// Ignore errors if the container is dead.
if !c.handler.Exists() {
if !cd.handler.Exists() {
return nil
}
return err
@ -654,7 +654,7 @@ func (c *containerData) updateStats() error {
ContainerReference: ref,
}
err = c.memoryCache.AddStats(&cInfo, stats)
err = cd.memoryCache.AddStats(&cInfo, stats)
if err != nil {
return err
}
@ -672,10 +672,10 @@ func (c *containerData) updateStats() error {
return customStatsErr
}
func (c *containerData) updateCustomStats() (map[string][]info.MetricVal, error) {
_, customStats, customStatsErr := c.collectorManager.Collect()
func (cd *containerData) updateCustomStats() (map[string][]info.MetricVal, error) {
_, customStats, customStatsErr := cd.collectorManager.Collect()
if customStatsErr != nil {
if !c.handler.Exists() {
if !cd.handler.Exists() {
return customStats, nil
}
customStatsErr = fmt.Errorf("%v, continuing to push custom stats", customStatsErr)
@ -683,19 +683,19 @@ func (c *containerData) updateCustomStats() (map[string][]info.MetricVal, error)
return customStats, customStatsErr
}
func (c *containerData) updateSubcontainers() error {
func (cd *containerData) updateSubcontainers() error {
var subcontainers info.ContainerReferenceSlice
subcontainers, err := c.handler.ListContainers(container.ListSelf)
subcontainers, err := cd.handler.ListContainers(container.ListSelf)
if err != nil {
// Ignore errors if the container is dead.
if !c.handler.Exists() {
if !cd.handler.Exists() {
return nil
}
return err
}
sort.Sort(subcontainers)
c.lock.Lock()
defer c.lock.Unlock()
c.info.Subcontainers = subcontainers
cd.lock.Lock()
defer cd.lock.Unlock()
cd.info.Subcontainers = subcontainers
return nil
}

View File

@ -123,7 +123,7 @@ type Manager interface {
// Get past events that have been detected and that fit the request.
GetPastEvents(request *events.Request) ([]*info.Event, error)
CloseEventChannel(watch_id int)
CloseEventChannel(watchID int)
// Get status information about docker.
DockerInfo() (info.DockerStatus, error)
@ -142,7 +142,7 @@ type HouskeepingConfig = struct {
}
// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig HouskeepingConfig, includedMetricsSet container.MetricSet, collectorHttpClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string, perfEventsFile string) (Manager, error) {
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig HouskeepingConfig, includedMetricsSet container.MetricSet, collectorHTTPClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string, perfEventsFile string) (Manager, error) {
if memoryCache == nil {
return nil, fmt.Errorf("manager requires memory storage")
}
@ -189,7 +189,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig
includedMetrics: includedMetricsSet,
containerWatchers: []watcher.ContainerWatcher{},
eventsChannel: eventsChannel,
collectorHttpClient: collectorHttpClient,
collectorHTTPClient: collectorHTTPClient,
nvidiaManager: accelerators.NewNvidiaManager(),
rawContainerCgroupPathPrefixWhiteList: rawContainerCgroupPathPrefixWhiteList,
}
@ -243,7 +243,7 @@ type manager struct {
includedMetrics container.MetricSet
containerWatchers []watcher.ContainerWatcher
eventsChannel chan watcher.ContainerEvent
collectorHttpClient *http.Client
collectorHTTPClient *http.Client
nvidiaManager stats.Manager
perfManager stats.Manager
// List of raw container cgroup path prefix whitelist.
@ -251,10 +251,10 @@ type manager struct {
}
// Start the container manager.
func (self *manager) Start() error {
self.containerWatchers = container.InitializePlugins(self, self.fsInfo, self.includedMetrics)
func (m *manager) Start() error {
m.containerWatchers = container.InitializePlugins(m, m.fsInfo, m.includedMetrics)
err := raw.Register(self, self.fsInfo, self.includedMetrics, self.rawContainerCgroupPathPrefixWhiteList)
err := raw.Register(m, m.fsInfo, m.includedMetrics, m.rawContainerCgroupPathPrefixWhiteList)
if err != nil {
klog.Errorf("Registration of the raw container factory failed: %v", err)
}
@ -263,10 +263,10 @@ func (self *manager) Start() error {
if err != nil {
return err
}
self.containerWatchers = append(self.containerWatchers, rawWatcher)
m.containerWatchers = append(m.containerWatchers, rawWatcher)
// Watch for OOMs.
err = self.watchForNewOoms()
err = m.watchForNewOoms()
if err != nil {
klog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err)
}
@ -277,12 +277,12 @@ func (self *manager) Start() error {
}
// Create root and then recover all containers.
err = self.createContainer("/", watcher.Raw)
err = m.createContainer("/", watcher.Raw)
if err != nil {
return err
}
klog.V(2).Infof("Starting recovery of all containers")
err = self.detectSubcontainers("/")
err = m.detectSubcontainers("/")
if err != nil {
return err
}
@ -290,63 +290,63 @@ func (self *manager) Start() error {
// Watch for new container.
quitWatcher := make(chan error)
err = self.watchForNewContainers(quitWatcher)
err = m.watchForNewContainers(quitWatcher)
if err != nil {
return err
}
self.quitChannels = append(self.quitChannels, quitWatcher)
m.quitChannels = append(m.quitChannels, quitWatcher)
// Look for new containers in the main housekeeping thread.
quitGlobalHousekeeping := make(chan error)
self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
go self.globalHousekeeping(quitGlobalHousekeeping)
m.quitChannels = append(m.quitChannels, quitGlobalHousekeeping)
go m.globalHousekeeping(quitGlobalHousekeeping)
quitUpdateMachineInfo := make(chan error)
self.quitChannels = append(self.quitChannels, quitUpdateMachineInfo)
go self.updateMachineInfo(quitUpdateMachineInfo)
m.quitChannels = append(m.quitChannels, quitUpdateMachineInfo)
go m.updateMachineInfo(quitUpdateMachineInfo)
return nil
}
func (self *manager) Stop() error {
defer self.nvidiaManager.Destroy()
defer self.destroyPerfCollectors()
func (m *manager) Stop() error {
defer m.nvidiaManager.Destroy()
defer m.destroyPerfCollectors()
// Stop and wait on all quit channels.
for i, c := range self.quitChannels {
for i, c := range m.quitChannels {
// Send the exit signal and wait on the thread to exit (by closing the channel).
c <- nil
err := <-c
if err != nil {
// Remove the channels that quit successfully.
self.quitChannels = self.quitChannels[i:]
m.quitChannels = m.quitChannels[i:]
return err
}
}
self.quitChannels = make([]chan error, 0, 2)
m.quitChannels = make([]chan error, 0, 2)
nvm.Finalize()
perf.Finalize()
return nil
}
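Stop relies on a small handshake with each background goroutine: the goroutine owns one quit channel, treats a received value as the exit signal, and replies on the same channel once it has shut down, with a nil reply meaning success. A minimal sketch of that contract:

package main

import "fmt"

// worker models one of the manager's background loops: it runs until it
// receives the exit signal, then acknowledges on the same channel.
func worker(quit chan error) {
	<-quit      // exit signal from Stop
	quit <- nil // acknowledge; a real worker would report cleanup errors here
}

func main() {
	quit := make(chan error)
	go worker(quit)

	quit <- nil // send the exit signal
	if err := <-quit; err != nil {
		fmt.Println("worker failed to stop:", err)
		return
	}
	fmt.Println("worker stopped cleanly")
}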
func (self *manager) destroyPerfCollectors() {
for _, container := range self.containers {
func (m *manager) destroyPerfCollectors() {
for _, container := range m.containers {
container.perfCollector.Destroy()
}
}
func (self *manager) updateMachineInfo(quit chan error) {
func (m *manager) updateMachineInfo(quit chan error) {
ticker := time.NewTicker(*updateMachineInfoInterval)
for {
select {
case <-ticker.C:
info, err := machine.Info(self.sysFs, self.fsInfo, self.inHostNamespace)
info, err := machine.Info(m.sysFs, m.fsInfo, m.inHostNamespace)
if err != nil {
klog.Errorf("Could not get machine info: %v", err)
break
}
self.machineMu.Lock()
self.machineInfo = *info
self.machineMu.Unlock()
m.machineMu.Lock()
m.machineInfo = *info
m.machineMu.Unlock()
klog.V(5).Infof("Update machine info: %+v", *info)
case <-quit:
ticker.Stop()
@ -356,7 +356,7 @@ func (self *manager) updateMachineInfo(quit chan error) {
}
}
func (self *manager) globalHousekeeping(quit chan error) {
func (m *manager) globalHousekeeping(quit chan error) {
// Long housekeeping is either 100ms or half of the housekeeping interval.
longHousekeeping := 100 * time.Millisecond
if *globalHousekeepingInterval/2 < longHousekeeping {
@ -370,7 +370,7 @@ func (self *manager) globalHousekeeping(quit chan error) {
start := time.Now()
// Check for new containers.
err := self.detectSubcontainers("/")
err := m.detectSubcontainers("/")
if err != nil {
klog.Errorf("Failed to detect containers: %s", err)
}
@ -389,15 +389,15 @@ func (self *manager) globalHousekeeping(quit chan error) {
}
}
func (self *manager) getContainerData(containerName string) (*containerData, error) {
func (m *manager) getContainerData(containerName string) (*containerData, error) {
var cont *containerData
var ok bool
func() {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
m.containersLock.RLock()
defer m.containersLock.RUnlock()
// Ensure we have the container.
cont, ok = self.containers[namespacedContainerName{
cont, ok = m.containers[namespacedContainerName{
Name: containerName,
}]
}()
@ -407,8 +407,8 @@ func (self *manager) getContainerData(containerName string) (*containerData, err
return cont, nil
}
func (self *manager) GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error) {
conts, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error) {
conts, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@ -424,8 +424,8 @@ func (self *manager) GetDerivedStats(containerName string, options v2.RequestOpt
return stats, errs.OrNil()
}
func (self *manager) GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error) {
conts, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error) {
conts, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@ -436,43 +436,43 @@ func (self *manager) GetContainerSpec(containerName string, options v2.RequestOp
if err != nil {
errs.append(name, "GetInfo", err)
}
spec := self.getV2Spec(cinfo)
spec := m.getV2Spec(cinfo)
specs[name] = spec
}
return specs, errs.OrNil()
}
// Get V2 container spec from v1 container info.
func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
spec := self.getAdjustedSpec(cinfo)
func (m *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
spec := m.getAdjustedSpec(cinfo)
return v2.ContainerSpecFromV1(&spec, cinfo.Aliases, cinfo.Namespace)
}
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
func (m *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
spec := cinfo.Spec
// Set default value to an actual value
if spec.HasMemory {
// Memory.Limit is 0 means there's no limit
if spec.Memory.Limit == 0 {
self.machineMu.RLock()
spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
self.machineMu.RUnlock()
m.machineMu.RLock()
spec.Memory.Limit = uint64(m.machineInfo.MemoryCapacity)
m.machineMu.RUnlock()
}
}
return spec
}
func (self *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
cont, err := self.getContainerData(containerName)
func (m *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
cont, err := m.getContainerData(containerName)
if err != nil {
return nil, err
}
return self.containerDataToContainerInfo(cont, query)
return m.containerDataToContainerInfo(cont, query)
}
func (self *manager) GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
containers, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
containers, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@ -489,9 +489,9 @@ func (self *manager) GetContainerInfoV2(containerName string, options v2.Request
infos[name] = result
continue
}
result.Spec = self.getV2Spec(cinfo)
result.Spec = m.getV2Spec(cinfo)
stats, err := self.memoryCache.RecentStats(name, nilTime, nilTime, options.Count)
stats, err := m.memoryCache.RecentStats(name, nilTime, nilTime, options.Count)
if err != nil {
errs.append(name, "RecentStats", err)
infos[name] = result
@ -505,14 +505,14 @@ func (self *manager) GetContainerInfoV2(containerName string, options v2.Request
return infos, errs.OrNil()
}
func (self *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
func (m *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
// Get the info from the container.
cinfo, err := cont.GetInfo(true)
if err != nil {
return nil, err
}
stats, err := self.memoryCache.RecentStats(cinfo.Name, query.Start, query.End, query.NumStats)
stats, err := m.memoryCache.RecentStats(cinfo.Name, query.Start, query.End, query.NumStats)
if err != nil {
return nil, err
}
@ -521,55 +521,55 @@ func (self *manager) containerDataToContainerInfo(cont *containerData, query *in
ret := &info.ContainerInfo{
ContainerReference: cinfo.ContainerReference,
Subcontainers: cinfo.Subcontainers,
Spec: self.getAdjustedSpec(cinfo),
Spec: m.getAdjustedSpec(cinfo),
Stats: stats,
}
return ret, nil
}
func (self *manager) getContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
cont, ok := self.containers[namespacedContainerName{Name: containerName}]
func (m *manager) getContainer(containerName string) (*containerData, error) {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
cont, ok := m.containers[namespacedContainerName{Name: containerName}]
if !ok {
return nil, fmt.Errorf("unknown container %q", containerName)
}
return cont, nil
}
func (self *manager) getSubcontainers(containerName string) map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containersMap := make(map[string]*containerData, len(self.containers))
func (m *manager) getSubcontainers(containerName string) map[string]*containerData {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
containersMap := make(map[string]*containerData, len(m.containers))
// Get all the unique subcontainers of the specified container
matchedName := path.Join(containerName, "/")
for i := range self.containers {
name := self.containers[i].info.Name
for i := range m.containers {
name := m.containers[i].info.Name
if name == containerName || strings.HasPrefix(name, matchedName) {
containersMap[self.containers[i].info.Name] = self.containers[i]
containersMap[m.containers[i].info.Name] = m.containers[i]
}
}
return containersMap
}
func (self *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
containersMap := self.getSubcontainers(containerName)
func (m *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
containersMap := m.getSubcontainers(containerName)
containers := make([]*containerData, 0, len(containersMap))
for _, cont := range containersMap {
containers = append(containers, cont)
}
return self.containerDataSliceToContainerInfoSlice(containers, query)
return m.containerDataSliceToContainerInfoSlice(containers, query)
}
func (self *manager) getAllDockerContainers() map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containers := make(map[string]*containerData, len(self.containers))
func (m *manager) getAllDockerContainers() map[string]*containerData {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
containers := make(map[string]*containerData, len(m.containers))
// Get containers in the Docker namespace.
for name, cont := range self.containers {
for name, cont := range m.containers {
if name.Namespace == docker.DockerNamespace {
containers[cont.info.Name] = cont
}
@ -577,12 +577,12 @@ func (self *manager) getAllDockerContainers() map[string]*containerData {
return containers
}
func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
containers := self.getAllDockerContainers()
func (m *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
containers := m.getAllDockerContainers()
output := make(map[string]info.ContainerInfo, len(containers))
for name, cont := range containers {
inf, err := self.containerDataToContainerInfo(cont, query)
inf, err := m.containerDataToContainerInfo(cont, query)
if err != nil {
// Ignore the error because of race condition and return best-effort result.
if err == memory.ErrDataNotFound {
@ -596,19 +596,19 @@ func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[
return output, nil
}
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
func (m *manager) getDockerContainer(containerName string) (*containerData, error) {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
// Check for the container in the Docker container namespace.
cont, ok := self.containers[namespacedContainerName{
cont, ok := m.containers[namespacedContainerName{
Namespace: docker.DockerNamespace,
Name: containerName,
}]
// Look for container by short prefix name if no exact match found.
if !ok {
for contName, c := range self.containers {
for contName, c := range m.containers {
if contName.Namespace == docker.DockerNamespace && strings.HasPrefix(contName.Name, containerName) {
if cont == nil {
cont = c
@ -626,20 +626,20 @@ func (self *manager) getDockerContainer(containerName string) (*containerData, e
return cont, nil
}
func (self *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
container, err := self.getDockerContainer(containerName)
func (m *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
container, err := m.getDockerContainer(containerName)
if err != nil {
return info.ContainerInfo{}, err
}
inf, err := self.containerDataToContainerInfo(container, query)
inf, err := m.containerDataToContainerInfo(container, query)
if err != nil {
return info.ContainerInfo{}, err
}
return *inf, nil
}
func (self *manager) containerDataSliceToContainerInfoSlice(containers []*containerData, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
func (m *manager) containerDataSliceToContainerInfoSlice(containers []*containerData, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
if len(containers) == 0 {
return nil, fmt.Errorf("no containers found")
}
@ -647,7 +647,7 @@ func (self *manager) containerDataSliceToContainerInfoSlice(containers []*contai
// Get the info for each container.
output := make([]*info.ContainerInfo, 0, len(containers))
for i := range containers {
cinfo, err := self.containerDataToContainerInfo(containers[i], query)
cinfo, err := m.containerDataToContainerInfo(containers[i], query)
if err != nil {
// Skip containers with errors, we try to degrade gracefully.
continue
@ -658,8 +658,8 @@ func (self *manager) containerDataSliceToContainerInfoSlice(containers []*contai
return output, nil
}
func (self *manager) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
containers, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
containers, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@ -669,7 +669,7 @@ func (self *manager) GetRequestedContainersInfo(containerName string, options v2
NumStats: options.Count,
}
for name, data := range containers {
info, err := self.containerDataToContainerInfo(data, &query)
info, err := m.containerDataToContainerInfo(data, &query)
if err != nil {
errs.append(name, "containerDataToContainerInfo", err)
}
@ -678,18 +678,18 @@ func (self *manager) GetRequestedContainersInfo(containerName string, options v2
return containersMap, errs.OrNil()
}
func (self *manager) getRequestedContainers(containerName string, options v2.RequestOptions) (map[string]*containerData, error) {
func (m *manager) getRequestedContainers(containerName string, options v2.RequestOptions) (map[string]*containerData, error) {
containersMap := make(map[string]*containerData)
switch options.IdType {
case v2.TypeName:
if !options.Recursive {
cont, err := self.getContainer(containerName)
cont, err := m.getContainer(containerName)
if err != nil {
return containersMap, err
}
containersMap[cont.info.Name] = cont
} else {
containersMap = self.getSubcontainers(containerName)
containersMap = m.getSubcontainers(containerName)
if len(containersMap) == 0 {
return containersMap, fmt.Errorf("unknown container: %q", containerName)
}
@ -697,7 +697,7 @@ func (self *manager) getRequestedContainers(containerName string, options v2.Req
case v2.TypeDocker:
if !options.Recursive {
containerName = strings.TrimPrefix(containerName, "/")
cont, err := self.getDockerContainer(containerName)
cont, err := m.getDockerContainer(containerName)
if err != nil {
return containersMap, err
}
@ -706,7 +706,7 @@ func (self *manager) getRequestedContainers(containerName string, options v2.Req
if containerName != "/" {
return containersMap, fmt.Errorf("invalid request for docker container %q with subcontainers", containerName)
}
containersMap = self.getAllDockerContainers()
containersMap = m.getAllDockerContainers()
}
default:
return containersMap, fmt.Errorf("invalid request type %q", options.IdType)
@ -726,32 +726,32 @@ func (self *manager) getRequestedContainers(containerName string, options v2.Req
return containersMap, nil
}
func (self *manager) GetDirFsInfo(dir string) (v2.FsInfo, error) {
device, err := self.fsInfo.GetDirFsDevice(dir)
func (m *manager) GetDirFsInfo(dir string) (v2.FsInfo, error) {
device, err := m.fsInfo.GetDirFsDevice(dir)
if err != nil {
return v2.FsInfo{}, fmt.Errorf("failed to get device for dir %q: %v", dir, err)
}
return self.getFsInfoByDeviceName(device.Device)
return m.getFsInfoByDeviceName(device.Device)
}
func (self *manager) GetFsInfoByFsUUID(uuid string) (v2.FsInfo, error) {
device, err := self.fsInfo.GetDeviceInfoByFsUUID(uuid)
func (m *manager) GetFsInfoByFsUUID(uuid string) (v2.FsInfo, error) {
device, err := m.fsInfo.GetDeviceInfoByFsUUID(uuid)
if err != nil {
return v2.FsInfo{}, err
}
return self.getFsInfoByDeviceName(device.Device)
return m.getFsInfoByDeviceName(device.Device)
}
func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
func (m *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
var empty time.Time
// Get latest data from filesystems hanging off root container.
stats, err := self.memoryCache.RecentStats("/", empty, empty, 1)
stats, err := m.memoryCache.RecentStats("/", empty, empty, 1)
if err != nil {
return nil, err
}
dev := ""
if len(label) != 0 {
dev, err = self.fsInfo.GetDeviceForLabel(label)
dev, err = m.fsInfo.GetDeviceForLabel(label)
if err != nil {
return nil, err
}
@ -762,11 +762,11 @@ func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
if len(label) != 0 && fs.Device != dev {
continue
}
mountpoint, err := self.fsInfo.GetMountpointForDevice(fs.Device)
mountpoint, err := m.fsInfo.GetMountpointForDevice(fs.Device)
if err != nil {
return nil, err
}
labels, err := self.fsInfo.GetLabelsForDevice(fs.Device)
labels, err := m.fsInfo.GetLabelsForDevice(fs.Device)
if err != nil {
return nil, err
}
@ -847,7 +847,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
klog.V(4).Infof("Got config from %q: %q", v, configFile)
if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHTTPClient)
if err != nil {
return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
}
@ -856,7 +856,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
}
} else {
newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHTTPClient)
if err != nil {
return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
}
@ -1097,16 +1097,16 @@ func (m *manager) detectSubcontainers(containerName string) error {
}
// Watches for new containers started in the system. Runs forever unless there is a setup error.
func (self *manager) watchForNewContainers(quit chan error) error {
for _, watcher := range self.containerWatchers {
err := watcher.Start(self.eventsChannel)
func (m *manager) watchForNewContainers(quit chan error) error {
for _, watcher := range m.containerWatchers {
err := watcher.Start(m.eventsChannel)
if err != nil {
return err
}
}
// There is a race between starting the watch and new container creation, so we do a detection pass before reading new containers.
err := self.detectSubcontainers("/")
err := m.detectSubcontainers("/")
if err != nil {
return err
}
@ -1115,15 +1115,15 @@ func (self *manager) watchForNewContainers(quit chan error) error {
go func() {
for {
select {
case event := <-self.eventsChannel:
case event := <-m.eventsChannel:
switch {
case event.EventType == watcher.ContainerAdd:
switch event.WatchSource {
default:
err = self.createContainer(event.Name, event.WatchSource)
err = m.createContainer(event.Name, event.WatchSource)
}
case event.EventType == watcher.ContainerDelete:
err = self.destroyContainer(event.Name)
err = m.destroyContainer(event.Name)
}
if err != nil {
klog.Warningf("Failed to process watch event %+v: %v", event, err)
@ -1132,7 +1132,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
var errs partialFailure
// Stop processing events if asked to quit.
for i, watcher := range self.containerWatchers {
for i, watcher := range m.containerWatchers {
err := watcher.Stop()
if err != nil {
errs.append(fmt.Sprintf("watcher %d", i), "Stop", err)
@ -1152,7 +1152,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
return nil
}
func (self *manager) watchForNewOoms() error {
func (m *manager) watchForNewOoms() error {
klog.V(2).Infof("Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New()
@ -1169,7 +1169,7 @@ func (self *manager) watchForNewOoms() error {
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOom,
}
err := self.eventHandler.AddEvent(newEvent)
err := m.eventHandler.AddEvent(newEvent)
if err != nil {
klog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
}
@ -1186,7 +1186,7 @@ func (self *manager) watchForNewOoms() error {
},
},
}
err = self.eventHandler.AddEvent(newEvent)
err = m.eventHandler.AddEvent(newEvent)
if err != nil {
klog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
}
@ -1196,18 +1196,18 @@ func (self *manager) watchForNewOoms() error {
}
// can be called by the api which will take events returned on the channel
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return self.eventHandler.WatchEvents(request)
func (m *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return m.eventHandler.WatchEvents(request)
}
// can be called by the api which will return all events satisfying the request
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return self.eventHandler.GetEvents(request)
func (m *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return m.eventHandler.GetEvents(request)
}
// called by the api when a client is no longer listening to the channel
func (self *manager) CloseEventChannel(watch_id int) {
self.eventHandler.StopWatch(watch_id)
func (m *manager) CloseEventChannel(watchID int) {
m.eventHandler.StopWatch(watchID)
}
// Parses the events StoragePolicy from the flags.
@ -1300,12 +1300,12 @@ func (m *manager) DebugInfo() map[string][]string {
return debugInfo
}
func (self *manager) getFsInfoByDeviceName(deviceName string) (v2.FsInfo, error) {
mountPoint, err := self.fsInfo.GetMountpointForDevice(deviceName)
func (m *manager) getFsInfoByDeviceName(deviceName string) (v2.FsInfo, error) {
mountPoint, err := m.fsInfo.GetMountpointForDevice(deviceName)
if err != nil {
return v2.FsInfo{}, fmt.Errorf("failed to get mount point for device %q: %v", deviceName, err)
}
infos, err := self.GetFsInfo("")
infos, err := m.GetFsInfo("")
if err != nil {
return v2.FsInfo{}, err
}
@ -1319,22 +1319,22 @@ func (self *manager) getFsInfoByDeviceName(deviceName string) (v2.FsInfo, error)
func getVersionInfo() (*info.VersionInfo, error) {
kernel_version := machine.KernelVersion()
container_os := machine.ContainerOsVersion()
docker_version, err := docker.VersionString()
kernelVersion := machine.KernelVersion()
osVersion := machine.ContainerOsVersion()
dockerVersion, err := docker.VersionString()
if err != nil {
return nil, err
}
docker_api_version, err := docker.APIVersionString()
dockerAPIVersion, err := docker.APIVersionString()
if err != nil {
return nil, err
}
return &info.VersionInfo{
KernelVersion: kernel_version,
ContainerOsVersion: container_os,
DockerVersion: docker_version,
DockerAPIVersion: docker_api_version,
KernelVersion: kernelVersion,
ContainerOsVersion: osVersion,
DockerVersion: dockerVersion,
DockerAPIVersion: dockerAPIVersion,
CadvisorVersion: version.Info["version"],
CadvisorRevision: version.Info["revision"],
}, nil

View File

@ -1,3 +1,5 @@
// +build libpfm,cgo
// Copyright 2020 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -30,26 +30,26 @@ const secondsToNanoSeconds = secondsToMilliSeconds * milliSecondsToNanoSeconds
type Uint64Slice []uint64
func (a Uint64Slice) Len() int { return len(a) }
func (a Uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Uint64Slice) Less(i, j int) bool { return a[i] < a[j] }
func (s Uint64Slice) Len() int { return len(s) }
func (s Uint64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
// Get percentile of the provided samples. Round to integer.
func (self Uint64Slice) GetPercentile(d float64) uint64 {
func (s Uint64Slice) GetPercentile(d float64) uint64 {
if d < 0.0 || d > 1.0 {
return 0
}
count := self.Len()
count := s.Len()
if count == 0 {
return 0
}
sort.Sort(self)
sort.Sort(s)
n := float64(d * (float64(count) + 1))
idx, frac := math.Modf(n)
index := int(idx)
percentile := float64(self[index-1])
percentile := float64(s[index-1])
if index > 1 && index < count {
percentile += frac * float64(self[index]-self[index-1])
percentile += frac * float64(s[index]-s[index-1])
}
return uint64(percentile)
}
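As a worked example of the interpolation above, assuming the Uint64Slice type is in scope: with ten samples and d = 0.9, n = 0.9 * (10 + 1) = 9.9, so the result lies 90% of the way between the 9th and 10th sorted values.

package main

import "fmt"

func main() {
	// Assumes Uint64Slice and GetPercentile from the file above.
	s := Uint64Slice{10, 20, 30, 40, 50, 60, 70, 80, 90, 100}
	// n = 9.9 -> index 9, frac ~0.9:
	// percentile = s[8] + 0.9*(s[9]-s[8]) = 90 + 9 = 99
	fmt.Println(s.GetPercentile(0.9)) // 99
}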
@ -61,15 +61,15 @@ type mean struct {
Mean float64
}
func (self *mean) Add(value uint64) {
self.count++
if self.count == 1 {
self.Mean = float64(value)
func (m *mean) Add(value uint64) {
m.count++
if m.count == 1 {
m.Mean = float64(value)
return
}
c := float64(self.count)
c := float64(m.count)
v := float64(value)
self.Mean = (self.Mean*(c-1) + v) / c
m.Mean = (m.Mean*(c-1) + v) / c
}
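The Add method keeps a running mean without storing samples: multiplying the previous mean by (c-1) recovers the running sum, so the new mean is (sum + v) / c. A quick check with hypothetical values, assuming the mean type above is in scope:

package main

import "fmt"

func main() {
	// Assumes the mean type defined in the file above.
	var m mean
	for _, v := range []uint64{10, 20, 30, 40} {
		m.Add(v)
	}
	fmt.Println(m.Mean) // 25, the same as (10+20+30+40)/4
}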
type resource struct {
@ -82,20 +82,20 @@ type resource struct {
}
// Adds a new percentile sample.
func (self *resource) Add(p info.Percentiles) {
func (r *resource) Add(p info.Percentiles) {
if !p.Present {
return
}
if p.Max > self.max {
self.max = p.Max
if p.Max > r.max {
r.max = p.Max
}
self.mean.Add(p.Mean)
r.mean.Add(p.Mean)
// Selecting 90p of 90p :(
self.samples = append(self.samples, p.Ninety)
r.samples = append(r.samples, p.Ninety)
}
// Add a single sample. Internally, we convert it to a fake percentile sample.
func (self *resource) AddSample(val uint64) {
func (r *resource) AddSample(val uint64) {
sample := info.Percentiles{
Present: true,
Mean: val,
@ -104,17 +104,17 @@ func (self *resource) AddSample(val uint64) {
Ninety: val,
NinetyFive: val,
}
self.Add(sample)
r.Add(sample)
}
// Get max, average, and 90p from existing samples.
func (self *resource) GetAllPercentiles() info.Percentiles {
func (r *resource) GetAllPercentiles() info.Percentiles {
p := info.Percentiles{}
p.Mean = uint64(self.mean.Mean)
p.Max = self.max
p.Fifty = self.samples.GetPercentile(0.5)
p.Ninety = self.samples.GetPercentile(0.9)
p.NinetyFive = self.samples.GetPercentile(0.95)
p.Mean = uint64(r.mean.Mean)
p.Max = r.max
p.Fifty = r.samples.GetPercentile(0.5)
p.Ninety = r.samples.GetPercentile(0.9)
p.NinetyFive = r.samples.GetPercentile(0.95)
p.Present = true
return p
}
@ -155,7 +155,7 @@ func getPercentComplete(stats []*secondSample) (percent int32) {
}
// Calculate cpurate from two consecutive total cpu usage samples.
func getCpuRate(latest, previous secondSample) (uint64, error) {
func getCPURate(latest, previous secondSample) (uint64, error) {
elapsed := latest.Timestamp.Sub(previous.Timestamp).Nanoseconds()
if elapsed < 10*milliSecondsToNanoSeconds {
return 0, fmt.Errorf("elapsed time too small: %d ns: time now %s last %s", elapsed, latest.Timestamp.String(), previous.Timestamp.String())
@ -175,7 +175,7 @@ func GetMinutePercentiles(stats []*secondSample) info.Usage {
memory := NewResource(len(stats))
for _, stat := range stats {
if !lastSample.Timestamp.IsZero() {
cpuRate, err := getCpuRate(*stat, lastSample)
cpuRate, err := getCPURate(*stat, lastSample)
if err != nil {
continue
}

View File

@ -103,7 +103,7 @@ func (s *StatsSummary) updateLatestUsage() {
usage.Memory = latest.Memory
if numStats > 1 {
previous := s.secondSamples[numStats-2]
cpu, err := getCpuRate(*latest, *previous)
cpu, err := getCPURate(*latest, *previous)
if err == nil {
usage.Cpu = cpu
}
@ -151,7 +151,7 @@ func (s *StatsSummary) getDerivedUsage(n int) (info.Usage, error) {
samples := s.minuteSamples.RecentStats(n)
numSamples := len(samples)
if numSamples < 1 {
return info.Usage{}, fmt.Errorf("failed to retrieve any minute stats.")
return info.Usage{}, fmt.Errorf("failed to retrieve any minute stats")
}
// We generate derived stats even with partial data.
usage := GetDerivedPercentiles(samples)
@ -177,7 +177,7 @@ func New(spec v1.ContainerSpec) (*StatsSummary, error) {
summary.available.Memory = true
}
if !summary.available.Cpu && !summary.available.Memory {
return nil, fmt.Errorf("none of the resources are being tracked.")
return nil, fmt.Errorf("none of the resources are being tracked")
}
summary.minuteSamples = NewSamplesBuffer(60 /* one hour */)
return &summary, nil

View File

@ -75,14 +75,14 @@ func NewRealCloudInfo() CloudInfo {
}
}
func (self *realCloudInfo) GetCloudProvider() info.CloudProvider {
return self.cloudProvider
func (i *realCloudInfo) GetCloudProvider() info.CloudProvider {
return i.cloudProvider
}
func (self *realCloudInfo) GetInstanceType() info.InstanceType {
return self.instanceType
func (i *realCloudInfo) GetInstanceType() info.InstanceType {
return i.instanceType
}
func (self *realCloudInfo) GetInstanceID() info.InstanceID {
return self.instanceID
func (i *realCloudInfo) GetInstanceID() info.InstanceID {
return i.instanceID
}

View File

@ -55,26 +55,26 @@ func newConnection() (*Connection, error) {
return conn, err
}
func (self *Connection) Read(b []byte) (n int, err error) {
n, _, err = syscall.Recvfrom(self.fd, b, 0)
func (c *Connection) Read(b []byte) (n int, err error) {
n, _, err = syscall.Recvfrom(c.fd, b, 0)
return n, err
}
func (self *Connection) Write(b []byte) (n int, err error) {
err = syscall.Sendto(self.fd, b, 0, &self.addr)
func (c *Connection) Write(b []byte) (n int, err error) {
err = syscall.Sendto(c.fd, b, 0, &c.addr)
return len(b), err
}
func (self *Connection) Close() error {
return syscall.Close(self.fd)
func (c *Connection) Close() error {
return syscall.Close(c.fd)
}
func (self *Connection) WriteMessage(msg syscall.NetlinkMessage) error {
func (c *Connection) WriteMessage(msg syscall.NetlinkMessage) error {
w := bytes.NewBuffer(nil)
msg.Header.Len = uint32(syscall.NLMSG_HDRLEN + len(msg.Data))
msg.Header.Seq = self.seq
self.seq++
msg.Header.Pid = self.pid
msg.Header.Seq = c.seq
c.seq++
msg.Header.Pid = c.pid
err := binary.Write(w, binary.LittleEndian, msg.Header)
if err != nil {
return err
@ -83,16 +83,16 @@ func (self *Connection) WriteMessage(msg syscall.NetlinkMessage) error {
if err != nil {
return err
}
_, err = self.Write(w.Bytes())
_, err = c.Write(w.Bytes())
return err
}
func (self *Connection) ReadMessage() (msg syscall.NetlinkMessage, err error) {
err = binary.Read(self.rbuf, binary.LittleEndian, &msg.Header)
func (c *Connection) ReadMessage() (msg syscall.NetlinkMessage, err error) {
err = binary.Read(c.rbuf, binary.LittleEndian, &msg.Header)
if err != nil {
return msg, err
}
msg.Data = make([]byte, msg.Header.Len-syscall.NLMSG_HDRLEN)
_, err = self.rbuf.Read(msg.Data)
_, err = c.rbuf.Read(msg.Data)
return msg, err
}
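WriteMessage frames each request by hand: the 16-byte netlink header's Len field must cover header plus payload, Seq increments per message, and everything is little-endian. A self-contained sketch of the same framing on Linux; the payload bytes are hypothetical:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"syscall"
)

// frame serializes a netlink message the way WriteMessage does: fix up the
// header, then write header and payload little-endian.
func frame(msg syscall.NetlinkMessage, seq, pid uint32) ([]byte, error) {
	msg.Header.Len = uint32(syscall.NLMSG_HDRLEN + len(msg.Data))
	msg.Header.Seq = seq
	msg.Header.Pid = pid
	w := bytes.NewBuffer(nil)
	if err := binary.Write(w, binary.LittleEndian, msg.Header); err != nil {
		return nil, err
	}
	if _, err := w.Write(msg.Data); err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}

func main() {
	msg := syscall.NetlinkMessage{Data: []byte{0x01, 0x02}}
	b, err := frame(msg, 1, 1234)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b)) // 18: a 16-byte header plus 2 payload bytes
}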

View File

@ -27,6 +27,7 @@ import (
var (
// TODO(rjnagal): Verify and fix for other architectures.
Endian = binary.LittleEndian
)
@ -42,11 +43,11 @@ type netlinkMessage struct {
Data []byte
}
func (self netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
rawmsg.Header = self.Header
func (m netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
rawmsg.Header = m.Header
w := bytes.NewBuffer([]byte{})
binary.Write(w, Endian, self.GenHeader)
w.Write(self.Data)
binary.Write(w, Endian, m.GenHeader)
w.Write(m.Data)
rawmsg.Data = w.Bytes()
return rawmsg
}
@ -64,7 +65,7 @@ func padding(size int, alignment int) int {
}
// Get family id for taskstats subsystem.
func getFamilyId(conn *Connection) (uint16, error) {
func getFamilyID(conn *Connection) (uint16, error) {
msg := prepareFamilyMessage()
err := conn.WriteMessage(msg.toRawMsg())
if err != nil {
@ -167,7 +168,7 @@ func parseFamilyResp(msg syscall.NetlinkMessage) (uint16, error) {
return 0, err
}
}
return 0, fmt.Errorf("family id not found in the response.")
return 0, fmt.Errorf("family id not found in the response")
}
// Extract task stats from response returned by kernel.

View File

@ -24,7 +24,7 @@ import (
)
type NetlinkReader struct {
familyId uint16
familyID uint16
conn *Connection
}
@ -34,24 +34,24 @@ func New() (*NetlinkReader, error) {
return nil, fmt.Errorf("failed to create a new connection: %s", err)
}
id, err := getFamilyId(conn)
id, err := getFamilyID(conn)
if err != nil {
return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err)
}
klog.V(4).Infof("Family id for taskstats: %d", id)
return &NetlinkReader{
familyId: id,
familyID: id,
conn: conn,
}, nil
}
func (self *NetlinkReader) Stop() {
if self.conn != nil {
self.conn.Close()
func (r *NetlinkReader) Stop() {
if r.conn != nil {
r.conn.Close()
}
}
func (self *NetlinkReader) Start() error {
func (r *NetlinkReader) Start() error {
// We do the start setup for netlink in New(). Nothing to do here.
return nil
}
@ -60,9 +60,9 @@ func (self *NetlinkReader) Start() error {
// Caller can use historical data to calculate cpu load.
// path is an absolute filesystem path for a container under the CPU cgroup hierarchy.
// NOTE: non-hierarchical load is returned. It does not include load for subcontainers.
func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) {
func (r *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) {
if len(path) == 0 {
return info.LoadStats{}, fmt.Errorf("cgroup path can not be empty!")
return info.LoadStats{}, fmt.Errorf("cgroup path can not be empty")
}
cfd, err := os.Open(path)
@ -71,7 +71,7 @@ func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats,
}
defer cfd.Close()
stats, err := getLoadStats(self.familyId, cfd, self.conn)
stats, err := getLoadStats(r.familyID, cfd, r.conn)
if err != nil {
return info.LoadStats{}, err
}

View File

@ -114,13 +114,13 @@ func checkIfStartOfOomMessages(line string) bool {
// StreamOoms writes to a provided stream of OomInstance objects representing
// OOM events that are found in the logs.
// It will block and should be called from a goroutine.
func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) {
kmsgEntries := self.parser.Parse()
defer self.parser.Close()
func (p *OomParser) StreamOoms(outStream chan<- *OomInstance) {
kmsgEntries := p.parser.Parse()
defer p.parser.Close()
for msg := range kmsgEntries {
in_oom_kernel_log := checkIfStartOfOomMessages(msg.Message)
if in_oom_kernel_log {
isOomMessage := checkIfStartOfOomMessages(msg.Message)
if isOomMessage {
oomCurrentInstance := &OomInstance{
ContainerName: "/",
VictimContainerName: "/",

View File

@ -27,27 +27,27 @@ type FileInfo struct {
EntryName string
}
func (self *FileInfo) Name() string {
return self.EntryName
func (i *FileInfo) Name() string {
return i.EntryName
}
func (self *FileInfo) Size() int64 {
func (i *FileInfo) Size() int64 {
return 1234567
}
func (self *FileInfo) Mode() os.FileMode {
func (i *FileInfo) Mode() os.FileMode {
return 0
}
func (self *FileInfo) ModTime() time.Time {
func (i *FileInfo) ModTime() time.Time {
return time.Time{}
}
func (self *FileInfo) IsDir() bool {
func (i *FileInfo) IsDir() bool {
return true
}
func (self *FileInfo) Sys() interface{} {
func (i *FileInfo) Sys() interface{} {
return nil
}
@ -77,124 +77,124 @@ type FakeSysFs struct {
hugePagesNrErr error
}
func (self *FakeSysFs) GetNodesPaths() ([]string, error) {
return self.nodesPaths, self.nodePathErr
func (fs *FakeSysFs) GetNodesPaths() ([]string, error) {
return fs.nodesPaths, fs.nodePathErr
}
func (self *FakeSysFs) GetCPUsPaths(cpusPath string) ([]string, error) {
return self.cpusPaths[cpusPath], self.cpuPathErr
func (fs *FakeSysFs) GetCPUsPaths(cpusPath string) ([]string, error) {
return fs.cpusPaths[cpusPath], fs.cpuPathErr
}
func (self *FakeSysFs) GetCoreID(coreIDPath string) (string, error) {
return self.coreThread[coreIDPath], self.coreIDErr
func (fs *FakeSysFs) GetCoreID(coreIDPath string) (string, error) {
return fs.coreThread[coreIDPath], fs.coreIDErr
}
func (self *FakeSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
return self.physicalPackageIDs[cpuPath], self.physicalPackageIDErr
func (fs *FakeSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
return fs.physicalPackageIDs[cpuPath], fs.physicalPackageIDErr
}
func (self *FakeSysFs) GetMemInfo(nodePath string) (string, error) {
return self.memTotal, self.memErr
func (fs *FakeSysFs) GetMemInfo(nodePath string) (string, error) {
return fs.memTotal, fs.memErr
}
func (self *FakeSysFs) GetHugePagesInfo(hugepagesDirectory string) ([]os.FileInfo, error) {
return self.hugePages, self.hugePagesErr
func (fs *FakeSysFs) GetHugePagesInfo(hugepagesDirectory string) ([]os.FileInfo, error) {
return fs.hugePages, fs.hugePagesErr
}
func (self *FakeSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
func (fs *FakeSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
hugePageFile := fmt.Sprintf("%s%s/%s", hugepagesDirectory, hugePageName, sysfs.HugePagesNrFile)
return self.hugePagesNr[hugePageFile], self.hugePagesNrErr
return fs.hugePagesNr[hugePageFile], fs.hugePagesNrErr
}
func (self *FakeSysFs) GetBlockDevices() ([]os.FileInfo, error) {
self.info.EntryName = "sda"
return []os.FileInfo{&self.info}, nil
func (fs *FakeSysFs) GetBlockDevices() ([]os.FileInfo, error) {
fs.info.EntryName = "sda"
return []os.FileInfo{&fs.info}, nil
}
func (self *FakeSysFs) GetBlockDeviceSize(name string) (string, error) {
func (fs *FakeSysFs) GetBlockDeviceSize(name string) (string, error) {
return "1234567", nil
}
func (self *FakeSysFs) GetBlockDeviceScheduler(name string) (string, error) {
func (fs *FakeSysFs) GetBlockDeviceScheduler(name string) (string, error) {
return "noop deadline [cfq]", nil
}
func (self *FakeSysFs) GetBlockDeviceNumbers(name string) (string, error) {
func (fs *FakeSysFs) GetBlockDeviceNumbers(name string) (string, error) {
return "8:0\n", nil
}
func (self *FakeSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
return []os.FileInfo{&self.info}, nil
func (fs *FakeSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
return []os.FileInfo{&fs.info}, nil
}
func (self *FakeSysFs) GetNetworkAddress(name string) (string, error) {
func (fs *FakeSysFs) GetNetworkAddress(name string) (string, error) {
return "42:01:02:03:04:f4\n", nil
}
func (self *FakeSysFs) GetNetworkMtu(name string) (string, error) {
func (fs *FakeSysFs) GetNetworkMtu(name string) (string, error) {
return "1024\n", nil
}
func (self *FakeSysFs) GetNetworkSpeed(name string) (string, error) {
func (fs *FakeSysFs) GetNetworkSpeed(name string) (string, error) {
return "1000\n", nil
}
func (self *FakeSysFs) GetNetworkStatValue(name string, stat string) (uint64, error) {
func (fs *FakeSysFs) GetNetworkStatValue(name string, stat string) (uint64, error) {
return 1024, nil
}
func (self *FakeSysFs) GetCaches(id int) ([]os.FileInfo, error) {
self.info.EntryName = "index0"
return []os.FileInfo{&self.info}, nil
func (fs *FakeSysFs) GetCaches(id int) ([]os.FileInfo, error) {
fs.info.EntryName = "index0"
return []os.FileInfo{&fs.info}, nil
}
func (self *FakeSysFs) GetCacheInfo(cpu int, cache string) (sysfs.CacheInfo, error) {
return self.cache, nil
func (fs *FakeSysFs) GetCacheInfo(cpu int, cache string) (sysfs.CacheInfo, error) {
return fs.cache, nil
}
func (self *FakeSysFs) SetCacheInfo(cache sysfs.CacheInfo) {
self.cache = cache
func (fs *FakeSysFs) SetCacheInfo(cache sysfs.CacheInfo) {
fs.cache = cache
}
func (self *FakeSysFs) SetNodesPaths(paths []string, err error) {
self.nodesPaths = paths
self.nodePathErr = err
func (fs *FakeSysFs) SetNodesPaths(paths []string, err error) {
fs.nodesPaths = paths
fs.nodePathErr = err
}
func (self *FakeSysFs) SetCPUsPaths(paths map[string][]string, err error) {
self.cpusPaths = paths
self.cpuPathErr = err
func (fs *FakeSysFs) SetCPUsPaths(paths map[string][]string, err error) {
fs.cpusPaths = paths
fs.cpuPathErr = err
}
func (self *FakeSysFs) SetCoreThreads(coreThread map[string]string, err error) {
self.coreThread = coreThread
self.coreIDErr = err
func (fs *FakeSysFs) SetCoreThreads(coreThread map[string]string, err error) {
fs.coreThread = coreThread
fs.coreIDErr = err
}
func (self *FakeSysFs) SetPhysicalPackageIDs(physicalPackageIDs map[string]string, err error) {
self.physicalPackageIDs = physicalPackageIDs
self.physicalPackageIDErr = err
func (fs *FakeSysFs) SetPhysicalPackageIDs(physicalPackageIDs map[string]string, err error) {
fs.physicalPackageIDs = physicalPackageIDs
fs.physicalPackageIDErr = err
}
func (self *FakeSysFs) SetMemory(memTotal string, err error) {
self.memTotal = memTotal
self.memErr = err
func (fs *FakeSysFs) SetMemory(memTotal string, err error) {
fs.memTotal = memTotal
fs.memErr = err
}
func (self *FakeSysFs) SetHugePages(hugePages []os.FileInfo, err error) {
self.hugePages = hugePages
self.hugePagesErr = err
func (fs *FakeSysFs) SetHugePages(hugePages []os.FileInfo, err error) {
fs.hugePages = hugePages
fs.hugePagesErr = err
}
func (self *FakeSysFs) SetHugePagesNr(hugePagesNr map[string]string, err error) {
self.hugePagesNr = hugePagesNr
self.hugePagesNrErr = err
func (fs *FakeSysFs) SetHugePagesNr(hugePagesNr map[string]string, err error) {
fs.hugePagesNr = hugePagesNr
fs.hugePagesNrErr = err
}
func (self *FakeSysFs) SetEntryName(name string) {
self.info.EntryName = name
func (fs *FakeSysFs) SetEntryName(name string) {
fs.info.EntryName = name
}
func (self *FakeSysFs) GetSystemUUID() (string, error) {
func (fs *FakeSysFs) GetSystemUUID() (string, error) {
return "1F862619-BA9F-4526-8F85-ECEAF0C97430", nil
}

View File

@ -103,17 +103,17 @@ func NewRealSysFs() SysFs {
return &realSysFs{}
}
func (self *realSysFs) GetNodesPaths() ([]string, error) {
func (fs *realSysFs) GetNodesPaths() ([]string, error) {
pathPattern := fmt.Sprintf("%s%s", nodeDir, nodeDirPattern)
return filepath.Glob(pathPattern)
}
func (self *realSysFs) GetCPUsPaths(cpusPath string) ([]string, error) {
func (fs *realSysFs) GetCPUsPaths(cpusPath string) ([]string, error) {
pathPattern := fmt.Sprintf("%s/%s", cpusPath, cpuDirPattern)
return filepath.Glob(pathPattern)
}
func (self *realSysFs) GetCoreID(cpuPath string) (string, error) {
func (fs *realSysFs) GetCoreID(cpuPath string) (string, error) {
coreIDFilePath := fmt.Sprintf("%s%s", cpuPath, coreIDFilePath)
coreID, err := ioutil.ReadFile(coreIDFilePath)
if err != nil {
@ -122,7 +122,7 @@ func (self *realSysFs) GetCoreID(cpuPath string) (string, error) {
return strings.TrimSpace(string(coreID)), err
}
func (self *realSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
func (fs *realSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
packageIDFilePath := fmt.Sprintf("%s%s", cpuPath, packageIDFilePath)
packageID, err := ioutil.ReadFile(packageIDFilePath)
if err != nil {
@ -131,7 +131,7 @@ func (self *realSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
return strings.TrimSpace(string(packageID)), err
}
func (self *realSysFs) GetMemInfo(nodePath string) (string, error) {
func (fs *realSysFs) GetMemInfo(nodePath string) (string, error) {
meminfoPath := fmt.Sprintf("%s/%s", nodePath, meminfoFile)
meminfo, err := ioutil.ReadFile(meminfoPath)
if err != nil {
@ -140,11 +140,11 @@ func (self *realSysFs) GetMemInfo(nodePath string) (string, error) {
return strings.TrimSpace(string(meminfo)), err
}
func (self *realSysFs) GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error) {
func (fs *realSysFs) GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error) {
return ioutil.ReadDir(hugePagesDirectory)
}
func (self *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
func (fs *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
hugePageFilePath := fmt.Sprintf("%s%s/%s", hugepagesDirectory, hugePageName, HugePagesNrFile)
hugePageFile, err := ioutil.ReadFile(hugePageFilePath)
if err != nil {
@ -153,11 +153,11 @@ func (self *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName st
return strings.TrimSpace(string(hugePageFile)), err
}
func (self *realSysFs) GetBlockDevices() ([]os.FileInfo, error) {
func (fs *realSysFs) GetBlockDevices() ([]os.FileInfo, error) {
return ioutil.ReadDir(blockDir)
}
func (self *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
func (fs *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
dev, err := ioutil.ReadFile(path.Join(blockDir, name, "/dev"))
if err != nil {
return "", err
@ -165,7 +165,7 @@ func (self *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
return string(dev), nil
}
func (self *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
func (fs *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
sched, err := ioutil.ReadFile(path.Join(blockDir, name, "/queue/scheduler"))
if err != nil {
return "", err
@ -173,7 +173,7 @@ func (self *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
return string(sched), nil
}
func (self *realSysFs) GetBlockDeviceSize(name string) (string, error) {
func (fs *realSysFs) GetBlockDeviceSize(name string) (string, error) {
size, err := ioutil.ReadFile(path.Join(blockDir, name, "/size"))
if err != nil {
return "", err
@ -181,7 +181,7 @@ func (self *realSysFs) GetBlockDeviceSize(name string) (string, error) {
return string(size), nil
}
func (self *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
func (fs *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
files, err := ioutil.ReadDir(netDir)
if err != nil {
return nil, err
@ -203,7 +203,7 @@ func (self *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
return dirs, nil
}
func (self *realSysFs) GetNetworkAddress(name string) (string, error) {
func (fs *realSysFs) GetNetworkAddress(name string) (string, error) {
address, err := ioutil.ReadFile(path.Join(netDir, name, "/address"))
if err != nil {
return "", err
@ -211,7 +211,7 @@ func (self *realSysFs) GetNetworkAddress(name string) (string, error) {
return string(address), nil
}
func (self *realSysFs) GetNetworkMtu(name string) (string, error) {
func (fs *realSysFs) GetNetworkMtu(name string) (string, error) {
mtu, err := ioutil.ReadFile(path.Join(netDir, name, "/mtu"))
if err != nil {
return "", err
@ -219,7 +219,7 @@ func (self *realSysFs) GetNetworkMtu(name string) (string, error) {
return string(mtu), nil
}
func (self *realSysFs) GetNetworkSpeed(name string) (string, error) {
func (fs *realSysFs) GetNetworkSpeed(name string) (string, error) {
speed, err := ioutil.ReadFile(path.Join(netDir, name, "/speed"))
if err != nil {
return "", err
@ -227,7 +227,7 @@ func (self *realSysFs) GetNetworkSpeed(name string) (string, error) {
return string(speed), nil
}
func (self *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) {
func (fs *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) {
statPath := path.Join(netDir, dev, "/statistics", stat)
out, err := ioutil.ReadFile(statPath)
if err != nil {
@ -241,7 +241,7 @@ func (self *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, err
return s, nil
}
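The hunk elides the conversion between the read and `return s, nil`. A hedged sketch of the natural implementation, not necessarily the exact code cAdvisor uses:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseStat turns the raw contents of a statistics file (a decimal
// counter followed by a newline) into a uint64.
func parseStat(out []byte) (uint64, error) {
	return strconv.ParseUint(strings.TrimSpace(string(out)), 10, 64)
}

func main() {
	v, err := parseStat([]byte("1024\n"))
	fmt.Println(v, err) // 1024 <nil>
}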
func (self *realSysFs) GetCaches(id int) ([]os.FileInfo, error) {
func (fs *realSysFs) GetCaches(id int) ([]os.FileInfo, error) {
cpuPath := fmt.Sprintf("%s%d/cache", cacheDir, id)
return ioutil.ReadDir(cpuPath)
}
@ -256,7 +256,7 @@ func bitCount(i uint64) (count int) {
return
}
func getCpuCount(cache string) (count int, err error) {
func getCPUCount(cache string) (count int, err error) {
out, err := ioutil.ReadFile(path.Join(cache, "/shared_cpu_map"))
if err != nil {
return 0, err
@ -273,7 +273,7 @@ func getCpuCount(cache string) (count int, err error) {
return
}
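The shared_cpu_map file is conventionally a comma-separated list of 32-bit hex words; under that assumption, here is a standalone sketch of the counting logic, with bitCount mirroring the Kernighan-style loop partially visible in the hunk above:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// bitCount clears the lowest set bit until none remain.
func bitCount(i uint64) (count int) {
	for i != 0 {
		i &= i - 1
		count++
	}
	return
}

// cpuCountFromMask sums the set bits across all words of the mask.
func cpuCountFromMask(mask string) (int, error) {
	count := 0
	for _, word := range strings.Split(strings.TrimSpace(mask), ",") {
		n, err := strconv.ParseUint(word, 16, 64)
		if err != nil {
			return 0, err
		}
		count += bitCount(n)
	}
	return count, nil
}

func main() {
	n, err := cpuCountFromMask("00000000,0000000f\n")
	fmt.Println(n, err) // 4 <nil>
}

Since Go 1.9, math/bits.OnesCount64 could replace the hand-rolled loop.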
func (self *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
func (fs *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
cachePath := fmt.Sprintf("%s%d/cache/%s", cacheDir, id, name)
out, err := ioutil.ReadFile(path.Join(cachePath, "/size"))
if err != nil {
@ -301,7 +301,7 @@ func (self *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
return CacheInfo{}, err
}
cacheType := strings.TrimSpace(string(out))
cpuCount, err := getCpuCount(cachePath)
cpuCount, err := getCPUCount(cachePath)
if err != nil {
return CacheInfo{}, err
}
@ -313,7 +313,7 @@ func (self *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
}, nil
}
func (self *realSysFs) GetSystemUUID() (string, error) {
func (fs *realSysFs) GetSystemUUID() (string, error) {
if id, err := ioutil.ReadFile(path.Join(dmiDir, "id", "product_uuid")); err == nil {
return strings.TrimSpace(string(id)), nil
} else if id, err = ioutil.ReadFile(path.Join(ppcDevTree, "system-id")); err == nil {


@ -57,14 +57,14 @@ func GetBlockDeviceInfo(sysfs sysfs.SysFs) (map[string]info.DiskInfo, error) {
if strings.HasPrefix(name, "loop") || strings.HasPrefix(name, "ram") || strings.HasPrefix(name, "sr") {
continue
}
disk_info := info.DiskInfo{
diskInfo := info.DiskInfo{
Name: name,
}
dev, err := sysfs.GetBlockDeviceNumbers(name)
if err != nil {
return nil, err
}
n, err := fmt.Sscanf(dev, "%d:%d", &disk_info.Major, &disk_info.Minor)
n, err := fmt.Sscanf(dev, "%d:%d", &diskInfo.Major, &diskInfo.Minor)
if err != nil || n != 2 {
return nil, fmt.Errorf("could not parse device numbers from %s for device %s", dev, name)
}
@ -78,18 +78,18 @@ func GetBlockDeviceInfo(sysfs sysfs.SysFs) (map[string]info.DiskInfo, error) {
return nil, err
}
// size is in 512 bytes blocks.
disk_info.Size = size * 512
diskInfo.Size = size * 512
disk_info.Scheduler = "none"
diskInfo.Scheduler = "none"
blkSched, err := sysfs.GetBlockDeviceScheduler(name)
if err == nil {
matches := schedulerRegExp.FindSubmatch([]byte(blkSched))
if len(matches) >= 2 {
disk_info.Scheduler = string(matches[1])
diskInfo.Scheduler = string(matches[1])
}
}
device := fmt.Sprintf("%d:%d", disk_info.Major, disk_info.Minor)
diskMap[device] = disk_info
device := fmt.Sprintf("%d:%d", diskInfo.Major, diskInfo.Minor)
diskMap[device] = diskInfo
}
return diskMap, nil
}
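The n != 2 check above treats anything other than exactly two integers as a malformed dev file. A minimal sketch of that parsing in isolation:

package main

import "fmt"

func parseDev(dev string) (major, minor int, err error) {
	n, err := fmt.Sscanf(dev, "%d:%d", &major, &minor)
	if err != nil || n != 2 {
		return 0, 0, fmt.Errorf("could not parse device numbers from %q", dev)
	}
	return major, minor, nil
}

func main() {
	major, minor, err := parseDev("8:0\n")
	fmt.Println(major, minor, err) // 8 0 <nil>
}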


@ -712,7 +712,7 @@ func TestGetCacheInfo(t *testing.T) {
}
func TestGetNetworkStats(t *testing.T) {
expected_stats := info.InterfaceStats{
expectedStats := info.InterfaceStats{
Name: "eth0",
RxBytes: 1024,
RxPackets: 1024,
@ -728,7 +728,7 @@ func TestGetNetworkStats(t *testing.T) {
if err != nil {
t.Errorf("call to getNetworkStats() failed with %s", err)
}
if expected_stats != netStats {
t.Errorf("expected to get stats %+v, got %+v", expected_stats, netStats)
if expectedStats != netStats {
t.Errorf("expected to get stats %+v, got %+v", expectedStats, netStats)
}
}


@ -57,60 +57,60 @@ func NewTimedStore(age time.Duration, maxItems int) *TimedStore {
}
// Adds an element to the start of the buffer (removing one from the end if necessary).
func (self *TimedStore) Add(timestamp time.Time, item interface{}) {
func (s *TimedStore) Add(timestamp time.Time, item interface{}) {
data := timedStoreData{
timestamp: timestamp,
data: item,
}
// Common case: data is added in order.
if len(self.buffer) == 0 || !timestamp.Before(self.buffer[len(self.buffer)-1].timestamp) {
self.buffer = append(self.buffer, data)
if len(s.buffer) == 0 || !timestamp.Before(s.buffer[len(s.buffer)-1].timestamp) {
s.buffer = append(s.buffer, data)
} else {
// Data is out of order; insert it in the correct position.
index := sort.Search(len(self.buffer), func(index int) bool {
return self.buffer[index].timestamp.After(timestamp)
index := sort.Search(len(s.buffer), func(index int) bool {
return s.buffer[index].timestamp.After(timestamp)
})
self.buffer = append(self.buffer, timedStoreData{}) // Make room to shift the elements
copy(self.buffer[index+1:], self.buffer[index:]) // Shift the elements over
self.buffer[index] = data
s.buffer = append(s.buffer, timedStoreData{}) // Make room to shift the elements
copy(s.buffer[index+1:], s.buffer[index:]) // Shift the elements over
s.buffer[index] = data
}
// Remove any elements before eviction time.
// TODO(rjnagal): This is assuming that the added entry has timestamp close to now.
evictTime := timestamp.Add(-self.age)
index := sort.Search(len(self.buffer), func(index int) bool {
return self.buffer[index].timestamp.After(evictTime)
evictTime := timestamp.Add(-s.age)
index := sort.Search(len(s.buffer), func(index int) bool {
return s.buffer[index].timestamp.After(evictTime)
})
if index < len(self.buffer) {
self.buffer = self.buffer[index:]
if index < len(s.buffer) {
s.buffer = s.buffer[index:]
}
// Remove any elements if over our max size.
if self.maxItems >= 0 && len(self.buffer) > self.maxItems {
startIndex := len(self.buffer) - self.maxItems
self.buffer = self.buffer[startIndex:]
if s.maxItems >= 0 && len(s.buffer) > s.maxItems {
startIndex := len(s.buffer) - s.maxItems
s.buffer = s.buffer[startIndex:]
}
}
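The out-of-order branch is the classic sorted-slice insert: sort.Search finds the first later element and copy shifts the tail right by one. A minimal sketch of the same pattern with ints, assuming an ascending slice:

package main

import (
	"fmt"
	"sort"
)

func insertSorted(buf []int, v int) []int {
	i := sort.Search(len(buf), func(j int) bool { return buf[j] > v })
	buf = append(buf, 0)     // make room
	copy(buf[i+1:], buf[i:]) // shift the tail over
	buf[i] = v
	return buf
}

func main() {
	fmt.Println(insertSorted([]int{1, 3, 5}, 4)) // [1 3 4 5]
}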
// Returns up to maxResults elements in the specified time period (inclusive).
// Results are from first to last. maxResults of -1 means no limit.
func (self *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
func (s *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
// No stats, return empty.
if len(self.buffer) == 0 {
if len(s.buffer) == 0 {
return []interface{}{}
}
var startIndex int
if start.IsZero() {
// None specified, start at the beginning.
startIndex = len(self.buffer) - 1
startIndex = len(s.buffer) - 1
} else {
// Start is the index before the elements smaller than it. We do this by
// finding the first element smaller than start and taking the index
// before that element.
startIndex = sort.Search(len(self.buffer), func(index int) bool {
startIndex = sort.Search(len(s.buffer), func(index int) bool {
// buffer[index] < start
return self.getData(index).timestamp.Before(start)
return s.getData(index).timestamp.Before(start)
}) - 1
// Check if start is after all the data we have.
if startIndex < 0 {
@ -124,12 +124,12 @@ func (self *TimedStore) InTimeRange(start, end time.Time, maxResults int) []inte
endIndex = 0
} else {
// End is the first index smaller than or equal to it (so, not larger).
endIndex = sort.Search(len(self.buffer), func(index int) bool {
endIndex = sort.Search(len(s.buffer), func(index int) bool {
// buffer[index] <= t -> !(buffer[index] > t)
return !self.getData(index).timestamp.After(end)
return !s.getData(index).timestamp.After(end)
})
// Check if end is before all the data we have.
if endIndex == len(self.buffer) {
if endIndex == len(s.buffer) {
return []interface{}{}
}
}
@ -144,21 +144,21 @@ func (self *TimedStore) InTimeRange(start, end time.Time, maxResults int) []inte
// Return in sorted timestamp order so from the "back" to "front".
result := make([]interface{}, numResults)
for i := 0; i < numResults; i++ {
result[i] = self.Get(startIndex - i)
result[i] = s.Get(startIndex - i)
}
return result
}
// Gets the element at the specified index. Note that elements are output in LIFO order.
func (self *TimedStore) Get(index int) interface{} {
return self.getData(index).data
func (s *TimedStore) Get(index int) interface{} {
return s.getData(index).data
}
// Gets the data at the specified index. Note that elements are output in LIFO order.
func (self *TimedStore) getData(index int) timedStoreData {
return self.buffer[len(self.buffer)-index-1]
func (s *TimedStore) getData(index int) timedStoreData {
return s.buffer[len(s.buffer)-index-1]
}
func (self *TimedStore) Size() int {
return len(self.buffer)
func (s *TimedStore) Size() int {
return len(s.buffer)
}
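A hedged usage sketch of the store; the NewTimedStore signature is taken from the hunk header above, while the github.com/google/cadvisor/utils import path is an assumption:

package main

import (
	"fmt"
	"time"

	"github.com/google/cadvisor/utils" // assumed import path for TimedStore
)

func main() {
	s := utils.NewTimedStore(time.Minute, 10) // evict after 1 minute, cap at 10 items
	now := time.Now()
	s.Add(now, "a")
	s.Add(now.Add(time.Second), "b")
	// Elements come back newest first, so Get(0) is "b".
	fmt.Println(s.Get(0), s.Size())
}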


@ -134,8 +134,8 @@ func areCgroupsPresent(available map[string]int, desired []string) (bool, string
return true, ""
}
func validateCpuCfsBandwidth(available_cgroups map[string]int) string {
ok, _ := areCgroupsPresent(available_cgroups, []string{"cpu"})
func validateCPUCFSBandwidth(availableCgroups map[string]int) string {
ok, _ := areCgroupsPresent(availableCgroups, []string{"cpu"})
if !ok {
return "\tCpu cfs bandwidth status unknown: cpu cgroup not enabled.\n"
}
@ -151,8 +151,8 @@ func validateCpuCfsBandwidth(available_cgroups map[string]int) string {
return "\tCpu cfs bandwidth is enabled.\n"
}
func validateMemoryAccounting(available_cgroups map[string]int) string {
ok, _ := areCgroupsPresent(available_cgroups, []string{"memory"})
func validateMemoryAccounting(availableCgroups map[string]int) string {
ok, _ := areCgroupsPresent(availableCgroups, []string{"memory"})
if !ok {
return "\tHierarchical memory accounting status unknown: memory cgroup not enabled.\n"
}
@ -177,29 +177,29 @@ func validateMemoryAccounting(available_cgroups map[string]int) string {
}
func validateCgroups() (string, string) {
required_cgroups := []string{"cpu", "cpuacct"}
recommended_cgroups := []string{"memory", "blkio", "cpuset", "devices", "freezer"}
available_cgroups, err := getEnabledCgroups()
desc := fmt.Sprintf("\tFollowing cgroups are required: %v\n\tFollowing other cgroups are recommended: %v\n", required_cgroups, recommended_cgroups)
requiredCgroups := []string{"cpu", "cpuacct"}
recommendedCgroups := []string{"memory", "blkio", "cpuset", "devices", "freezer"}
availableCgroups, err := getEnabledCgroups()
desc := fmt.Sprintf("\tFollowing cgroups are required: %v\n\tFollowing other cgroups are recommended: %v\n", requiredCgroups, recommendedCgroups)
if err != nil {
desc = fmt.Sprintf("Could not parse /proc/cgroups.\n%s", desc)
return Unknown, desc
}
ok, out := areCgroupsPresent(available_cgroups, required_cgroups)
ok, out := areCgroupsPresent(availableCgroups, requiredCgroups)
if !ok {
out += desc
return Unsupported, out
}
ok, out = areCgroupsPresent(available_cgroups, recommended_cgroups)
ok, out = areCgroupsPresent(availableCgroups, recommendedCgroups)
if !ok {
// supported, but not recommended.
out += desc
return Supported, out
}
out = fmt.Sprintf("Available cgroups: %v\n", available_cgroups)
out = fmt.Sprintf("Available cgroups: %v\n", availableCgroups)
out += desc
out += validateMemoryAccounting(available_cgroups)
out += validateCpuCfsBandwidth(available_cgroups)
out += validateMemoryAccounting(availableCgroups)
out += validateCPUCFSBandwidth(availableCgroups)
return Recommended, out
}
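getEnabledCgroups is referenced above but not shown in this diff. A plausible minimal sketch, assuming the four-column layout of /proc/cgroups (subsys_name, hierarchy, num_cgroups, enabled) and a name-to-enabled map like the one areCgroupsPresent consumes:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func enabledCgroups() (map[string]int, error) {
	f, err := os.Open("/proc/cgroups")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	out := map[string]int{}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		// Skip the "#subsys_name hierarchy num_cgroups enabled" header.
		if strings.HasPrefix(line, "#") {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) != 4 {
			continue
		}
		enabled, err := strconv.Atoi(fields[3])
		if err != nil {
			continue
		}
		out[fields[0]] = enabled
	}
	return out, sc.Err()
}

func main() {
	m, err := enabledCgroups()
	fmt.Println(m, err)
}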