Merge pull request #2091 from dims/move-from-glog-to-klog

Move from glog to klog
Commit e310755a36 by David Ashpole, 2018-11-08 15:25:47 -08:00, committed via GitHub.
66 changed files with 496 additions and 327 deletions

Godeps/Godeps.json (generated)

@ -439,10 +439,6 @@
"Comment": "v0.4-3-gc0656edd",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/golang/glog",
"Rev": "fca8c8854093a154ff1eb580aae10276ad6b1b5f"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
@ -1002,6 +998,10 @@
"Comment": "v2.0.12",
"Rev": "3cfe88295d20b6299bd935131fc0fd17316405ad"
},
{
"ImportPath": "k8s.io/klog",
"Rev": "b9b56d5dfc9208f60ea747056670942d8b0afdc8"
},
{
"ImportPath": "k8s.io/utils/clock",
"Rev": "aedf551cdb8b0119df3a19c65fde413a13b34997"


@ -26,8 +26,8 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/golang/glog"
"github.com/mindprince/gonvml"
"k8s.io/klog"
)
type NvidiaManager struct {
@ -50,7 +50,7 @@ const nvidiaVendorId = "0x10de"
// Setup initializes NVML if nvidia devices are present on the node.
func (nm *NvidiaManager) Setup() {
if !detectDevices(nvidiaVendorId) {
glog.V(4).Info("No NVIDIA devices found.")
klog.V(4).Info("No NVIDIA devices found.")
return
}
@ -63,7 +63,7 @@ func (nm *NvidiaManager) Setup() {
func detectDevices(vendorId string) bool {
devices, err := ioutil.ReadDir(sysFsPCIDevicesPath)
if err != nil {
glog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err)
klog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err)
return false
}
@ -71,11 +71,11 @@ func detectDevices(vendorId string) bool {
vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor")
content, err := ioutil.ReadFile(vendorPath)
if err != nil {
glog.V(4).Infof("Error while reading %q: %v", vendorPath, err)
klog.V(4).Infof("Error while reading %q: %v", vendorPath, err)
continue
}
if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) {
glog.V(3).Infof("Found device with vendorId %q", vendorId)
klog.V(3).Infof("Found device with vendorId %q", vendorId)
return true
}
}
@ -88,26 +88,26 @@ var initializeNVML = func(nm *NvidiaManager) {
if err := gonvml.Initialize(); err != nil {
// This is under a logging level because otherwise we may cause
// log spam if the drivers/nvml is not installed on the system.
glog.V(4).Infof("Could not initialize NVML: %v", err)
klog.V(4).Infof("Could not initialize NVML: %v", err)
return
}
nm.nvmlInitialized = true
numDevices, err := gonvml.DeviceCount()
if err != nil {
glog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err)
klog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err)
return
}
glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices)
klog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices)
nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices)
for i := 0; i < int(numDevices); i++ {
device, err := gonvml.DeviceHandleByIndex(uint(i))
if err != nil {
glog.Warningf("Failed to get nvidia device handle %d: %v", i, err)
klog.Warningf("Failed to get nvidia device handle %d: %v", i, err)
continue
}
minorNumber, err := device.MinorNumber()
if err != nil {
glog.Warningf("Failed to get nvidia device minor number: %v", err)
klog.Warningf("Failed to get nvidia device minor number: %v", err)
continue
}
nm.nvidiaDevices[int(minorNumber)] = device
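These swapped call sites keep glog's verbosity convention intact: V(1) for high-signal startup messages, V(3)/V(4) for debugging detail, and Warningf for unconditional output. A minimal, self-contained sketch of how those levels behave under klog (the flag values shown are illustrative):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()
	flag.Parse() // e.g. run with -logtostderr=true -v=4

	klog.Warningf("emitted regardless of -v: %v", "warnings bypass verbosity gating")
	klog.V(1).Info("high-signal startup message: shown at -v>=1")
	klog.V(4).Info("debug detail: shown only at -v>=4")
}
```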


@ -33,7 +33,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -68,7 +68,7 @@ const (
func handleRequest(supportedApiVersions map[string]ApiVersion, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
start := time.Now()
defer func() {
glog.V(4).Infof("Request took %s", time.Since(start))
klog.V(4).Infof("Request took %s", time.Since(start))
}()
request := r.URL.Path
@ -157,7 +157,7 @@ func streamResults(eventChannel *events.EventChannel, w http.ResponseWriter, r *
case ev := <-eventChannel.GetChannel():
err := enc.Encode(ev)
if err != nil {
glog.Errorf("error encoding message %+v for result stream: %v", ev, err)
klog.Errorf("error encoding message %+v for result stream: %v", ev, err)
}
flusher.Flush()
}
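streamResults encodes each event onto the open HTTP response as it arrives and flushes after every write so clients see events immediately. A stripped-down sketch of that pattern; the event type and endpoint here are stand-ins, not cAdvisor's real API:

```go
package main

import (
	"encoding/json"
	"net/http"
	"time"

	"k8s.io/klog"
)

// event is a stand-in for cAdvisor's info.Event.
type event struct {
	Timestamp time.Time `json:"timestamp"`
	Kind      string    `json:"kind"`
}

func streamEvents(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	enc := json.NewEncoder(w)
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-r.Context().Done():
			return
		case t := <-ticker.C:
			ev := event{Timestamp: t, Kind: "demo"}
			if err := enc.Encode(ev); err != nil {
				klog.Errorf("error encoding message %+v for result stream: %v", ev, err)
			}
			flusher.Flush() // push each event to the client immediately
		}
	}
}

func main() {
	http.HandleFunc("/events", streamEvents)
	klog.Fatal(http.ListenAndServe("localhost:8080", http.DefaultServeMux))
}
```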


@ -24,7 +24,7 @@ import (
"github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/manager"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -85,7 +85,7 @@ func (self *version1_0) SupportedRequestTypes() []string {
func (self *version1_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
switch requestType {
case machineApi:
glog.V(4).Infof("Api - Machine")
klog.V(4).Infof("Api - Machine")
// Get the MachineInfo
machineInfo, err := m.GetMachineInfo()
@ -99,7 +99,7 @@ func (self *version1_0) HandleRequest(requestType string, request []string, m ma
}
case containersApi:
containerName := getContainerName(request)
glog.V(4).Infof("Api - Container(%s)", containerName)
klog.V(4).Infof("Api - Container(%s)", containerName)
// Get the query request.
query, err := getContainerInfoRequest(r.Body)
@ -149,7 +149,7 @@ func (self *version1_1) HandleRequest(requestType string, request []string, m ma
switch requestType {
case subcontainersApi:
containerName := getContainerName(request)
glog.V(4).Infof("Api - Subcontainers(%s)", containerName)
klog.V(4).Infof("Api - Subcontainers(%s)", containerName)
// Get the query request.
query, err := getContainerInfoRequest(r.Body)
@ -198,7 +198,7 @@ func (self *version1_2) SupportedRequestTypes() []string {
func (self *version1_2) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
switch requestType {
case dockerApi:
glog.V(4).Infof("Api - Docker(%v)", request)
klog.V(4).Infof("Api - Docker(%v)", request)
// Get the query request.
query, err := getContainerInfoRequest(r.Body)
@ -279,7 +279,7 @@ func handleEventRequest(request []string, m manager.Manager, w http.ResponseWrit
return err
}
query.ContainerName = path.Join("/", getContainerName(request))
glog.V(4).Infof("Api - Events(%v)", query)
klog.V(4).Infof("Api - Events(%v)", query)
if !stream {
pastEvents, err := m.GetPastEvents(query)
if err != nil {
@ -319,14 +319,14 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
}
switch requestType {
case versionApi:
glog.V(4).Infof("Api - Version")
klog.V(4).Infof("Api - Version")
versionInfo, err := m.GetVersionInfo()
if err != nil {
return err
}
return writeResult(versionInfo.CadvisorVersion, w)
case attributesApi:
glog.V(4).Info("Api - Attributes")
klog.V(4).Info("Api - Attributes")
machineInfo, err := m.GetMachineInfo()
if err != nil {
@ -339,7 +339,7 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
info := v2.GetAttributes(machineInfo, versionInfo)
return writeResult(info, w)
case machineApi:
glog.V(4).Info("Api - Machine")
klog.V(4).Info("Api - Machine")
// TODO(rjnagal): Move machineInfo from v1.
machineInfo, err := m.GetMachineInfo()
@ -349,7 +349,7 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
return writeResult(machineInfo, w)
case summaryApi:
containerName := getContainerName(request)
glog.V(4).Infof("Api - Summary for container %q, options %+v", containerName, opt)
klog.V(4).Infof("Api - Summary for container %q, options %+v", containerName, opt)
stats, err := m.GetDerivedStats(containerName, opt)
if err != nil {
@ -358,13 +358,13 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
return writeResult(stats, w)
case statsApi:
name := getContainerName(request)
glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt)
klog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt)
infos, err := m.GetRequestedContainersInfo(name, opt)
if err != nil {
if len(infos) == 0 {
return err
}
glog.Errorf("Error calling GetRequestedContainersInfo: %v", err)
klog.Errorf("Error calling GetRequestedContainersInfo: %v", err)
}
contStats := make(map[string][]v2.DeprecatedContainerStats, 0)
for name, cinfo := range infos {
@ -373,7 +373,7 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
return writeResult(contStats, w)
case customMetricsApi:
containerName := getContainerName(request)
glog.V(4).Infof("Api - Custom Metrics: Looking for metrics for container %q, options %+v", containerName, opt)
klog.V(4).Infof("Api - Custom Metrics: Looking for metrics for container %q, options %+v", containerName, opt)
infos, err := m.GetContainerInfoV2(containerName, opt)
if err != nil {
return err
@ -413,7 +413,7 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
return writeResult(contMetrics, w)
case specApi:
containerName := getContainerName(request)
glog.V(4).Infof("Api - Spec for container %q, options %+v", containerName, opt)
klog.V(4).Infof("Api - Spec for container %q, options %+v", containerName, opt)
specs, err := m.GetContainerSpec(containerName, opt)
if err != nil {
return err
@ -451,7 +451,7 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
// ignore recursive.
// TODO(rjnagal): consider count to limit ps output.
name := getContainerName(request)
glog.V(4).Infof("Api - Spec for container %q, options %+v", name, opt)
klog.V(4).Infof("Api - Spec for container %q, options %+v", name, opt)
ps, err := m.GetProcessList(name, opt)
if err != nil {
return fmt.Errorf("process listing failed: %v", err)
@ -489,24 +489,24 @@ func (self *version2_1) HandleRequest(requestType string, request []string, m ma
switch requestType {
case machineStatsApi:
glog.V(4).Infof("Api - MachineStats(%v)", request)
klog.V(4).Infof("Api - MachineStats(%v)", request)
cont, err := m.GetRequestedContainersInfo("/", opt)
if err != nil {
if len(cont) == 0 {
return err
}
glog.Errorf("Error calling GetRequestedContainersInfo: %v", err)
klog.Errorf("Error calling GetRequestedContainersInfo: %v", err)
}
return writeResult(v2.MachineStatsFromV1(cont["/"]), w)
case statsApi:
name := getContainerName(request)
glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt)
klog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt)
conts, err := m.GetRequestedContainersInfo(name, opt)
if err != nil {
if len(conts) == 0 {
return err
}
glog.Errorf("Error calling GetRequestedContainersInfo: %v", err)
klog.Errorf("Error calling GetRequestedContainersInfo: %v", err)
}
contStats := make(map[string]v2.ContainerInfo, len(conts))
for name, cont := range conts {


@ -23,7 +23,7 @@ import (
"github.com/google/cadvisor/storage"
"github.com/google/cadvisor/utils"
"github.com/golang/glog"
"k8s.io/klog"
)
// ErrDataNotFound is the error resulting if failed to find a container in memory cache.
@ -91,7 +91,7 @@ func (self *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.Conta
// may want to start a pool of goroutines to do write
// operations.
if err := self.backend.AddStats(cInfo, stats); err != nil {
glog.Error(err)
klog.Error(err)
}
}
return cstore.AddStats(stats)


@ -34,8 +34,9 @@ import (
"crypto/tls"
"github.com/golang/glog"
"github.com/google/cadvisor/metrics"
"k8s.io/klog"
)
var argIp = flag.String("listen_ip", "", "IP to listen on, defaults to all IPs")
@ -118,7 +119,8 @@ func init() {
}
func main() {
defer glog.Flush()
klog.InitFlags(nil)
defer klog.Flush()
flag.Parse()
if *versionFlag {
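This hunk is the one real behavioral change in the migration: glog registers its flags (-v, -logtostderr, -vmodule, ...) in an init() function, while klog only registers them when InitFlags is called, and that call must precede flag.Parse(). A minimal sketch of the required ordering:

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog.InitFlags(nil) registers klog's flags on flag.CommandLine;
	// glog did this implicitly in init(), klog makes it explicit.
	klog.InitFlags(nil)
	defer klog.Flush() // replaces `defer glog.Flush()`

	// Parsing before InitFlags would leave -v, -logtostderr, etc. undefined.
	flag.Parse()

	klog.V(1).Infof("Starting up on port %d", 8080)
}
```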
@ -132,7 +134,7 @@ func main() {
memoryStorage, err := NewMemoryStorage()
if err != nil {
glog.Fatalf("Failed to initialize storage driver: %s", err)
klog.Fatalf("Failed to initialize storage driver: %s", err)
}
sysFs := sysfs.NewRealSysFs()
@ -141,7 +143,7 @@ func main() {
containerManager, err := manager.New(memoryStorage, sysFs, *maxHousekeepingInterval, *allowDynamicHousekeeping, includedMetrics, &collectorHttpClient, []string{"/"})
if err != nil {
glog.Fatalf("Failed to create a Container Manager: %s", err)
klog.Fatalf("Failed to create a Container Manager: %s", err)
}
mux := http.NewServeMux()
@ -156,7 +158,7 @@ func main() {
// Register all HTTP handlers.
err = cadvisorhttp.RegisterHandlers(mux, containerManager, *httpAuthFile, *httpAuthRealm, *httpDigestFile, *httpDigestRealm)
if err != nil {
glog.Fatalf("Failed to register HTTP handlers: %v", err)
klog.Fatalf("Failed to register HTTP handlers: %v", err)
}
containerLabelFunc := metrics.DefaultContainerLabels
@ -167,16 +169,16 @@ func main() {
// Start the manager.
if err := containerManager.Start(); err != nil {
glog.Fatalf("Failed to start container manager: %v", err)
klog.Fatalf("Failed to start container manager: %v", err)
}
// Install signal handler.
installSignalHandler(containerManager)
glog.V(1).Infof("Starting cAdvisor version: %s-%s on port %d", version.Info["version"], version.Info["revision"], *argPort)
klog.V(1).Infof("Starting cAdvisor version: %s-%s on port %d", version.Info["version"], version.Info["revision"], *argPort)
addr := fmt.Sprintf("%s:%d", *argIp, *argPort)
glog.Fatal(http.ListenAndServe(addr, mux))
klog.Fatal(http.ListenAndServe(addr, mux))
}
func setMaxProcs() {
@ -193,7 +195,7 @@ func setMaxProcs() {
// Check if the setting was successful.
actualNumProcs := runtime.GOMAXPROCS(0)
if actualNumProcs != numProcs {
glog.Warningf("Specified max procs of %v but using %v", numProcs, actualNumProcs)
klog.Warningf("Specified max procs of %v but using %v", numProcs, actualNumProcs)
}
}
@ -205,9 +207,9 @@ func installSignalHandler(containerManager manager.Manager) {
go func() {
sig := <-c
if err := containerManager.Stop(); err != nil {
glog.Errorf("Failed to stop container manager: %v", err)
klog.Errorf("Failed to stop container manager: %v", err)
}
glog.Infof("Exiting given signal: %v", sig)
klog.Infof("Exiting given signal: %v", sig)
os.Exit(0)
}()
}
@ -220,11 +222,11 @@ func createCollectorHttpClient(collectorCert, collectorKey string) http.Client {
if collectorCert != "" {
if collectorKey == "" {
glog.Fatal("The collector_key value must be specified if the collector_cert value is set.")
klog.Fatal("The collector_key value must be specified if the collector_cert value is set.")
}
cert, err := tls.LoadX509KeyPair(collectorCert, collectorKey)
if err != nil {
glog.Fatalf("Failed to use the collector certificate and key: %s", err)
klog.Fatalf("Failed to use the collector certificate and key: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}


@ -30,7 +30,7 @@ import (
"github.com/google/cadvisor/info/v1"
"github.com/golang/glog"
"k8s.io/klog"
"time"
)
@ -230,7 +230,7 @@ func (self *Client) getEventStreamingData(url string, einfo chan *v1.Event) erro
break
}
// if called without &stream=true will not be able to parse event and will trigger fatal
glog.Fatalf("Received error %v", err)
klog.Fatalf("Received error %v", err)
}
einfo <- m
}


@ -20,46 +20,47 @@ import (
"github.com/google/cadvisor/client"
info "github.com/google/cadvisor/info/v1"
"github.com/golang/glog"
"k8s.io/klog"
)
func staticClientExample() {
staticClient, err := client.NewClient("http://localhost:8080/")
if err != nil {
glog.Errorf("tried to make client and got error %v", err)
klog.Errorf("tried to make client and got error %v", err)
return
}
einfo, err := staticClient.EventStaticInfo("?oom_events=true")
if err != nil {
glog.Errorf("got error retrieving event info: %v", err)
klog.Errorf("got error retrieving event info: %v", err)
return
}
for idx, ev := range einfo {
glog.Infof("static einfo %v: %v", idx, ev)
klog.Infof("static einfo %v: %v", idx, ev)
}
}
func streamingClientExample(url string) {
streamingClient, err := client.NewClient("http://localhost:8080/")
if err != nil {
glog.Errorf("tried to make client and got error %v", err)
klog.Errorf("tried to make client and got error %v", err)
return
}
einfo := make(chan *info.Event)
go func() {
err = streamingClient.EventStreamingInfo(url, einfo)
if err != nil {
glog.Errorf("got error retrieving event info: %v", err)
klog.Errorf("got error retrieving event info: %v", err)
return
}
}()
for ev := range einfo {
glog.Infof("streaming einfo: %v\n", ev)
klog.Infof("streaming einfo: %v\n", ev)
}
}
// demonstrates how to use event clients
func main() {
klog.InitFlags(nil)
flag.Parse()
staticClientExample()
streamingClientExample("?creation_events=true&stream=true&oom_events=true&deletion_events=true")
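Standalone binaries like this example register klog's flags on the global FlagSet via InitFlags(nil). When such code is embedded in a host binary that manages its own flags, klog can be wired to a private FlagSet instead; a sketch under that assumption (the FlagSet name is invented):

```go
package main

import (
	"flag"
	"os"

	"k8s.io/klog"
)

func main() {
	// Register klog's flags on a dedicated FlagSet rather than
	// flag.CommandLine, avoiding collisions with the host binary's flags.
	fs := flag.NewFlagSet("events-example", flag.ExitOnError)
	klog.InitFlags(fs)
	fs.Parse(os.Args[1:])

	klog.Info("klog flags registered on a private FlagSet")
}
```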


@ -22,7 +22,7 @@ import (
"github.com/google/cadvisor/fs"
"github.com/golang/glog"
"k8s.io/klog"
)
type FsHandler interface {
@ -118,7 +118,7 @@ func (fh *realFsHandler) trackUsage() {
case <-time.After(fh.period):
start := time.Now()
if err := fh.update(); err != nil {
glog.Errorf("failed to collect filesystem stats - %v", err)
klog.Errorf("failed to collect filesystem stats - %v", err)
fh.period = fh.period * 2
if fh.period > maxBackoffFactor*fh.minPeriod {
fh.period = maxBackoffFactor * fh.minPeriod
@ -132,7 +132,7 @@ func (fh *realFsHandler) trackUsage() {
// if the long duration is persistent either because of slow
// disk or lots of containers.
longOp = longOp + time.Second
glog.V(2).Infof("du and find on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp)
klog.V(2).Infof("du and find on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp)
}
}
}
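trackUsage backs off exponentially when collection fails: the polling period doubles per failure, capped at maxBackoffFactor times the minimum period, and resets on success. The same loop reduced to a self-contained sketch (the constant value and the failing collector are stand-ins):

```go
package main

import (
	"errors"
	"time"

	"k8s.io/klog"
)

const maxBackoffFactor = 20 // stand-in for the handler's cap

// update simulates the filesystem-usage collection that may fail.
func update() error { return errors.New("simulated du failure") }

func main() {
	minPeriod := 100 * time.Millisecond
	period := minPeriod
	for i := 0; i < 5; i++ {
		<-time.After(period)
		if err := update(); err != nil {
			klog.Errorf("failed to collect filesystem stats - %v", err)
			period = period * 2 // back off on failure...
			if period > maxBackoffFactor*minPeriod {
				period = maxBackoffFactor * minPeriod // ...up to the cap
			}
			continue
		}
		period = minPeriod // success resets the polling period
	}
}
```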


@ -26,10 +26,10 @@ import (
"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/utils"
"github.com/golang/glog"
"github.com/karrick/godirwalk"
"github.com/pkg/errors"
"k8s.io/klog"
)
func DebugInfo(watches map[string][]string) map[string][]string {
@ -87,7 +87,7 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
if quota != "" && quota != "-1" {
val, err := strconv.ParseUint(quota, 10, 64)
if err != nil {
glog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
klog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
}
spec.Cpu.Quota = val
}
@ -134,7 +134,7 @@ func readString(dirpath string, file string) string {
if err != nil {
// Ignore non-existent files
if !os.IsNotExist(err) {
glog.Errorf("readString: Failed to read %q: %s", cgroupFile, err)
klog.Errorf("readString: Failed to read %q: %s", cgroupFile, err)
}
return ""
}
@ -149,7 +149,7 @@ func readUInt64(dirpath string, file string) uint64 {
val, err := strconv.ParseUint(out, 10, 64)
if err != nil {
glog.Errorf("readUInt64: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err)
klog.Errorf("readUInt64: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err)
return 0
}


@ -21,8 +21,8 @@ import (
"regexp"
"strings"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/klog"
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/libcontainer"
@ -133,7 +133,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
}
glog.V(1).Infof("Registering containerd factory")
klog.V(1).Infof("Registering containerd factory")
f := &containerdFactory{
cgroupSubsystems: cgroupSubsystems,
client: client,


@ -26,7 +26,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager/watcher"
"github.com/golang/glog"
"k8s.io/klog"
)
// The namespace under which crio aliases are unique.
@ -154,7 +154,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
}
glog.V(1).Infof("Registering CRI-O factory")
klog.V(1).Infof("Registering CRI-O factory")
f := &crioFactory{
client: client,
cgroupSubsystems: cgroupSubsystems,


@ -176,7 +176,7 @@ func newCrioContainerHandler(
}
// TODO for env vars we wanted to show from container.Config.Env from whitelist
//for _, exposedEnv := range metadataEnvs {
//glog.V(4).Infof("TODO env whitelist: %v", exposedEnv)
//klog.V(4).Infof("TODO env whitelist: %v", exposedEnv)
//}
return handler, nil


@ -36,8 +36,8 @@ import (
"github.com/google/cadvisor/zfs"
docker "github.com/docker/docker/client"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/klog"
)
var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "docker endpoint")
@ -337,7 +337,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
if err != nil {
glog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
klog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
}
// Safe to ignore error - driver status should always be populated.
@ -349,11 +349,11 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
if storageDriver(dockerInfo.Driver) == zfsStorageDriver {
zfsWatcher, err = startZfsWatcher(dockerInfo)
if err != nil {
glog.Errorf("zfs filesystem stats will not be reported: %v", err)
klog.Errorf("zfs filesystem stats will not be reported: %v", err)
}
}
glog.V(1).Infof("Registering Docker factory")
klog.V(1).Infof("Registering Docker factory")
f := &dockerFactory{
cgroupSubsystems: cgroupSubsystems,
client: client,


@ -34,10 +34,10 @@ import (
dockercontainer "github.com/docker/docker/api/types/container"
docker "github.com/docker/docker/client"
"github.com/golang/glog"
cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
"golang.org/x/net/context"
"k8s.io/klog"
)
const (
@ -309,7 +309,7 @@ func (h *dockerFsHandler) Usage() common.FsUsage {
// TODO: ideally we should keep track of how many times we failed to get the usage for this
// device vs how many refreshes of the cache there have been, and display an error e.g. if we've
// had at least 1 refresh and we still can't find the device.
glog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err)
klog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err)
} else {
usage.BaseUsageBytes = thinPoolUsage
usage.TotalUsageBytes += thinPoolUsage
@ -319,7 +319,7 @@ func (h *dockerFsHandler) Usage() common.FsUsage {
if h.zfsWatcher != nil {
zfsUsage, err := h.zfsWatcher.GetUsage(h.zfsFilesystem)
if err != nil {
glog.V(5).Infof("unable to get fs usage from zfs for filesystem %s: %v", h.zfsFilesystem, err)
klog.V(5).Infof("unable to get fs usage from zfs for filesystem %s: %v", h.zfsFilesystem, err)
} else {
usage.BaseUsageBytes = zfsUsage
usage.TotalUsageBytes += zfsUsage


@ -20,7 +20,7 @@ import (
"github.com/google/cadvisor/manager/watcher"
"github.com/golang/glog"
"k8s.io/klog"
)
type ContainerHandlerFactory interface {
@ -106,18 +106,18 @@ func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, in
for _, factory := range factories[watchType] {
canHandle, canAccept, err := factory.CanHandleAndAccept(name)
if err != nil {
glog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err)
klog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err)
}
if canHandle {
if !canAccept {
glog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name)
klog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name)
return nil, false, nil
}
glog.V(3).Infof("Using factory %q for container %q", factory, name)
klog.V(3).Infof("Using factory %q for container %q", factory, name)
handle, err := factory.NewContainerHandler(name, inHostNamespace)
return handle, canAccept, err
} else {
glog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
}
}
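NewContainerHandler walks the registered factories in order and takes the first that can handle the container, distinguishing "can handle but deliberately ignores" from "cannot handle". A compact sketch of that selection logic; the factory type here is illustrative, not cAdvisor's real interface:

```go
package main

import "k8s.io/klog"

type factory struct {
	name      string
	canHandle bool
	canAccept bool
}

// pickFactory returns the first factory that both handles and accepts the
// container, or nil if it is handled-but-ignored or unhandled.
func pickFactory(factories []factory, container string) *factory {
	for i := range factories {
		f := &factories[i]
		if !f.canHandle {
			klog.V(4).Infof("Factory %q was unable to handle container %q", f.name, container)
			continue
		}
		if !f.canAccept {
			klog.V(3).Infof("Factory %q can handle container %q, but ignoring.", f.name, container)
			return nil // handled, but deliberately not tracked
		}
		klog.V(3).Infof("Using factory %q for container %q", f.name, container)
		return f
	}
	return nil
}

func main() {
	fs := []factory{{"docker", false, false}, {"raw", true, true}}
	if f := pickFactory(fs, "/docker/abc"); f != nil {
		klog.Infof("selected %q", f.name)
	}
}
```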


@ -29,9 +29,9 @@ import (
info "github.com/google/cadvisor/info/v1"
"bytes"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/cgroups"
"k8s.io/klog"
)
/*
@ -72,11 +72,11 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
pids, err := h.cgroupManager.GetAllPids()
if err != nil {
glog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
klog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
} else {
stats.Cpu.Schedstat, err = schedulerStatsFromProcs(h.rootFs, pids, h.pidMetricsCache)
if err != nil {
glog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
klog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
}
}
}
@ -88,7 +88,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
if h.includedMetrics.Has(container.NetworkUsageMetrics) {
netStats, err := networkStatsFromProc(h.rootFs, h.pid)
if err != nil {
glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
klog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
} else {
stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
}
@ -96,14 +96,14 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) {
t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
if err != nil {
glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
klog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
} else {
stats.Network.Tcp = t
}
t6, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp6")
if err != nil {
glog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
klog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
} else {
stats.Network.Tcp6 = t6
}
@ -111,14 +111,14 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) {
u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
if err != nil {
glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
klog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
} else {
stats.Network.Udp = u
}
u6, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp6")
if err != nil {
glog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
klog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
} else {
stats.Network.Udp6 = u6
}
@ -127,11 +127,11 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
paths := h.cgroupManager.GetPaths()
path, ok := paths["cpu"]
if !ok {
glog.V(4).Infof("Could not find cgroups CPU for container %d", h.pid)
klog.V(4).Infof("Could not find cgroups CPU for container %d", h.pid)
} else {
stats.Processes, err = processStatsFromProcs(h.rootFs, path)
if err != nil {
glog.V(4).Infof("Unable to get Process Stats: %v", err)
klog.V(4).Infof("Unable to get Process Stats: %v", err)
}
}
}
@ -165,7 +165,7 @@ func processStatsFromProcs(rootFs string, cgroupPath string) (info.ProcessStats,
dirPath := path.Join(rootFs, "/proc", pid, "fd")
fds, err := ioutil.ReadDir(dirPath)
if err != nil {
glog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
continue
}
fdCount += uint64(len(fds))
@ -498,13 +498,13 @@ func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
// We intentionally ignore these extra zeroes.
numActual, err := numCpusFunc()
if err != nil {
glog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
klog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
numActual = numPossible
}
if numActual > numPossible {
// The real number of cores should never be greater than the number of
// datapoints reported in cpu usage.
glog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
klog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
}
numActual = minUint32(numPossible, numActual)
ret.Cpu.Usage.PerCpu = make([]uint64, numActual)


@ -19,8 +19,8 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups"
"k8s.io/klog"
)
type CgroupSubsystems struct {
@ -61,7 +61,7 @@ func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount) (CgroupSubsystems, er
}
if _, ok := mountPoints[subsystem]; ok {
// duplicate mount for this subsystem; use the first one we saw
glog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem])
klog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem])
continue
}
if _, ok := recordedMountpoints[mount.Mountpoint]; !ok {


@ -22,12 +22,12 @@ import (
"strings"
"time"
"github.com/golang/glog"
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager/watcher"
"k8s.io/klog"
)
var MesosAgentAddress = flag.String("mesos_agent", "127.0.0.1:5051", "Mesos agent address")
@ -135,7 +135,7 @@ func Register(
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
}
glog.V(1).Infof("Registering mesos factory")
klog.V(1).Infof("Registering mesos factory")
factory := &mesosFactory{
machineInfoFactory: machineInfoFactory,
cgroupSubsystems: cgroupSubsystems,


@ -26,7 +26,7 @@ import (
info "github.com/google/cadvisor/info/v1"
watch "github.com/google/cadvisor/manager/watcher"
"github.com/golang/glog"
"k8s.io/klog"
)
var dockerOnly = flag.Bool("docker_only", false, "Only report docker containers in addition to root stats")
@ -94,7 +94,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, incl
return err
}
glog.V(1).Infof("Registering Raw factory")
klog.V(1).Infof("Registering Raw factory")
factory := &rawFactory{
machineInfoFactory: machineInfoFactory,
fsInfo: fsInfo,


@ -25,9 +25,9 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/machine"
"github.com/golang/glog"
cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/configs"
"k8s.io/klog"
)
type rawContainerHandler struct {
@ -134,7 +134,7 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
// Get memory and swap limits of the running machine
memLimit, err := machine.GetMachineMemoryCapacity()
if err != nil {
glog.Warningf("failed to obtain memory limit for machine container")
klog.Warningf("failed to obtain memory limit for machine container")
spec.HasMemory = false
} else {
spec.Memory.Limit = uint64(memLimit)
@ -144,7 +144,7 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
swapLimit, err := machine.GetMachineSwapCapacity()
if err != nil {
glog.Warningf("failed to obtain swap limit for machine container")
klog.Warningf("failed to obtain swap limit for machine container")
} else {
spec.Memory.SwapLimit = uint64(swapLimit)
}


@ -23,7 +23,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager/watcher"
"github.com/golang/glog"
"k8s.io/klog"
)
const RktNamespace = "rkt"
@ -86,7 +86,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, incl
return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
}
glog.V(1).Infof("Registering Rkt factory")
klog.V(1).Infof("Registering Rkt factory")
factory := &rktFactory{
machineInfoFactory: machineInfoFactory,
fsInfo: fsInfo,


@ -27,9 +27,9 @@ import (
info "github.com/google/cadvisor/info/v1"
"golang.org/x/net/context"
"github.com/golang/glog"
cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/configs"
"k8s.io/klog"
)
type rktContainerHandler struct {
@ -89,7 +89,7 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
annotations := resp.Pod.Annotations
if parsed.Container != "" { // As not empty string, an App container
if contAnnotations, ok := findAnnotations(resp.Pod.Apps, parsed.Container); !ok {
glog.Warningf("couldn't find app %v in pod", parsed.Container)
klog.Warningf("couldn't find app %v in pod", parsed.Container)
} else {
annotations = append(annotations, contAnnotations...)
}


@ -21,8 +21,8 @@ import (
"strings"
rktapi "github.com/coreos/rkt/api/v1alpha"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/klog"
)
type parsedName struct {
@ -128,7 +128,7 @@ func getRootFs(root string, parsed *parsedName) string {
bytes, err := ioutil.ReadFile(tree)
if err != nil {
glog.Errorf("ReadFile failed, couldn't read %v to get upper dir: %v", tree, err)
klog.Errorf("ReadFile failed, couldn't read %v to get upper dir: %v", tree, err)
return ""
}


@ -23,7 +23,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager/watcher"
"github.com/golang/glog"
"k8s.io/klog"
)
type systemdFactory struct{}
@ -51,7 +51,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string {
// Register registers the systemd container factory.
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
glog.V(1).Infof("Registering systemd factory")
klog.V(1).Infof("Registering systemd factory")
factory := &systemdFactory{}
container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})
return nil


@ -18,7 +18,7 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
)
// DmsetupClient is a low-level client for interacting with device mapper via
@ -58,6 +58,6 @@ func (c *defaultDmsetupClient) Status(deviceName string) ([]byte, error) {
}
func (*defaultDmsetupClient) dmsetup(args ...string) ([]byte, error) {
glog.V(5).Infof("running dmsetup %v", strings.Join(args, " "))
klog.V(5).Infof("running dmsetup %v", strings.Join(args, " "))
return exec.Command("dmsetup", args...).Output()
}
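The dmsetup client is a thin exec wrapper that traces the full command line at V(5) so routine runs stay quiet. The same pattern as a runnable sketch, with a harmless command substituted for dmsetup:

```go
package main

import (
	"flag"
	"os/exec"
	"strings"

	"k8s.io/klog"
)

// run shells out to an external tool, tracing the invocation at V(5).
func run(tool string, args ...string) ([]byte, error) {
	klog.V(5).Infof("running %s %v", tool, strings.Join(args, " "))
	return exec.Command(tool, args...).Output()
}

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()
	flag.Parse() // -v=5 -logtostderr=true shows the trace line

	out, err := run("echo", "status")
	if err != nil {
		klog.Fatalf("exec failed: %v", err)
	}
	klog.Infof("output: %s", strings.TrimSpace(string(out)))
}
```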


@ -21,7 +21,7 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
)
// thinLsClient knows how to run a thin_ls very specific to CoW usage for
@ -53,7 +53,7 @@ var _ thinLsClient = &defaultThinLsClient{}
func (c *defaultThinLsClient) ThinLs(deviceName string) (map[string]uint64, error) {
args := []string{"--no-headers", "-m", "-o", "DEV,EXCLUSIVE_BYTES", deviceName}
glog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " "))
klog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " "))
output, err := exec.Command(c.thinLsPath, args...).Output()
if err != nil {
@ -80,7 +80,7 @@ func parseThinLsOutput(output []byte) map[string]uint64 {
deviceID := fields[0]
usage, err := strconv.ParseUint(fields[1], 10, 64)
if err != nil {
glog.Warningf("unexpected error parsing thin_ls output: %v", err)
klog.Warningf("unexpected error parsing thin_ls output: %v", err)
continue
}


@ -19,7 +19,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
)
// ThinPoolWatcher maintains a cache of device name -> usage stats for a
@ -58,7 +58,7 @@ func NewThinPoolWatcher(poolName, metadataDevice string) (*ThinPoolWatcher, erro
func (w *ThinPoolWatcher) Start() {
err := w.Refresh()
if err != nil {
glog.Errorf("encountered error refreshing thin pool watcher: %v", err)
klog.Errorf("encountered error refreshing thin pool watcher: %v", err)
}
for {
@ -69,12 +69,12 @@ func (w *ThinPoolWatcher) Start() {
start := time.Now()
err = w.Refresh()
if err != nil {
glog.Errorf("encountered error refreshing thin pool watcher: %v", err)
klog.Errorf("encountered error refreshing thin pool watcher: %v", err)
}
// print latency for refresh
duration := time.Since(start)
glog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
klog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
}
}
}
@ -115,7 +115,7 @@ func (w *ThinPoolWatcher) Refresh() error {
}
if currentlyReserved {
glog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
klog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
_, err = w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
if err != nil {
err = fmt.Errorf("error releasing metadata snapshot for %v: %v", w.poolName, err)
@ -123,22 +123,22 @@ func (w *ThinPoolWatcher) Refresh() error {
}
}
glog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
klog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
// NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup
// message'. It's not needed for thin pools.
if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {
err = fmt.Errorf("error reserving metadata for thin-pool %v: %v output: %v", w.poolName, err, string(output))
return err
} else {
glog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName)
klog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName)
}
defer func() {
glog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName)
klog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName)
w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
}()
glog.V(5).Infof("running thin_ls on metadata device %v", w.metadataDevice)
klog.V(5).Infof("running thin_ls on metadata device %v", w.metadataDevice)
newCache, err := w.thinLsClient.ThinLs(w.metadataDevice)
if err != nil {
err = fmt.Errorf("error performing thin_ls on metadata device %v: %v", w.metadataDevice, err)
@ -157,7 +157,7 @@ const (
// checkReservation checks to see whether the thin device is currently holding
// userspace metadata.
func (w *ThinPoolWatcher) checkReservation(poolName string) (bool, error) {
glog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot")
klog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot")
output, err := w.dmsetup.Status(poolName)
if err != nil {
return false, err


@ -24,7 +24,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/utils"
"github.com/golang/glog"
"k8s.io/klog"
)
type byTimestamp []*info.Event
@ -322,7 +322,7 @@ func (self *events) AddEvent(e *info.Event) error {
for _, watchObject := range watchesToSend {
watchObject.eventChannel.GetChannel() <- e
}
glog.V(4).Infof("Added event %v", e)
klog.V(4).Infof("Added event %v", e)
return nil
}
@ -332,7 +332,7 @@ func (self *events) StopWatch(watchId int) {
defer self.watcherLock.Unlock()
_, ok := self.watchers[watchId]
if !ok {
glog.Errorf("Could not find watcher instance %v", watchId)
klog.Errorf("Could not find watcher instance %v", watchId)
}
close(self.watchers[watchId].eventChannel.GetChannel())
delete(self.watchers, watchId)


@ -33,11 +33,11 @@ import (
"time"
"github.com/docker/docker/pkg/mount"
"github.com/golang/glog"
"github.com/google/cadvisor/devicemapper"
"github.com/google/cadvisor/utils"
dockerutil "github.com/google/cadvisor/utils/docker"
zfs "github.com/mistifyio/go-zfs"
"k8s.io/klog"
)
const (
@ -116,7 +116,7 @@ func NewFsInfo(context Context) (FsInfo, error) {
if err != nil {
// UUID is not always available across different OS distributions.
// Do not fail if there is an error.
glog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err)
klog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err)
}
// Avoid devicemapper container mounts - these are tracked by the ThinPoolWatcher
@ -139,8 +139,8 @@ func NewFsInfo(context Context) (FsInfo, error) {
fsInfo.addDockerImagesLabel(context, mounts)
fsInfo.addCrioImagesLabel(context, mounts)
glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions)
klog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
klog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions)
fsInfo.addSystemRootLabel(mounts)
return fsInfo, nil
}
@ -165,7 +165,7 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) {
path := filepath.Join(dir, file.Name())
target, err := os.Readlink(path)
if err != nil {
glog.Warningf("Failed to resolve symlink for %q", path)
klog.Warningf("Failed to resolve symlink for %q", path)
continue
}
device, err := filepath.Abs(filepath.Join(dir, target))
@ -213,7 +213,7 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma
if mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
major, minor, err := getBtrfsMajorMinorIds(mount)
if err != nil {
glog.Warningf("%s", err)
klog.Warningf("%s", err)
} else {
mount.Major = major
mount.Minor = minor
@ -278,7 +278,7 @@ func (self *RealFsInfo) addSystemRootLabel(mounts []*mount.Info) {
func (self *RealFsInfo) addDockerImagesLabel(context Context, mounts []*mount.Info) {
dockerDev, dockerPartition, err := self.getDockerDeviceMapperInfo(context.Docker)
if err != nil {
glog.Warningf("Could not get Docker devicemapper device: %v", err)
klog.Warningf("Could not get Docker devicemapper device: %v", err)
}
if len(dockerDev) > 0 && dockerPartition != nil {
self.partitions[dockerDev] = *dockerPartition
@ -405,7 +405,7 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
switch partition.fsType {
case DeviceMapper.String():
fs.Capacity, fs.Free, fs.Available, err = getDMStats(device, partition.blockSize)
glog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available)
klog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available)
fs.Type = DeviceMapper
case ZFS.String():
fs.Capacity, fs.Free, fs.Available, err = getZfstats(device)
@ -418,11 +418,11 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
fs.InodesFree = &inodesFree
fs.Type = VFS
} else {
glog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint)
klog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint)
}
}
if err != nil {
glog.V(4).Infof("Stat fs failed. Error: %v", err)
klog.V(4).Infof("Stat fs failed. Error: %v", err)
} else {
deviceSet[device] = struct{}{}
fs.DeviceInfo = DeviceInfo{
@ -445,7 +445,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
file, err := os.Open(diskStatsFile)
if err != nil {
if os.IsNotExist(err) {
glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile)
klog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile)
return diskStatsMap, nil
}
return nil, err
@ -551,7 +551,7 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
if found && mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
major, minor, err := getBtrfsMajorMinorIds(mount)
if err != nil {
glog.Warningf("%s", err)
klog.Warningf("%s", err)
} else {
return &DeviceInfo{mount.Source, uint(major), uint(minor)}, nil
}
@ -583,12 +583,12 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
return 0, fmt.Errorf("failed to exec du - %v", err)
}
timer := time.AfterFunc(timeout, func() {
glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
klog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
cmd.Process.Kill()
})
stdoutb, souterr := ioutil.ReadAll(stdoutp)
if souterr != nil {
glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
klog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
}
stderrb, _ := ioutil.ReadAll(stderrp)
err = cmd.Wait()
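Both GetDirDiskUsage and GetDirInodeUsage guard their external du/find invocations with a watchdog: time.AfterFunc kills the child process if it outlives the timeout. A runnable sketch of that pattern (the helper name is invented):

```go
package main

import (
	"os/exec"
	"time"

	"k8s.io/klog"
)

// waitWithTimeout runs cmd, killing it if it exceeds timeout.
func waitWithTimeout(cmd *exec.Cmd, timeout time.Duration) error {
	if err := cmd.Start(); err != nil {
		return err
	}
	timer := time.AfterFunc(timeout, func() {
		klog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
		cmd.Process.Kill()
	})
	defer timer.Stop() // cancel the watchdog once Wait returns
	return cmd.Wait()
}

func main() {
	err := waitWithTimeout(exec.Command("sleep", "3"), time.Second)
	klog.Infof("command finished with: %v", err) // expect a kill-signal error
}
```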
@ -622,7 +622,7 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
}
timer := time.AfterFunc(timeout, func() {
glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
klog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
findCmd.Process.Kill()
})
err := findCmd.Wait()
@ -763,7 +763,7 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
return 0, 0, err
}
glog.V(4).Infof("btrfs mount %#v", mount)
klog.V(4).Infof("btrfs mount %#v", mount)
if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK {
err := syscall.Stat(mount.Mountpoint, buf)
if err != nil {
@ -771,8 +771,8 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
return 0, 0, err
}
glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))
klog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
klog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))
return int(major(buf.Dev)), int(minor(buf.Dev)), nil
} else {


@ -30,9 +30,9 @@ import (
"github.com/google/cadvisor/validate"
auth "github.com/abbot/go-http-auth"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/klog"
)
func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAuthFile, httpAuthRealm, httpDigestFile, httpDigestRealm string) error {
@ -61,7 +61,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
// Setup the authenticator object
if httpAuthFile != "" {
glog.V(1).Infof("Using auth file %s", httpAuthFile)
klog.V(1).Infof("Using auth file %s", httpAuthFile)
secrets := auth.HtpasswdFileProvider(httpAuthFile)
authenticator := auth.NewBasicAuthenticator(httpAuthRealm, secrets)
mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler))
@ -71,7 +71,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
authenticated = true
}
if httpAuthFile == "" && httpDigestFile != "" {
glog.V(1).Infof("Using digest file %s", httpDigestFile)
klog.V(1).Infof("Using digest file %s", httpDigestFile)
secrets := auth.HtdigestFileProvider(httpDigestFile)
authenticator := auth.NewDigestAuthenticator(httpDigestRealm, secrets)
mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler))


@ -18,8 +18,8 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/google/cadvisor/info/v1"
"k8s.io/klog"
)
func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
@ -70,7 +70,7 @@ func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats {
stat.Cpu = &val.Cpu
cpuInst, err := InstCpuStats(last, val)
if err != nil {
glog.Warningf("Could not get instant cpu stats: %v", err)
klog.Warningf("Could not get instant cpu stats: %v", err)
} else {
stat.CpuInst = cpuInst
}
@ -107,7 +107,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []
stat.Cpu = &val.Cpu
cpuInst, err := InstCpuStats(last, val)
if err != nil {
glog.Warningf("Could not get instant cpu stats: %v", err)
klog.Warningf("Could not get instant cpu stats: %v", err)
} else {
stat.CpuInst = cpuInst
}
@ -133,7 +133,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []
}
} else if len(val.Filesystem) > 1 && containerName != "/" {
// Cannot handle multiple devices per container.
glog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
klog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
}
}
if spec.HasDiskIo {
@ -168,7 +168,7 @@ func DeprecatedStatsFromV1(cont *v1.ContainerInfo) []DeprecatedContainerStats {
stat.Cpu = val.Cpu
cpuInst, err := InstCpuStats(last, val)
if err != nil {
glog.Warningf("Could not get instant cpu stats: %v", err)
klog.Warningf("Could not get instant cpu stats: %v", err)
} else {
stat.CpuInst = cpuInst
}


@ -23,9 +23,9 @@ import (
"testing"
"time"
"github.com/golang/glog"
"github.com/google/cadvisor/client"
"github.com/google/cadvisor/client/v2"
"k8s.io/klog"
)
var host = flag.String("host", "localhost", "Address of the host being tested")
@ -329,7 +329,7 @@ func (self shellActions) Run(command string, args ...string) (string, string) {
var stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
glog.Infof("About to run - %v", cmd.Args)
klog.Infof("About to run - %v", cmd.Args)
err := cmd.Run()
if err != nil {
self.fm.T().Fatalf("Failed to run %q %v in %q with error: %q. Stdout: %q, Stderr: %s", command, args, self.fm.Hostname().Host, err, stdout.String(), stderr.String())


@ -32,8 +32,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
cadvisorApi "github.com/google/cadvisor/info/v2"
"k8s.io/klog"
)
// must be able to ssh into hosts without password
@ -90,7 +90,7 @@ func RunSshCommand(cmd string, args ...string) error {
func PushAndRunTests(host, testDir string) (result error) {
// Push binary.
glog.Infof("Pushing cAdvisor binary to %q...", host)
klog.Infof("Pushing cAdvisor binary to %q...", host)
err := RunSshCommand("ssh", host, "--", "mkdir", "-p", testDir)
if err != nil {
@ -99,7 +99,7 @@ func PushAndRunTests(host, testDir string) (result error) {
defer func() {
err = RunSshCommand("ssh", host, "--", "rm", "-rf", testDir)
if err != nil {
glog.Errorf("Failed to cleanup test directory: %v", err)
klog.Errorf("Failed to cleanup test directory: %v", err)
}
}()
@ -109,7 +109,7 @@ func PushAndRunTests(host, testDir string) (result error) {
}
// Start cAdvisor.
glog.Infof("Running cAdvisor on %q...", host)
klog.Infof("Running cAdvisor on %q...", host)
portStr := strconv.Itoa(*port)
errChan := make(chan error)
go func() {
@ -121,7 +121,7 @@ func PushAndRunTests(host, testDir string) (result error) {
defer func() {
err = RunSshCommand("ssh", host, "--", "sudo", "pkill", cadvisorBinary)
if err != nil {
glog.Errorf("Failed to cleanup: %v", err)
klog.Errorf("Failed to cleanup: %v", err)
}
}()
defer func() {
@ -138,12 +138,12 @@ func PushAndRunTests(host, testDir string) (result error) {
result = fmt.Errorf("error reading local log file: %v for %v", err, result)
return
}
glog.Errorf("----------------------\nLogs from Host: %q\n%v\n", host, string(logs))
klog.Errorf("----------------------\nLogs from Host: %q\n%v\n", host, string(logs))
// Get attributes for debugging purposes.
attributes, err := getAttributes(host, portStr)
if err != nil {
glog.Errorf("Failed to read host attributes: %v", err)
klog.Errorf("Failed to read host attributes: %v", err)
}
result = fmt.Errorf("error on host %s: %v\n%+v", host, result, attributes)
}
@ -171,12 +171,12 @@ func PushAndRunTests(host, testDir string) (result error) {
}
// Run the tests in a retry loop.
glog.Infof("Running integration tests targeting %q...", host)
klog.Infof("Running integration tests targeting %q...", host)
for i := 0; i <= *testRetryCount; i++ {
// Check if this is a retry
if i > 0 {
time.Sleep(time.Second * 15) // Wait 15 seconds before retrying
glog.Warningf("Retrying (%d of %d) tests on host %s due to error %v", i, *testRetryCount, host, err)
klog.Warningf("Retrying (%d of %d) tests on host %s due to error %v", i, *testRetryCount, host, err)
}
// Run the command
@ -188,7 +188,7 @@ func PushAndRunTests(host, testDir string) (result error) {
// Only retry on test failures caused by these known flaky failure conditions
if retryRegex == nil || !retryRegex.Match([]byte(err.Error())) {
glog.Warningf("Skipping retry for tests on host %s because error is not whitelisted", host)
klog.Warningf("Skipping retry for tests on host %s because error is not whitelisted", host)
break
}
}
@ -198,16 +198,16 @@ func PushAndRunTests(host, testDir string) (result error) {
func Run() error {
start := time.Now()
defer func() {
glog.Infof("Execution time %v", time.Since(start))
klog.Infof("Execution time %v", time.Since(start))
}()
defer glog.Flush()
defer klog.Flush()
hosts := flag.Args()
testDir := fmt.Sprintf("/tmp/cadvisor-%d", os.Getpid())
glog.Infof("Running integration tests on host(s) %q", strings.Join(hosts, ","))
klog.Infof("Running integration tests on host(s) %q", strings.Join(hosts, ","))
// Build cAdvisor.
glog.Infof("Building cAdvisor...")
klog.Infof("Building cAdvisor...")
err := RunCommand("build/build.sh")
if err != nil {
return err
@ -215,7 +215,7 @@ func Run() error {
defer func() {
err := RunCommand("rm", cadvisorBinary)
if err != nil {
glog.Error(err)
klog.Error(err)
}
}()
@ -249,7 +249,7 @@ func Run() error {
return errors.New(buffer.String())
}
glog.Infof("All tests pass!")
klog.Infof("All tests pass!")
return nil
}
@ -261,7 +261,7 @@ func initRetryWhitelist() {
file, err := os.Open(*testRetryWhitelist)
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
defer file.Close()
@ -274,23 +274,24 @@ func initRetryWhitelist() {
}
}
if err := scanner.Err(); err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
retryRegex = regexp.MustCompile(strings.Join(retryStrings, "|"))
}
func main() {
klog.InitFlags(nil)
flag.Parse()
// Check usage.
if len(flag.Args()) == 0 {
glog.Fatalf("USAGE: runner <hosts to test>")
klog.Fatalf("USAGE: runner <hosts to test>")
}
initRetryWhitelist()
// Run the tests.
err := Run()
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
}
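The runner retries failed integration tests, but only when the failure text matches retryRegex, which is compiled from a whitelist file of known-flaky patterns. That retry policy as a self-contained sketch (the whitelist contents are invented for illustration):

```go
package main

import (
	"errors"
	"regexp"
	"time"

	"k8s.io/klog"
)

// Stand-in whitelist; the real one is built from the whitelist file.
var retryRegex = regexp.MustCompile("connection refused|timed out")

// runWithRetries re-runs fn up to retries extra times, retrying only
// failures whose message matches the whitelist.
func runWithRetries(fn func() error, retries int) error {
	var err error
	for i := 0; i <= retries; i++ {
		if i > 0 {
			time.Sleep(time.Second) // brief pause before retrying
			klog.Warningf("Retrying (%d of %d) due to error %v", i, retries, err)
		}
		if err = fn(); err == nil {
			return nil
		}
		if !retryRegex.MatchString(err.Error()) {
			klog.Warning("Skipping retry because error is not whitelisted")
			break
		}
	}
	return err
}

func main() {
	err := runWithRetries(func() error { return errors.New("timed out") }, 2)
	klog.Infof("final result: %v", err)
}
```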


@ -21,8 +21,13 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"k8s.io/klog"
)
func init() {
klog.InitFlags(nil)
}
// Checks that expected and actual are within delta of each other.
func inDelta(t *testing.T, expected, actual, delta uint64, description string) {
var diff uint64


@ -13,3 +13,9 @@
// limitations under the License.
package healthz
import "k8s.io/klog"
func init() {
klog.InitFlags(nil)
}
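Test and library packages that log through klog gain an init() hook like this so the -v and -logtostderr flags exist before the enclosing binary parses its command line. A sketch of what that enables in a test; the package and test names are hypothetical:

```go
package healthz_test // hypothetical test package

import (
	"flag"
	"testing"

	"k8s.io/klog"
)

func init() {
	// Register klog's flags before the test binary parses its flags,
	// e.g. `go test ./... -args -v=4 -logtostderr=true`.
	klog.InitFlags(nil)
}

func TestKlogFlagsRegistered(t *testing.T) {
	if flag.Lookup("logtostderr") == nil {
		t.Fatal("klog flags were not registered")
	}
}
```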


@ -30,7 +30,7 @@ import (
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/utils/sysinfo"
"github.com/golang/glog"
"k8s.io/klog"
"golang.org/x/sys/unix"
)
@ -50,7 +50,7 @@ func getInfoFromFiles(filePaths string) string {
return strings.TrimSpace(string(id))
}
}
glog.Warningf("Couldn't collect info from any of the files in %q", filePaths)
klog.Warningf("Couldn't collect info from any of the files in %q", filePaths)
return ""
}
@ -117,27 +117,27 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
filesystems, err := fsInfo.GetGlobalFsInfo()
if err != nil {
glog.Errorf("Failed to get global filesystem information: %v", err)
klog.Errorf("Failed to get global filesystem information: %v", err)
}
diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs)
if err != nil {
glog.Errorf("Failed to get disk map: %v", err)
klog.Errorf("Failed to get disk map: %v", err)
}
netDevices, err := sysinfo.GetNetworkDevices(sysFs)
if err != nil {
glog.Errorf("Failed to get network devices: %v", err)
klog.Errorf("Failed to get network devices: %v", err)
}
topology, numCores, err := GetTopology(sysFs, string(cpuinfo))
if err != nil {
glog.Errorf("Failed to get topology information: %v", err)
klog.Errorf("Failed to get topology information: %v", err)
}
systemUUID, err := sysinfo.GetSystemUUID(sysFs)
if err != nil {
glog.Errorf("Failed to get system UUID: %v", err)
klog.Errorf("Failed to get system UUID: %v", err)
}
realCloudInfo := cloudinfo.NewRealCloudInfo()


@ -30,7 +30,7 @@ import (
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/utils/sysinfo"
"github.com/golang/glog"
"k8s.io/klog"
"golang.org/x/sys/unix"
)
@ -191,7 +191,7 @@ func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
for idx, node := range nodes {
caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0])
if err != nil {
glog.Errorf("failed to get cache information for node %d: %v", node.Id, err)
klog.Errorf("failed to get cache information for node %d: %v", node.Id, err)
continue
}
numThreadsPerCore := len(node.Cores[0].Threads)


@ -39,7 +39,7 @@ import (
"github.com/google/cadvisor/utils/cpuload"
units "github.com/docker/go-units"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/utils/clock"
)
@ -187,7 +187,7 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) {
}
matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
if len(matches) != 2 {
glog.V(3).Infof("failed to get memory cgroup path from %q", cgroups)
klog.V(3).Infof("failed to get memory cgroup path from %q", cgroups)
// return root in case of failures - memory hierarchy might not be enabled.
return "/", nil
}
@ -208,7 +208,7 @@ func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte,
}
for _, pid := range pids {
filePath := path.Join(rootfs, "/proc", pid, "/root", filepath)
glog.V(3).Infof("Trying path %q", filePath)
klog.V(3).Infof("Trying path %q", filePath)
data, err := ioutil.ReadFile(filePath)
if err == nil {
return data, err
@ -334,7 +334,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
dirPath := path.Join(rootfs, "/proc", strconv.Itoa(pid), "fd")
fds, err := ioutil.ReadDir(dirPath)
if err != nil {
glog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
continue
}
fdCount = len(fds)
@ -393,7 +393,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
// Create cpu load reader.
loadReader, err := cpuload.New()
if err != nil {
glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err)
klog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err)
} else {
cont.loadReader = loadReader
}
@ -406,7 +406,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
cont.summaryReader, err = summary.New(cont.info.Spec)
if err != nil {
cont.summaryReader = nil
glog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err)
klog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err)
}
return cont, nil
@ -419,7 +419,7 @@ func (self *containerData) nextHousekeepingInterval() time.Duration {
stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2)
if err != nil {
if self.allowErrorLogging() {
glog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err)
klog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err)
}
} else if len(stats) == 2 {
// TODO(vishnuk): Use no processes as a signal.
@ -449,7 +449,7 @@ func (c *containerData) housekeeping() {
if c.loadReader != nil {
err := c.loadReader.Start()
if err != nil {
glog.Warningf("Could not start cpu load stat collector for %q: %s", c.info.Name, err)
klog.Warningf("Could not start cpu load stat collector for %q: %s", c.info.Name, err)
}
defer c.loadReader.Stop()
}
@ -461,7 +461,7 @@ func (c *containerData) housekeeping() {
}
// Housekeep every second.
glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
klog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
houseKeepingTimer := c.clock.NewTimer(0 * time.Second)
defer houseKeepingTimer.Stop()
for {
@ -482,7 +482,7 @@ func (c *containerData) housekeeping() {
stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples)
if err != nil {
if c.allowErrorLogging() {
glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
klog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
}
} else if len(stats) < numSamples {
// Ignore, not enough stats yet.
@ -499,7 +499,7 @@ func (c *containerData) housekeeping() {
usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
usageInHuman := units.HumanSize(float64(usageMemory))
// Don't set verbosity since this is already protected by the logUsage flag.
glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
klog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
}
}
houseKeepingTimer.Reset(c.nextHousekeepingInterval())
@ -520,13 +520,13 @@ func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepin
err := c.updateStats()
if err != nil {
if c.allowErrorLogging() {
glog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err)
klog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err)
}
}
// Log if housekeeping took too long.
duration := c.clock.Since(start)
if duration >= longHousekeeping {
glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
klog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
}
c.notifyOnDemand()
c.statsLastUpdatedTime = c.clock.Now()
@ -600,7 +600,7 @@ func (c *containerData) updateStats() error {
err := c.summaryReader.AddSample(*stats)
if err != nil {
// Ignore summary errors for now.
glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
klog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
}
}
var customStatsErr error


@ -49,9 +49,9 @@ import (
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/version"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups"
"golang.org/x/net/context"
"k8s.io/klog"
"k8s.io/utils/clock"
)
@ -152,7 +152,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
if err != nil {
return nil, err
}
glog.V(2).Infof("cAdvisor running in container: %q", selfContainer)
klog.V(2).Infof("cAdvisor running in container: %q", selfContainer)
var (
dockerStatus info.DockerStatus
@ -163,7 +163,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
dockerStatus = retryDockerStatus()
if tmpRktPath, err := rkt.RktPath(); err != nil {
glog.V(5).Infof("Rkt not connected: %v", err)
klog.V(5).Infof("Rkt not connected: %v", err)
} else {
rktPath = tmpRktPath
}
@ -174,7 +174,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
}
crioInfo, err := crioClient.Info()
if err != nil {
glog.V(5).Infof("CRI-O not connected: %v", err)
klog.V(5).Infof("CRI-O not connected: %v", err)
}
context := fs.Context{
@ -226,13 +226,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
return nil, err
}
newManager.machineInfo = *machineInfo
glog.V(1).Infof("Machine: %+v", newManager.machineInfo)
klog.V(1).Infof("Machine: %+v", newManager.machineInfo)
versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
}
glog.V(1).Infof("Version: %+v", *versionInfo)
klog.V(1).Infof("Version: %+v", *versionInfo)
newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
return newManager, nil
@ -250,9 +250,9 @@ func retryDockerStatus() info.DockerStatus {
switch err {
case context.DeadlineExceeded:
glog.Warningf("Timeout trying to communicate with docker during initialization, will retry")
klog.Warningf("Timeout trying to communicate with docker during initialization, will retry")
default:
glog.V(5).Infof("Docker not connected: %v", err)
klog.V(5).Infof("Docker not connected: %v", err)
return info.DockerStatus{}
}
@ -298,12 +298,12 @@ type manager struct {
func (self *manager) Start() error {
err := docker.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
klog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
}
err = rkt.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
klog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
} else {
watcher, err := rktwatcher.NewRktContainerWatcher()
if err != nil {
@ -314,27 +314,27 @@ func (self *manager) Start() error {
err = containerd.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
klog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
}
err = crio.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the crio container factory failed: %v", err)
klog.V(5).Infof("Registration of the crio container factory failed: %v", err)
}
err = mesos.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the mesos container factory failed: %v", err)
klog.V(5).Infof("Registration of the mesos container factory failed: %v", err)
}
err = systemd.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
klog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
}
err = raw.Register(self, self.fsInfo, self.includedMetrics, self.rawContainerCgroupPathPrefixWhiteList)
if err != nil {
glog.Errorf("Registration of the raw container factory failed: %v", err)
klog.Errorf("Registration of the raw container factory failed: %v", err)
}
rawWatcher, err := rawwatcher.NewRawContainerWatcher()
@ -346,7 +346,7 @@ func (self *manager) Start() error {
// Watch for OOMs.
err = self.watchForNewOoms()
if err != nil {
glog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err)
klog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err)
}
// If there are no factories, don't start any housekeeping and serve the information we do have.
@ -362,12 +362,12 @@ func (self *manager) Start() error {
if err != nil {
return err
}
glog.V(2).Infof("Starting recovery of all containers")
klog.V(2).Infof("Starting recovery of all containers")
err = self.detectSubcontainers("/")
if err != nil {
return err
}
glog.V(2).Infof("Recovery completed")
klog.V(2).Infof("Recovery completed")
// Watch for new container.
quitWatcher := make(chan error)
@ -418,18 +418,18 @@ func (self *manager) globalHousekeeping(quit chan error) {
// Check for new containers.
err := self.detectSubcontainers("/")
if err != nil {
glog.Errorf("Failed to detect containers: %s", err)
klog.Errorf("Failed to detect containers: %s", err)
}
// Log if housekeeping took too long.
duration := time.Since(start)
if duration >= longHousekeeping {
glog.V(3).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration)
klog.V(3).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration)
}
case <-quit:
// Quit if asked to do so.
quit <- nil
glog.Infof("Exiting global housekeeping thread")
klog.Infof("Exiting global housekeeping thread")
return
}
}
@ -630,7 +630,7 @@ func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[
if err != nil {
// Ignore the error because of race condition and return best-effort result.
if err == memory.ErrDataNotFound {
glog.Warningf("Error getting data for container %s because of race condition", name)
klog.Warningf("Error getting data for container %s because of race condition", name)
continue
}
return nil, err
@ -890,7 +890,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
if err != nil {
return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err)
}
glog.V(4).Infof("Got config from %q: %q", v, configFile)
klog.V(4).Infof("Got config from %q: %q", v, configFile)
if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
@ -968,7 +968,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
}
if !accept {
// ignoring this container.
glog.V(4).Infof("ignoring container %q", containerName)
klog.V(4).Infof("ignoring container %q", containerName)
return nil
}
collectorManager, err := collector.NewCollectorManager()
@ -983,11 +983,11 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
}
devicesCgroupPath, err := handler.GetCgroupPath("devices")
if err != nil {
glog.Warningf("Error getting devices cgroup path: %v", err)
klog.Warningf("Error getting devices cgroup path: %v", err)
} else {
cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
if err != nil {
glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
}
}
@ -996,7 +996,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
collectorConfigs := collector.GetCollectorConfigs(labels)
err = m.registerCollectors(collectorConfigs, cont)
if err != nil {
glog.Warningf("Failed to register collectors for %q: %v", containerName, err)
klog.Warningf("Failed to register collectors for %q: %v", containerName, err)
}
// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
@ -1008,7 +1008,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
}] = cont
}
glog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
klog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
contSpec, err := cont.handler.GetSpec()
if err != nil {
@ -1065,7 +1065,7 @@ func (m *manager) destroyContainerLocked(containerName string) error {
Name: alias,
})
}
glog.V(3).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
klog.V(3).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
contRef, err := cont.handler.ContainerReference()
if err != nil {
@ -1144,7 +1144,7 @@ func (m *manager) detectSubcontainers(containerName string) error {
for _, cont := range added {
err = m.createContainer(cont.Name, watcher.Raw)
if err != nil {
glog.Errorf("Failed to create existing container: %s: %s", cont.Name, err)
klog.Errorf("Failed to create existing container: %s: %s", cont.Name, err)
}
}
@ -1152,7 +1152,7 @@ func (m *manager) detectSubcontainers(containerName string) error {
for _, cont := range removed {
err = m.destroyContainer(cont.Name)
if err != nil {
glog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err)
klog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err)
}
}
@ -1192,7 +1192,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
err = self.destroyContainer(event.Name)
}
if err != nil {
glog.Warningf("Failed to process watch event %+v: %v", event, err)
klog.Warningf("Failed to process watch event %+v: %v", event, err)
}
case <-quit:
var errs partialFailure
@ -1209,7 +1209,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
quit <- errs
} else {
quit <- nil
glog.Infof("Exiting thread watching subcontainers")
klog.Infof("Exiting thread watching subcontainers")
return
}
}
@ -1219,7 +1219,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
}
func (self *manager) watchForNewOoms() error {
glog.V(2).Infof("Started watching for new ooms in manager")
klog.V(2).Infof("Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New()
if err != nil {
@ -1237,9 +1237,9 @@ func (self *manager) watchForNewOoms() error {
}
err := self.eventHandler.AddEvent(newEvent)
if err != nil {
glog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
klog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
}
glog.V(3).Infof("Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath)
klog.V(3).Infof("Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath)
newEvent = &info.Event{
ContainerName: oomInstance.VictimContainerName,
@ -1254,7 +1254,7 @@ func (self *manager) watchForNewOoms() error {
}
err = self.eventHandler.AddEvent(newEvent)
if err != nil {
glog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
klog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
}
}
}()
@ -1285,12 +1285,12 @@ func parseEventsStoragePolicy() events.StoragePolicy {
for _, part := range parts {
items := strings.Split(part, "=")
if len(items) != 2 {
glog.Warningf("Unknown event storage policy %q when parsing max age", part)
klog.Warningf("Unknown event storage policy %q when parsing max age", part)
continue
}
dur, err := time.ParseDuration(items[1])
if err != nil {
glog.Warningf("Unable to parse event max age duration %q: %v", items[1], err)
klog.Warningf("Unable to parse event max age duration %q: %v", items[1], err)
continue
}
if items[0] == "default" {
@ -1305,12 +1305,12 @@ func parseEventsStoragePolicy() events.StoragePolicy {
for _, part := range parts {
items := strings.Split(part, "=")
if len(items) != 2 {
glog.Warningf("Unknown event storage policy %q when parsing max event limit", part)
klog.Warningf("Unknown event storage policy %q when parsing max event limit", part)
continue
}
val, err := strconv.Atoi(items[1])
if err != nil {
glog.Warningf("Unable to parse integer from %q: %v", items[1], err)
klog.Warningf("Unable to parse integer from %q: %v", items[1], err)
continue
}
if items[0] == "default" {
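
The two loops above parse comma-separated key=value lists: durations for the max-age policy and integers for the max-event-count policy. Assuming cAdvisor's documented --event_storage_age_limit and --event_storage_event_limit flags feed these parsers (flag names are taken from cAdvisor's runtime-options docs, not from this hunk), a hypothetical invocation would look like --event_storage_age_limit=default=24h,creation=1h --event_storage_event_limit=default=100000.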


@ -28,7 +28,7 @@ import (
"github.com/google/cadvisor/manager/watcher"
inotify "github.com/sigma/go-inotify"
"github.com/golang/glog"
"k8s.io/klog"
)
type rawContainerWatcher struct {
@ -84,10 +84,10 @@ func (self *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error
case event := <-self.watcher.Event():
err := self.processEvent(event, events)
if err != nil {
glog.Warningf("Error while processing event (%+v): %v", event, err)
klog.Warningf("Error while processing event (%+v): %v", event, err)
}
case err := <-self.watcher.Error():
glog.Warningf("Error while watching %q: %v", "/", err)
klog.Warningf("Error while watching %q: %v", "/", err)
case <-self.stopWatcher:
err := self.watcher.Close()
if err == nil {
@ -126,7 +126,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
if cleanup {
_, err := self.watcher.RemoveWatch(containerName, dir)
if err != nil {
glog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
klog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
}
}
}()
@ -143,7 +143,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
subcontainerName := path.Join(containerName, entry.Name())
alreadyWatchingSubDir, err := self.watchDirectory(events, entryPath, subcontainerName)
if err != nil {
glog.Errorf("Failed to watch directory %q: %v", entryPath, err)
klog.Errorf("Failed to watch directory %q: %v", entryPath, err)
if os.IsNotExist(err) {
// The directory may have been removed before watching. Try to watch the other
// subdirectories. (https://github.com/kubernetes/kubernetes/issues/28997)


@ -23,8 +23,8 @@ import (
"github.com/google/cadvisor/manager/watcher"
rktapi "github.com/coreos/rkt/api/v1alpha"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/klog"
)
type rktContainerWatcher struct {
@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error {
}
func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) {
glog.V(1).Infof("Starting detectRktContainers thread")
klog.V(1).Infof("Starting detectRktContainers thread")
ticker := time.Tick(10 * time.Second)
curpods := make(map[string]*rktapi.Pod)
@ -62,13 +62,13 @@ func (self *rktContainerWatcher) detectRktContainers(events chan watcher.Contain
case <-ticker:
pods, err := listRunningPods()
if err != nil {
glog.Errorf("detectRktContainers: listRunningPods failed: %v", err)
klog.Errorf("detectRktContainers: listRunningPods failed: %v", err)
continue
}
curpods = self.syncRunningPods(pods, events, curpods)
case <-self.stopWatcher:
glog.Infof("Exiting rktContainer Thread")
klog.Infof("Exiting rktContainer Thread")
return
}
}
@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan
for id, pod := range curpods {
if _, ok := newpods[id]; !ok {
for _, cgroup := range podToCgroup(pod) {
glog.V(2).Infof("cgroup to delete = %v", cgroup)
klog.V(2).Infof("cgroup to delete = %v", cgroup)
self.sendDestroyEvent(cgroup, events)
}
}


@ -23,8 +23,8 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog"
)
// infoProvider will usually be manager.Manager, but can be swapped out for testing.
@ -942,7 +942,7 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
if err != nil {
c.errors.Set(1)
glog.Warningf("Couldn't get containers: %s", err)
klog.Warningf("Couldn't get containers: %s", err)
return
}
rawLabels := map[string]struct{}{}
@ -1006,7 +1006,7 @@ func (c *PrometheusCollector) collectVersionInfo(ch chan<- prometheus.Metric) {
versionInfo, err := c.infoProvider.GetVersionInfo()
if err != nil {
c.errors.Set(1)
glog.Warningf("Couldn't get version info: %s", err)
klog.Warningf("Couldn't get version info: %s", err)
return
}
ch <- prometheus.MustNewConstMetric(versionInfoDesc, prometheus.GaugeValue, 1, []string{versionInfo.KernelVersion, versionInfo.ContainerOsVersion, versionInfo.DockerVersion, versionInfo.CadvisorVersion, versionInfo.CadvisorRevision}...)
@ -1016,7 +1016,7 @@ func (c *PrometheusCollector) collectMachineInfo(ch chan<- prometheus.Metric) {
machineInfo, err := c.infoProvider.GetMachineInfo()
if err != nil {
c.errors.Set(1)
glog.Warningf("Couldn't get machine info: %s", err)
klog.Warningf("Couldn't get machine info: %s", err)
return
}
ch <- prometheus.MustNewConstMetric(machineInfoCoresDesc, prometheus.GaugeValue, float64(machineInfo.NumCores))


@ -28,7 +28,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager"
"github.com/golang/glog"
"k8s.io/klog"
)
const ContainersPage = "/containers/"
@ -239,10 +239,10 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) {
}
err = pageTemplate.Execute(w, data)
if err != nil {
glog.Errorf("Failed to apply template: %s", err)
klog.Errorf("Failed to apply template: %s", err)
}
glog.V(5).Infof("Request took %s", time.Since(start))
klog.V(5).Infof("Request took %s", time.Since(start))
}
// Build a relative path to the root of the container page.


@ -26,7 +26,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager"
"github.com/golang/glog"
"k8s.io/klog"
)
const DockerPage = "/docker/"
@ -154,9 +154,9 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) {
err := pageTemplate.Execute(w, data)
if err != nil {
glog.Errorf("Failed to apply template: %s", err)
klog.Errorf("Failed to apply template: %s", err)
}
glog.V(5).Infof("Request took %s", time.Since(start))
klog.V(5).Infof("Request took %s", time.Since(start))
return
}


@ -26,7 +26,7 @@ import (
"github.com/google/cadvisor/manager"
auth "github.com/abbot/go-http-auth"
"github.com/golang/glog"
"k8s.io/klog"
)
var pageTemplate *template.Template
@ -71,7 +71,7 @@ func init() {
pageTemplate = template.New("containersTemplate").Funcs(funcMap)
_, err := pageTemplate.Parse(string(containersHtmlTemplate))
if err != nil {
glog.Fatalf("Failed to parse template: %s", err)
klog.Fatalf("Failed to parse template: %s", err)
}
}


@ -23,7 +23,7 @@ import (
"net/url"
"path"
"github.com/golang/glog"
"k8s.io/klog"
)
const StaticResource = "/static/"
@ -72,6 +72,6 @@ func HandleRequest(w http.ResponseWriter, u *url.URL) {
}
if _, err := w.Write(content); err != nil {
glog.Errorf("Failed to write response: %v", err)
klog.Errorf("Failed to write response: %v", err)
}
}


@ -30,7 +30,7 @@ import (
"github.com/google/cadvisor/utils/container"
kafka "github.com/Shopify/sarama"
"github.com/golang/glog"
"k8s.io/klog"
)
func init() {
@ -143,7 +143,7 @@ func newStorage(machineName string) (storage.StorageDriver, error) {
config.Producer.RequiredAcks = kafka.WaitForAll
brokerList := strings.Split(*brokers, ",")
glog.V(4).Infof("Kafka brokers:%q", *brokers)
klog.V(4).Infof("Kafka brokers:%q", *brokers)
producer, err := kafka.NewAsyncProducer(brokerList, config)
if err != nil {


@ -18,7 +18,7 @@ import (
"fmt"
"net"
"github.com/golang/glog"
"k8s.io/klog"
)
type Client struct {
@ -30,7 +30,7 @@ type Client struct {
func (self *Client) Open() error {
conn, err := net.Dial("udp", self.HostPort)
if err != nil {
glog.Errorf("failed to open udp connection to %q: %v", self.HostPort, err)
klog.Errorf("failed to open udp connection to %q: %v", self.HostPort, err)
return err
}
self.conn = conn


@ -30,7 +30,7 @@ import (
_ "github.com/google/cadvisor/storage/statsd"
_ "github.com/google/cadvisor/storage/stdout"
"github.com/golang/glog"
"k8s.io/klog"
)
var (
@ -45,8 +45,8 @@ func NewMemoryStorage() (*memory.InMemoryCache, error) {
return nil, err
}
if *storageDriver != "" {
glog.V(1).Infof("Using backend storage type %q", *storageDriver)
klog.V(1).Infof("Using backend storage type %q", *storageDriver)
}
glog.V(1).Infof("Caching stats in memory for %v", *storageDuration)
klog.V(1).Infof("Caching stats in memory for %v", *storageDuration)
return memory.New(*storageDuration, backendStorage), nil
}


@ -21,7 +21,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"cloud.google.com/go/compute/metadata"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -32,7 +32,7 @@ const (
func onGCE() bool {
data, err := ioutil.ReadFile(gceProductName)
if err != nil {
glog.V(2).Infof("Error while reading product_name: %v", err)
klog.V(2).Infof("Error while reading product_name: %v", err)
return false
}
return strings.Contains(string(data), google)


@ -19,8 +19,8 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/golang/glog"
"github.com/google/cadvisor/utils/cpuload/netlink"
"k8s.io/klog"
)
type CpuLoadReader interface {
@ -41,6 +41,6 @@ func New() (CpuLoadReader, error) {
if err != nil {
return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err)
}
glog.V(4).Info("Using a netlink-based load reader")
klog.V(4).Info("Using a netlink-based load reader")
return reader, nil
}


@ -20,7 +20,7 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/golang/glog"
"k8s.io/klog"
)
type NetlinkReader struct {
@ -38,7 +38,7 @@ func New() (*NetlinkReader, error) {
if err != nil {
return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err)
}
glog.V(4).Infof("Family id for taskstats: %d", id)
klog.V(4).Infof("Family id for taskstats: %d", id)
return &NetlinkReader{
familyId: id,
conn: conn,
@ -75,6 +75,6 @@ func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats,
if err != nil {
return info.LoadStats{}, err
}
glog.V(4).Infof("Task stats for %q: %+v", path, stats)
klog.V(4).Infof("Task stats for %q: %+v", path, stats)
return stats, nil
}


@ -17,25 +17,26 @@ package main
import (
"flag"
"github.com/golang/glog"
"github.com/google/cadvisor/utils/oomparser"
"k8s.io/klog"
)
// demonstrates how to run oomparser.OomParser to get OomInstance information
func main() {
klog.InitFlags(nil)
flag.Parse()
// out is a user-provided channel from which the user can read incoming
// OomInstance objects
outStream := make(chan *oomparser.OomInstance)
oomLog, err := oomparser.New()
if err != nil {
glog.Infof("Couldn't make a new oomparser. %v", err)
klog.Infof("Couldn't make a new oomparser. %v", err)
} else {
go oomLog.StreamOoms(outStream)
// demonstration of how to get oomLog's list of oomInstances or access
// the user-declared oomInstance channel, here called outStream
for oomInstance := range outStream {
glog.Infof("Reading the buffer. Output is %v", oomInstance)
klog.Infof("Reading the buffer. Output is %v", oomInstance)
}
}
}


@ -22,7 +22,7 @@ import (
"github.com/euank/go-kmsg-parser/kmsgparser"
"github.com/golang/glog"
"k8s.io/klog"
)
var (
@ -107,11 +107,11 @@ func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) {
for msg := range kmsgEntries {
err := getContainerName(msg.Message, oomCurrentInstance)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
finished, err := getProcessNamePid(msg.Message, oomCurrentInstance)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
if finished {
oomCurrentInstance.TimeOfDeath = msg.Timestamp
@ -122,7 +122,7 @@ func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) {
}
}
// Should not happen
glog.Errorf("exiting analyzeLines. OOM events will not be reported.")
klog.Errorf("exiting analyzeLines. OOM events will not be reported.")
}
// initializes an OomParser object. Returns an OomParser object and an error.
@ -140,11 +140,11 @@ type glogAdapter struct{}
var _ kmsgparser.Logger = glogAdapter{}
func (glogAdapter) Infof(format string, args ...interface{}) {
glog.V(4).Infof(format, args...)
klog.V(4).Infof(format, args...)
}
func (glogAdapter) Warningf(format string, args ...interface{}) {
glog.V(2).Infof(format, args...)
klog.V(2).Infof(format, args...)
}
func (glogAdapter) Errorf(format string, args ...interface{}) {
glog.Warningf(format, args...)
klog.Warningf(format, args...)
}


@ -24,8 +24,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
inotify "github.com/sigma/go-inotify"
"k8s.io/klog"
)
type Tail struct {
@ -96,7 +96,7 @@ func (t *Tail) attemptOpen() error {
var lastErr error
for interval := defaultRetryInterval; ; interval *= 2 {
attempt++
glog.V(4).Infof("Opening %s (attempt %d)", t.filename, attempt)
klog.V(4).Infof("Opening %s (attempt %d)", t.filename, attempt)
var err error
t.file, err = os.Open(t.filename)
if err == nil {
@ -106,7 +106,7 @@ func (t *Tail) attemptOpen() error {
return nil
}
lastErr = err
glog.V(4).Infof("open log file %s error: %v", t.filename, err)
klog.V(4).Infof("open log file %s error: %v", t.filename, err)
if interval >= maxRetryInterval {
break
@ -127,7 +127,7 @@ func (t *Tail) watchLoop() {
for {
err := t.watchFile()
if err != nil {
glog.Errorf("Tail failed on %s: %v", t.filename, err)
klog.Errorf("Tail failed on %s: %v", t.filename, err)
break
}
}
@ -152,7 +152,7 @@ func (t *Tail) watchFile() error {
case event := <-t.watcher.Event:
eventPath := filepath.Clean(event.Name) // Directory events have an extra '/'
if eventPath == t.filename {
glog.V(4).Infof("Log file %s moved/deleted", t.filename)
klog.V(4).Infof("Log file %s moved/deleted", t.filename)
return nil
}
case <-t.stop:

vendor/k8s.io/klog/.travis.yml generated vendored Normal file

@ -0,0 +1,14 @@
language: go
dist: xenial
go:
- 1.9.x
- 1.10.x
- 1.11.x
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- diff -u <(echo -n) <(golint $(go list -e ./...))
- go tool vet .
- go test -v -race ./...
install:
- go get golang.org/x/lint/golint

vendor/k8s.io/klog/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,31 @@
# Contributing Guidelines
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
## Getting Started
We have full documentation on how to get started contributing here:
<!---
If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
-->
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
## Mentorship
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
<!---
Custom Information - if you're copying this template for the first time you can add custom content here, for example:
## Contact Information
- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel.
- [Mailing list](URL)
-->

vendor/k8s.io/klog/OWNERS generated vendored Normal file

@ -0,0 +1,11 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
approvers:
- dims
- thockin
- justinsb
- tallclair
- piosz
- brancz
- DirectXMan12
- lavalamp


@ -1,3 +1,10 @@
klog
====
klog is a permanent fork of https://github.com/golang/glog. The original README from glog is below.
----
glog
====
@ -5,7 +12,7 @@ Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
http://code.google.com/p/google-glog
https://github.com/google/glog
By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.

vendor/k8s.io/klog/RELEASE.md generated vendored Normal file

@ -0,0 +1,9 @@
# Release Process
`klog` is released on an as-needed basis. The process is as follows:
1. An issue is filed proposing a new release, with a changelog since the last release
1. All [OWNERS](OWNERS) must LGTM this release
1. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION`
1. The release issue is closed
1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] klog $VERSION is released`

vendor/k8s.io/klog/SECURITY_CONTACTS generated vendored Normal file

@ -0,0 +1,20 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
dims
thockin
justinsb
tallclair
piosz
brancz
DirectXMan12
lavalamp


@ -14,7 +14,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
//
@ -68,7 +68,7 @@
// -vmodule=gopher*=3
// sets the V level to 3 in all Go files whose names begin "gopher".
//
package glog
package klog
import (
"bufio"
@ -396,13 +396,6 @@ type flushSyncWriter interface {
}
func init() {
flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
flag.Var(&logging.verbosity, "v", "log level for V logs")
flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
// Default stderrThreshold is ERROR.
logging.stderrThreshold = errorLog
@ -410,6 +403,22 @@ func init() {
go logging.flushDaemon()
}
// InitFlags explicitly registers klog's flags on the given FlagSet, or on flag.CommandLine when flagset is nil
func InitFlags(flagset *flag.FlagSet) {
if flagset == nil {
flagset = flag.CommandLine
}
flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory")
flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file")
flagset.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
flagset.Var(&logging.verbosity, "v", "log level for V logs")
flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
}
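// A minimal usage sketch (not part of this file): a program or test can keep
// klog's flags off flag.CommandLine by handing InitFlags a private FlagSet.
// All flag names below are registered by InitFlags above; the argument
// values are illustrative.
//
//	fs := flag.NewFlagSet("logging", flag.ExitOnError)
//	klog.InitFlags(fs)
//	fs.Parse([]string{"-v=3", "-logtostderr=true", "-skip_headers=true"})
//	klog.V(3).Info("visible because -v=3")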
// Flush flushes all pending log I/O.
func Flush() {
logging.lockAndFlushAll()
@ -453,6 +462,17 @@ type loggingT struct {
// safely using atomic.LoadInt32.
vmodule moduleSpec // The state of the -vmodule flag.
verbosity Level // V logging level, the value of the -v flag.
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
logDir string
// If non-empty, specifies the path of the file to write logs to. Mutually
// exclusive with the log_dir option.
logFile string
// If true, do not add the prefix headers, useful when used with SetOutput
skipHeaders bool
}
// buffer holds a byte Buffer for reuse. The zero value is ready for use.
@ -556,6 +576,9 @@ func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
s = infoLog // for safety.
}
buf := l.getBuffer()
if l.skipHeaders {
return buf
}
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
// It's worth about 3X. Fprintf is hard.
@ -667,6 +690,45 @@ func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToSt
l.output(s, buf, file, line, alsoToStderr)
}
// redirectBuffer is used to set an alternate destination for the logs
type redirectBuffer struct {
w io.Writer
}
func (rb *redirectBuffer) Sync() error {
return nil
}
func (rb *redirectBuffer) Flush() error {
return nil
}
func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
return rb.w.Write(bytes)
}
// SetOutput sets the output destination for all severities
func SetOutput(w io.Writer) {
for s := fatalLog; s >= infoLog; s-- {
rb := &redirectBuffer{
w: w,
}
logging.file[s] = rb
}
}
// SetOutputBySeverity sets the output destination for a specific severity
func SetOutputBySeverity(name string, w io.Writer) {
sev, ok := severityByName(name)
if !ok {
panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
}
rb := &redirectBuffer{
w: w,
}
logging.file[sev] = rb
}
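// A minimal sketch of redirecting logs, e.g. in tests (names are illustrative):
//
//	var buf bytes.Buffer
//	klog.SetOutput(&buf)                         // every severity -> buf
//	klog.SetOutputBySeverity("ERROR", os.Stderr) // then single out ERROR
//	klog.Info("captured")
//	klog.Flush()
//
// Valid severity names are "INFO", "WARNING", "ERROR" and "FATAL"; combined
// with the new -skip_headers flag, the captured lines carry no header prefix,
// which makes them easy to assert on.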
// output writes the data to the log files and releases the buffer.
func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
l.mu.Lock()
@ -676,10 +738,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo
}
}
data := buf.Bytes()
if !flag.Parsed() {
os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
os.Stderr.Write(data)
} else if l.toStderr {
if l.toStderr {
os.Stderr.Write(data)
} else {
if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
@ -879,7 +938,7 @@ const flushInterval = 30 * time.Second
// flushDaemon periodically flushes the log file buffers.
func (l *loggingT) flushDaemon() {
for _ = range time.NewTicker(flushInterval).C {
for range time.NewTicker(flushInterval).C {
l.lockAndFlushAll()
}
}


@ -16,11 +16,10 @@
// File I/O for logs.
package glog
package klog
import (
"errors"
"flag"
"fmt"
"os"
"os/user"
@ -36,13 +35,9 @@ var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
func createLogDirs() {
if *logDir != "" {
logDirs = append(logDirs, *logDir)
if logging.logDir != "" {
logDirs = append(logDirs, logging.logDir)
}
logDirs = append(logDirs, os.TempDir())
}
@ -103,6 +98,13 @@ var onceLogDirs sync.Once
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
if logging.logFile != "" {
f, err := os.Create(logging.logFile)
if err == nil {
return f, logging.logFile, nil
}
return nil, "", fmt.Errorf("log: unable to create log: %v", err)
}
onceLogDirs.Do(createLogDirs)
if len(logDirs) == 0 {
return nil, "", errors.New("log: no log dirs")
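
The new logging.logFile branch above makes -log_file take precedence: when set, klog writes one file at that exact path and skips the log_dir candidate-directory search entirely, which is why the two options are documented as mutually exclusive. A minimal sketch of selecting single-file mode programmatically (the path is illustrative):

klog.InitFlags(nil)
flag.Set("log_file", "/tmp/app.log") // equivalent to passing -log_file=/tmp/app.log
flag.Parse()
klog.Info("written to /tmp/app.log instead of per-severity files under log_dir")
klog.Flush()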


@ -18,8 +18,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
zfs "github.com/mistifyio/go-zfs"
"k8s.io/klog"
)
// zfsWatcher maintains a cache of filesystem -> usage stats for a
@ -49,7 +49,7 @@ func NewZfsWatcher(filesystem string) (*ZfsWatcher, error) {
func (w *ZfsWatcher) Start() {
err := w.Refresh()
if err != nil {
glog.Errorf("encountered error refreshing zfs watcher: %v", err)
klog.Errorf("encountered error refreshing zfs watcher: %v", err)
}
for {
@ -60,12 +60,12 @@ func (w *ZfsWatcher) Start() {
start := time.Now()
err = w.Refresh()
if err != nil {
glog.Errorf("encountered error refreshing zfs watcher: %v", err)
klog.Errorf("encountered error refreshing zfs watcher: %v", err)
}
// print latency for refresh
duration := time.Since(start)
glog.V(5).Infof("zfs(%d) took %s", start.Unix(), duration)
klog.V(5).Infof("zfs(%d) took %s", start.Unix(), duration)
}
}
}
@ -95,12 +95,12 @@ func (w *ZfsWatcher) Refresh() error {
newCache := make(map[string]uint64)
parent, err := zfs.GetDataset(w.filesystem)
if err != nil {
glog.Errorf("encountered error getting zfs filesystem: %s: %v", w.filesystem, err)
klog.Errorf("encountered error getting zfs filesystem: %s: %v", w.filesystem, err)
return err
}
children, err := parent.Children(0)
if err != nil {
glog.Errorf("encountered error getting children of zfs filesystem: %s: %v", w.filesystem, err)
klog.Errorf("encountered error getting children of zfs filesystem: %s: %v", w.filesystem, err)
return err
}