Merge pull request #1827 from tallclair/logging

Clean up cAdvisor logging
David Ashpole, 2017-11-29 10:16:58 -08:00 (committed by GitHub)
commit 7d11f4243f
18 changed files with 63 additions and 64 deletions
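
For readers skimming the diff below: the commit's pattern is to route informational messages through glog's verbosity gate (glog.V(n)) and keep Warning/Error for genuinely actionable problems, with cadvisor.go defaulting the -v flag to 2. The stand-alone sketch below illustrates how that gating behaves; it is not part of the diff, and the specific levels and messages are only illustrative.

```go
// Minimal sketch of glog verbosity gating, mirroring the convention this commit adopts.
package main

import (
	"flag"

	"github.com/golang/glog"
)

func init() {
	// Mirror the commit's cadvisor.go change: default the glog verbosity to 2.
	// flag.Parse() in main still lets an explicit -v=N on the command line override it.
	flag.Set("v", "2")
}

func main() {
	flag.Parse()
	defer glog.Flush()

	glog.V(1).Info("important startup info: printed at the default -v=2")
	glog.V(2).Info("steady-state info: also printed at -v=2")
	glog.V(4).Info("debug detail: printed only when run with -v=4 or higher")
	glog.Warning("warnings and errors are printed regardless of -v")
}
```

As with cAdvisor itself, pass -logtostderr (a standard glog flag) to see the output on the terminal, and -v=4 or higher to surface the debug-level lines that this commit demotes.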

View File

@@ -47,7 +47,7 @@ const nvidiaVendorId = "0x10de"
 // Setup initializes NVML if nvidia devices are present on the node.
 func (nm *NvidiaManager) Setup() {
 	if !detectDevices(nvidiaVendorId) {
-		glog.Info("No NVIDIA devices found.")
+		glog.V(4).Info("No NVIDIA devices found.")
 		return
 	}
@@ -56,7 +56,7 @@ func (nm *NvidiaManager) Setup() {
 		return
 	}
 	go func() {
-		glog.Info("Starting goroutine to initialize NVML")
+		glog.V(2).Info("Starting goroutine to initialize NVML")
 		// TODO: use globalHousekeepingInterval
 		for range time.Tick(time.Minute) {
 			nm.initializeNVML()
@@ -71,7 +71,7 @@ func (nm *NvidiaManager) Setup() {
 func detectDevices(vendorId string) bool {
 	devices, err := ioutil.ReadDir(sysFsPCIDevicesPath)
 	if err != nil {
-		glog.Warningf("error reading %q: %v", sysFsPCIDevicesPath, err)
+		glog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err)
 		return false
 	}
@@ -79,11 +79,11 @@ func detectDevices(vendorId string) bool {
 		vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor")
 		content, err := ioutil.ReadFile(vendorPath)
 		if err != nil {
-			glog.Infof("Error while reading %q: %v", vendorPath, err)
+			glog.V(4).Infof("Error while reading %q: %v", vendorPath, err)
 			continue
 		}
 		if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) {
-			glog.Infof("Found device with vendorId %q", vendorId)
+			glog.V(3).Infof("Found device with vendorId %q", vendorId)
 			return true
 		}
 	}
@@ -95,7 +95,7 @@ func (nm *NvidiaManager) initializeNVML() {
 	if err := gonvml.Initialize(); err != nil {
 		// This is under a logging level because otherwise we may cause
 		// log spam if the drivers/nvml is not installed on the system.
-		glog.V(3).Infof("Could not initialize NVML: %v", err)
+		glog.V(4).Infof("Could not initialize NVML: %v", err)
 		return
 	}
 	numDevices, err := gonvml.DeviceCount()
@@ -107,7 +107,7 @@ func (nm *NvidiaManager) initializeNVML() {
 		nm.Unlock()
 		return
 	}
-	glog.Infof("NVML initialized. Number of nvidia devices: %v", numDevices)
+	glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices)
 	nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices)
 	for i := 0; i < int(numDevices); i++ {
 		device, err := gonvml.DeviceHandleByIndex(uint(i))

View File

@@ -104,6 +104,9 @@ func (ml *metricSetValue) Set(value string) error {
 func init() {
 	flag.Var(&ignoreMetrics, "disable_metrics", "comma-separated list of `metrics` to be disabled. Options are 'disk', 'network', 'tcp', 'udp'. Note: tcp and udp are disabled by default due to high CPU usage.")
+	// Default logging verbosity to V(2)
+	flag.Set("v", "2")
 }
 func main() {
@@ -156,7 +159,7 @@ func main() {
 	// Install signal handler.
 	installSignalHandler(containerManager)
-	glog.Infof("Starting cAdvisor version: %s-%s on port %d", version.Info["version"], version.Info["revision"], *argPort)
+	glog.V(1).Infof("Starting cAdvisor version: %s-%s on port %d", version.Info["version"], version.Info["revision"], *argPort)
 	addr := fmt.Sprintf("%s:%d", *argIp, *argPort)
 	glog.Fatal(http.ListenAndServe(addr, mux))

View File

@@ -133,7 +133,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
 	}
-	glog.Infof("Registering containerd factory")
+	glog.V(1).Infof("Registering containerd factory")
 	f := &containerdFactory{
 		cgroupSubsystems: cgroupSubsystems,
 		client:           client,

View File

@@ -154,7 +154,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
 	}
-	glog.Infof("Registering CRI-O factory")
+	glog.V(1).Infof("Registering CRI-O factory")
 	f := &crioFactory{
 		client:           client,
 		cgroupSubsystems: cgroupSubsystems,

View File

@@ -185,7 +185,7 @@ func newCrioContainerHandler(
 	}
 	// TODO for env vars we wanted to show from container.Config.Env from whitelist
 	//for _, exposedEnv := range metadataEnvs {
-	//glog.Infof("TODO env whitelist: %v", exposedEnv)
+	//glog.V(4).Infof("TODO env whitelist: %v", exposedEnv)
 	//}
 	return handler, nil

View File

@@ -352,7 +352,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		}
 	}
-	glog.Infof("Registering Docker factory")
+	glog.V(1).Infof("Registering Docker factory")
 	f := &dockerFactory{
 		cgroupSubsystems: cgroupSubsystems,
 		client:           client,

View File

@@ -83,7 +83,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
 		return err
 	}
-	glog.Infof("Registering Raw factory")
+	glog.V(1).Infof("Registering Raw factory")
 	factory := &rawFactory{
 		machineInfoFactory: machineInfoFactory,
 		fsInfo:             fsInfo,

View File

@@ -86,7 +86,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
 		return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
 	}
-	glog.Infof("Registering Rkt factory")
+	glog.V(1).Infof("Registering Rkt factory")
 	factory := &rktFactory{
 		machineInfoFactory: machineInfoFactory,
 		fsInfo:             fsInfo,

View File

@@ -51,7 +51,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string {
 // Register registers the systemd container factory.
 func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
-	glog.Infof("Registering systemd factory")
+	glog.V(1).Infof("Registering systemd factory")
 	factory := &systemdFactory{}
 	container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})
 	return nil

View File

@@ -136,8 +136,8 @@ func NewFsInfo(context Context) (FsInfo, error) {
 	fsInfo.addDockerImagesLabel(context, mounts)
 	fsInfo.addCrioImagesLabel(context, mounts)
-	glog.Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
-	glog.Infof("Filesystem partitions: %+v", fsInfo.partitions)
+	glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
+	glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions)
 	fsInfo.addSystemRootLabel(mounts)
 	return fsInfo, nil
 }
@@ -162,7 +162,7 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) {
 		path := filepath.Join(dir, file.Name())
 		target, err := os.Readlink(path)
 		if err != nil {
-			glog.Infof("Failed to resolve symlink for %q", path)
+			glog.Warningf("Failed to resolve symlink for %q", path)
 			continue
 		}
 		device, err := filepath.Abs(filepath.Join(dir, target))
@@ -438,7 +438,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 	file, err := os.Open(diskStatsFile)
 	if err != nil {
 		if os.IsNotExist(err) {
-			glog.Infof("not collecting filesystem statistics because file %q was not available", diskStatsFile)
+			glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile)
 			return diskStatsMap, nil
 		}
 		return nil, err
@@ -561,12 +561,12 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
 		return 0, fmt.Errorf("failed to exec du - %v", err)
 	}
 	timer := time.AfterFunc(timeout, func() {
-		glog.Infof("killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
+		glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
 		cmd.Process.Kill()
 	})
 	stdoutb, souterr := ioutil.ReadAll(stdoutp)
 	if souterr != nil {
-		glog.Errorf("failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
+		glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
 	}
 	stderrb, _ := ioutil.ReadAll(stderrp)
 	err = cmd.Wait()
@@ -600,7 +600,7 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
 		return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
 	}
 	timer := time.AfterFunc(timeout, func() {
-		glog.Infof("killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
+		glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
 		findCmd.Process.Kill()
 	})
 	err := findCmd.Wait()
@@ -741,7 +741,7 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
 		return 0, 0, err
 	}
-	glog.Infof("btrfs mount %#v", mount)
+	glog.V(4).Infof("btrfs mount %#v", mount)
 	if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK {
 		err := syscall.Stat(mount.Mountpoint, buf)
 		if err != nil {
@@ -749,8 +749,8 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
 			return 0, 0, err
 		}
-		glog.Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
-		glog.Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))
+		glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
+		glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))
 		return int(major(buf.Dev)), int(minor(buf.Dev)), nil
 	} else {

View File

@@ -60,7 +60,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
 	// Setup the authenticator object
 	if httpAuthFile != "" {
-		glog.Infof("Using auth file %s", httpAuthFile)
+		glog.V(1).Infof("Using auth file %s", httpAuthFile)
 		secrets := auth.HtpasswdFileProvider(httpAuthFile)
 		authenticator := auth.NewBasicAuthenticator(httpAuthRealm, secrets)
 		mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler))
@@ -70,7 +70,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
 		authenticated = true
 	}
 	if httpAuthFile == "" && httpDigestFile != "" {
-		glog.Infof("Using digest file %s", httpDigestFile)
+		glog.V(1).Infof("Using digest file %s", httpDigestFile)
 		secrets := auth.HtdigestFileProvider(httpDigestFile)
 		authenticator := auth.NewDigestAuthenticator(httpDigestRealm, secrets)
 		mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler))

View File

@@ -49,7 +49,7 @@ func getInfoFromFiles(filePaths string) string {
 			return strings.TrimSpace(string(id))
 		}
 	}
-	glog.Infof("Couldn't collect info from any of the files in %q", filePaths)
+	glog.Warningf("Couldn't collect info from any of the files in %q", filePaths)
 	return ""
 }

View File

@@ -377,8 +377,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
 		// Create cpu load reader.
 		loadReader, err := cpuload.New()
 		if err != nil {
-			// TODO(rjnagal): Promote to warning once we support cpu load inside namespaces.
-			glog.Infof("Could not initialize cpu load reader for %q: %s", ref.Name, err)
+			glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err)
 		} else {
 			cont.loadReader = loadReader
 		}
@@ -467,7 +466,7 @@ func (c *containerData) housekeeping() {
 			stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples)
 			if err != nil {
 				if c.allowErrorLogging() {
-					glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
+					glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
 				}
 			} else if len(stats) < numSamples {
 				// Ignore, not enough stats yet.
@@ -483,6 +482,7 @@ func (c *containerData) housekeeping() {
 				instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds())
 				usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
 				usageInHuman := units.HumanSize(float64(usageMemory))
+				// Don't set verbosity since this is already protected by the logUsage flag.
 				glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
 			}
 		}
@@ -504,7 +504,7 @@ func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepin
 	err := c.updateStats()
 	if err != nil {
 		if c.allowErrorLogging() {
-			glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err)
+			glog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err)
 		}
 	}
 	// Log if housekeeping took too long.

View File

@@ -148,19 +148,19 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
 	if err != nil {
 		return nil, err
 	}
-	glog.Infof("cAdvisor running in container: %q", selfContainer)
+	glog.V(2).Infof("cAdvisor running in container: %q", selfContainer)
 	var (
 		dockerStatus info.DockerStatus
 		rktPath      string
 	)
 	if tempDockerStatus, err := docker.Status(); err != nil {
-		glog.Warningf("Unable to connect to Docker: %v", err)
+		glog.V(5).Infof("Docker not connected: %v", err)
 	} else {
 		dockerStatus = tempDockerStatus
 	}
 	if tmpRktPath, err := rkt.RktPath(); err != nil {
-		glog.Warningf("unable to connect to Rkt api service: %v", err)
+		glog.V(5).Infof("Rkt not connected: %v", err)
 	} else {
 		rktPath = tmpRktPath
 	}
@@ -171,7 +171,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
 	}
 	crioInfo, err := crioClient.Info()
 	if err != nil {
-		glog.Warningf("unable to connect to CRI-O api service: %v", err)
+		glog.V(5).Infof("CRI-O not connected: %v", err)
 	}
 	context := fs.Context{
@@ -222,13 +222,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
 		return nil, err
 	}
 	newManager.machineInfo = *machineInfo
-	glog.Infof("Machine: %+v", newManager.machineInfo)
+	glog.V(1).Infof("Machine: %+v", newManager.machineInfo)
 	versionInfo, err := getVersionInfo()
 	if err != nil {
 		return nil, err
 	}
-	glog.Infof("Version: %+v", *versionInfo)
+	glog.V(1).Infof("Version: %+v", *versionInfo)
 	newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
 	return newManager, nil
@@ -267,12 +267,12 @@ type manager struct {
 func (self *manager) Start() error {
 	err := docker.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Docker container factory registration failed: %v.", err)
+		glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
 	}
 	err = rkt.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the rkt container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
 	} else {
 		watcher, err := rktwatcher.NewRktContainerWatcher()
 		if err != nil {
@@ -283,17 +283,17 @@ func (self *manager) Start() error {
 	err = containerd.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the containerd container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
 	}
 	err = crio.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the crio container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the crio container factory failed: %v", err)
 	}
 	err = systemd.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the systemd container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
 	}
 	err = raw.Register(self, self.fsInfo, self.ignoreMetrics)
@@ -326,12 +326,12 @@ func (self *manager) Start() error {
 	if err != nil {
 		return err
 	}
-	glog.Infof("Starting recovery of all containers")
+	glog.V(2).Infof("Starting recovery of all containers")
 	err = self.detectSubcontainers("/")
 	if err != nil {
 		return err
 	}
-	glog.Infof("Recovery completed")
+	glog.V(2).Infof("Recovery completed")
 	// Watch for new container.
 	quitWatcher := make(chan error)
@@ -849,29 +849,25 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
 		if err != nil {
 			return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err)
 		}
-		glog.V(3).Infof("Got config from %q: %q", v, configFile)
+		glog.V(4).Infof("Got config from %q: %q", v, configFile)
 		if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
 			newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
 			if err != nil {
-				glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 			err = cont.collectorManager.RegisterCollector(newCollector)
 			if err != nil {
-				glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 		} else {
 			newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
 			if err != nil {
-				glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 			err = cont.collectorManager.RegisterCollector(newCollector)
 			if err != nil {
-				glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 		}
 	}
@@ -946,11 +942,11 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
 	}
 	devicesCgroupPath, err := handler.GetCgroupPath("devices")
 	if err != nil {
-		glog.Infof("Error getting devices cgroup path: %v", err)
+		glog.Warningf("Error getting devices cgroup path: %v", err)
 	} else {
 		cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
 		if err != nil {
-			glog.Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
+			glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
 		}
 	}
@@ -959,7 +955,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
 	collectorConfigs := collector.GetCollectorConfigs(labels)
 	err = m.registerCollectors(collectorConfigs, cont)
 	if err != nil {
-		glog.Infof("failed to register collectors for %q: %v", containerName, err)
+		glog.Warningf("Failed to register collectors for %q: %v", containerName, err)
 	}
 	// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
@@ -1179,7 +1175,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
 }
 func (self *manager) watchForNewOoms() error {
-	glog.Infof("Started watching for new ooms in manager")
+	glog.V(2).Infof("Started watching for new ooms in manager")
 	outStream := make(chan *oomparser.OomInstance, 10)
 	oomLog, err := oomparser.New()
 	if err != nil {

View File

@@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error {
 }
 func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) {
-	glog.Infof("starting detectRktContainers thread")
+	glog.V(1).Infof("Starting detectRktContainers thread")
 	ticker := time.Tick(10 * time.Second)
 	curpods := make(map[string]*rktapi.Pod)
@@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan
 	for id, pod := range curpods {
 		if _, ok := newpods[id]; !ok {
 			for _, cgroup := range podToCgroup(pod) {
-				glog.Infof("cgroup to delete = %v", cgroup)
+				glog.V(2).Infof("cgroup to delete = %v", cgroup)
 				self.sendDestroyEvent(cgroup, events)
 			}
 		}

View File

@@ -45,8 +45,8 @@ func NewMemoryStorage() (*memory.InMemoryCache, error) {
 		return nil, err
 	}
 	if *storageDriver != "" {
-		glog.Infof("Using backend storage type %q", *storageDriver)
+		glog.V(1).Infof("Using backend storage type %q", *storageDriver)
 	}
-	glog.Infof("Caching stats in memory for %v", *storageDuration)
+	glog.V(1).Infof("Caching stats in memory for %v", *storageDuration)
 	return memory.New(*storageDuration, backendStorage), nil
 }

View File

@@ -41,6 +41,6 @@ func New() (CpuLoadReader, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err)
 	}
-	glog.V(3).Info("Using a netlink-based load reader")
+	glog.V(4).Info("Using a netlink-based load reader")
 	return reader, nil
 }

View File

@@ -143,7 +143,7 @@ func (glogAdapter) Infof(format string, args ...interface{}) {
 	glog.V(4).Infof(format, args)
 }
 func (glogAdapter) Warningf(format string, args ...interface{}) {
-	glog.Infof(format, args)
+	glog.V(2).Infof(format, args)
 }
 func (glogAdapter) Errorf(format string, args ...interface{}) {
 	glog.Warningf(format, args)
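
The glogAdapter hunk above wires a third-party library's leveled logger into glog, and the commit demotes that output (Infof to V(4), Warningf to V(2)) so library chatter sits below cAdvisor's own messages. Below is a minimal, self-contained sketch of the same pattern; the Logger interface and the surrounding program are assumptions for illustration, and only the glog verbosity mapping mirrors the diff.

```go
// Sketch of routing a third-party leveled logger through glog at demoted verbosity.
package main

import (
	"flag"

	"github.com/golang/glog"
)

// Logger is the leveled-logging interface a third-party library might expect.
// (Assumed for illustration; the diff does not show the real interface.)
type Logger interface {
	Infof(format string, args ...interface{})
	Warningf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
}

// glogAdapter demotes library output below cAdvisor's own messages, using the
// same mapping as the hunk above: Infof -> V(4), Warningf -> V(2), Errorf -> Warningf.
// (The upstream file passes args without expansion; args... is used here for
// idiomatic formatting.)
type glogAdapter struct{}

func (glogAdapter) Infof(format string, args ...interface{}) {
	glog.V(4).Infof(format, args...)
}

func (glogAdapter) Warningf(format string, args ...interface{}) {
	glog.V(2).Infof(format, args...)
}

func (glogAdapter) Errorf(format string, args ...interface{}) {
	glog.Warningf(format, args...)
}

func main() {
	flag.Parse()
	defer glog.Flush()

	var log Logger = glogAdapter{}
	log.Infof("library debug chatter (%d): visible only at -v=4 or above", 1)
	log.Warningf("library warning: visible at -v=2 or above")
	log.Errorf("library error: surfaced as a glog warning")
}
```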