Adding integration test framework along with 3 tests.
These tests are similar to Go unit tests except that they target testing of a running cAdvisor. They do this by interacting with a testing framework that is able to talk to the running cAdvisor, which may be local or remote.
This commit is contained in:
parent
83d3810eba
commit
f6a90d7bac
@ -11,6 +11,6 @@ before_script:
|
||||
- sudo service influxdb start
|
||||
script:
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
- godep go test -v -race github.com/google/cadvisor/...
|
||||
- godep go test -v -race -test.short github.com/google/cadvisor/...
|
||||
- godep go build github.com/google/cadvisor
|
||||
- go vet github.com/google/cadvisor
|
||||
|
206
integration/framework/framework.go
Normal file
206
integration/framework/framework.go
Normal file
@ -0,0 +1,206 @@
|
||||
package framework
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/google/cadvisor/client"
|
||||
)
|
||||
|
||||
// Command-line flags identifying the cAdvisor instance under test.
var host = flag.String("host", "localhost", "Address of the host being tested")
var port = flag.Int("port", 8080, "Port of the application on the host being tested")
|
||||
|
||||
// Framework is the integration test framework. It provides access to the
// host under test, Docker helpers, and a cAdvisor client.
type Framework interface {
	// Clean the framework state.
	Cleanup()

	// The testing.T used by the framework and the current test.
	T() *testing.T

	// Returns information about the host being tested.
	Host() HostInfo

	// Returns the Docker actions for the test framework.
	Docker() DockerActions

	// Returns the cAdvisor actions for the test framework.
	Cadvisor() CadvisorActions
}
|
||||
|
||||
// Instantiates a Framework. Cleanup *must* be called. Class is thread-compatible.
|
||||
// All framework actions report fatal errors on the t specified at creation time.
|
||||
//
|
||||
// Typical use:
|
||||
//
|
||||
// func TestFoo(t *testing.T) {
|
||||
// fm := framework.New(t)
|
||||
// defer fm.Cleanup()
|
||||
// ... actual test ...
|
||||
// }
|
||||
func New(t *testing.T) Framework {
|
||||
// All integration tests are large.
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping framework test in short mode")
|
||||
}
|
||||
|
||||
return &realFramework{
|
||||
host: HostInfo{
|
||||
Host: *host,
|
||||
Port: *port,
|
||||
},
|
||||
t: t,
|
||||
cleanups: make([]func(), 0),
|
||||
}
|
||||
}
|
||||
|
||||
// DockerActions are the Docker-related operations the framework can perform
// on the host under test.
type DockerActions interface {
	// Run the no-op pause Docker container and return its ID.
	RunPause() string

	// Run the specified command in a Docker busybox container and return its ID.
	RunBusybox(cmd ...string) string

	// Runs a Docker container in the background. Uses the specified DockerRunArgs and command.
	// Returns the ID of the new container.
	//
	// e.g.:
	// Run(DockerRunArgs{Image: "busybox"}, "ping", "www.google.com")
	// -> docker run busybox ping www.google.com
	Run(args DockerRunArgs, cmd ...string) string
}
|
||||
|
||||
// CadvisorActions are the cAdvisor-related operations the framework provides.
type CadvisorActions interface {
	// Returns a cAdvisor client to the machine being tested.
	Client() *client.Client
}
|
||||
|
||||
// realFramework is the concrete Framework implementation. It also implements
// DockerActions and CadvisorActions directly (see Docker() and Cadvisor()).
type realFramework struct {
	// Host being tested.
	host HostInfo
	// testing.T of the current test; framework actions report fatal errors on it.
	t *testing.T
	// Lazily-instantiated cAdvisor client; see Client().
	cadvisorClient *client.Client

	// Cleanup functions to call on Cleanup()
	cleanups []func()
}
|
||||
|
||||
// HostInfo identifies the machine on which the cAdvisor under test runs.
type HostInfo struct {
	// Hostname or address of the machine.
	Host string
	// Port cAdvisor is serving on.
	Port int
}

// FullHost returns the base URL of the host being tested: http://<host>:<port>/
func (h HostInfo) FullHost() string {
	return fmt.Sprintf("http://%s:%d/", h.Host, h.Port)
}
|
||||
|
||||
// T returns the testing.T used by the framework and the current test.
func (self *realFramework) T() *testing.T {
	return self.t
}

// Host returns information about the host being tested.
func (self *realFramework) Host() HostInfo {
	return self.host
}

// Docker returns the Docker actions; realFramework implements them directly.
func (self *realFramework) Docker() DockerActions {
	return self
}

// Cadvisor returns the cAdvisor actions; realFramework implements them directly.
func (self *realFramework) Cadvisor() CadvisorActions {
	return self
}
|
||||
|
||||
// Call all cleanup functions.
|
||||
func (self *realFramework) Cleanup() {
|
||||
for _, cleanupFunc := range self.cleanups {
|
||||
cleanupFunc()
|
||||
}
|
||||
}
|
||||
|
||||
// Gets a client to the cAdvisor being tested.
|
||||
func (self *realFramework) Client() *client.Client {
|
||||
if self.cadvisorClient == nil {
|
||||
cadvisorClient, err := client.NewClient(self.Host().FullHost())
|
||||
if err != nil {
|
||||
self.t.Fatalf("Failed to instantiate the cAdvisor client: %v", err)
|
||||
}
|
||||
self.cadvisorClient = cadvisorClient
|
||||
}
|
||||
return self.cadvisorClient
|
||||
}
|
||||
|
||||
// RunPause runs the no-op pause Docker container and returns its ID.
func (self *realFramework) RunPause() string {
	return self.Run(DockerRunArgs{
		Image: "kubernetes/pause",
	}, "sleep", "inf")
}
|
||||
|
||||
// Run the specified command in a Docker busybox container and return its ID.
func (self *realFramework) RunBusybox(cmd ...string) string {
	return self.Run(DockerRunArgs{
		Image: "busybox",
	}, cmd...)
}
|
||||
|
||||
// DockerRunArgs are the arguments for a "docker run" invocation.
type DockerRunArgs struct {
	// Image to use.
	Image string

	// Arguments to the Docker CLI.
	Args []string
}
|
||||
|
||||
// Runs a Docker container in the background. Uses the specified DockerRunArgs and command.
|
||||
//
|
||||
// e.g.:
|
||||
// RunDockerContainer(DockerRunArgs{Image: "busybox"}, "ping", "www.google.com")
|
||||
// -> docker run busybox ping www.google.com
|
||||
func (self *realFramework) Run(args DockerRunArgs, cmd ...string) string {
|
||||
if self.host.Host == "localhost" {
|
||||
// Just run locally.
|
||||
out, err := exec.Command("docker", append(append(append([]string{"run", "-d"}, args.Args...), args.Image), cmd...)...).CombinedOutput()
|
||||
if err != nil {
|
||||
self.t.Fatalf("Failed to run docker container with run args %+v due to error: %v and output: %q", args, err, out)
|
||||
return ""
|
||||
}
|
||||
// The last lime is the container ID.
|
||||
elements := strings.Split(string(out), "\n")
|
||||
if len(elements) < 2 {
|
||||
self.t.Fatalf("Failed to find Docker container ID in output %q", out)
|
||||
return ""
|
||||
}
|
||||
containerId := elements[len(elements)-2]
|
||||
self.cleanups = append(self.cleanups, func() {
|
||||
out, err := exec.Command("docker", "rm", "-f", containerId).CombinedOutput()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to remove container %q with error: %v and output: %q", containerId, err, out)
|
||||
}
|
||||
})
|
||||
return containerId
|
||||
}
|
||||
|
||||
// TODO(vmarmol): Implement.
|
||||
// We must SSH to the remote machine and run the command.
|
||||
|
||||
self.t.Fatalf("Non-localhost Run not implemented")
|
||||
return ""
|
||||
}
|
||||
|
||||
// Runs retryFunc until no error is returned. After dur time the last error is returned.
|
||||
// Note that the function does not timeout the execution of retryFunc when the limit is reached.
|
||||
func RetryForDuration(retryFunc func() error, dur time.Duration) error {
|
||||
waitUntil := time.Now().Add(dur)
|
||||
var err error
|
||||
for time.Now().Before(waitUntil) {
|
||||
err = retryFunc()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
8
integration/tests/TODO.md
Normal file
8
integration/tests/TODO.md
Normal file
@ -0,0 +1,8 @@
|
||||
Tests to Write:
|
||||
- UI comes up
|
||||
-- / -> /containers
|
||||
-- /containers
|
||||
-- /docker
|
||||
- API tests
|
||||
-- /containers
|
||||
-- /subcontainers
|
336
integration/tests/api/docker_test.go
Normal file
336
integration/tests/api/docker_test.go
Normal file
@ -0,0 +1,336 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/cadvisor/info"
|
||||
"github.com/google/cadvisor/integration/framework"
|
||||
)
|
||||
|
||||
// contains reports whether el appears in vals.
func contains(el string, vals []string) bool {
	for i := range vals {
		if vals[i] == el {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Sanity check the container by:
// - Checking that the specified alias is a valid one for this container.
// - Verifying that stats are not empty.
// Failures are reported as (non-fatal) test errors on t.
func sanityCheck(alias string, containerInfo info.ContainerInfo, t *testing.T) {
	if !contains(alias, containerInfo.Aliases) {
		t.Errorf("Failed to find container alias %q in aliases %v", alias, containerInfo.Aliases)
	}
	if len(containerInfo.Stats) == 0 {
		t.Errorf("No container stats found: %+v", containerInfo)
	}
}
|
||||
|
||||
// Waits up to 5s for a container with the specified alias to appear.
|
||||
func waitForContainer(alias string, fm framework.Framework) {
|
||||
err := framework.RetryForDuration(func() error {
|
||||
_, err := fm.Cadvisor().Client().DockerContainer(alias, &info.ContainerInfoRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}, 5*time.Second)
|
||||
if err != nil {
|
||||
fm.T().Fatalf("Timed out waiting for container %q to be available in cAdvisor: %v", alias, err)
|
||||
}
|
||||
}
|
||||
|
||||
// A Docker container in /docker/<ID>
func TestDockerContainerById(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	containerId := fm.Docker().RunPause()

	// Wait for the container to show up.
	waitForContainer(containerId, fm)

	// Request only the latest stat sample.
	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	if err != nil {
		t.Fatal(err)
	}

	sanityCheck(containerId, containerInfo, t)
}
|
||||
|
||||
// A Docker container in /docker/<name>
func TestDockerContainerByName(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Use a PID-suffixed name so concurrent test runs don't collide.
	containerName := fmt.Sprintf("test-docker-container-by-name-%d", os.Getpid())
	fm.Docker().Run(framework.DockerRunArgs{
		Image: "kubernetes/pause",
		Args:  []string{"--name", containerName},
	}, "sleep", "inf")

	// Wait for the container to show up.
	waitForContainer(containerName, fm)

	// Request only the latest stat sample.
	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerName, request)
	if err != nil {
		t.Fatal(err)
	}

	sanityCheck(containerName, containerInfo, t)
}
|
||||
|
||||
// Find the first container with the specified alias in containers.
|
||||
func findContainer(alias string, containers []info.ContainerInfo, t *testing.T) info.ContainerInfo {
|
||||
for _, cont := range containers {
|
||||
if contains(alias, cont.Aliases) {
|
||||
return cont
|
||||
}
|
||||
}
|
||||
t.Fatalf("Failed to find container %q in %+v", alias, containers)
|
||||
return info.ContainerInfo{}
|
||||
}
|
||||
|
||||
// All Docker containers through /docker
func TestGetAllDockerContainers(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the containers to show up.
	containerId1 := fm.Docker().RunPause()
	containerId2 := fm.Docker().RunPause()
	waitForContainer(containerId1, fm)
	waitForContainer(containerId2, fm)

	// Request only the latest stat sample per container.
	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containersInfo, err := fm.Cadvisor().Client().AllDockerContainers(request)
	if err != nil {
		t.Fatal(err)
	}

	// Other containers may exist on the host, so only require "at least" the
	// two we started.
	if len(containersInfo) < 2 {
		t.Fatalf("At least 2 Docker containers should exist, received %d: %+v", len(containersInfo), containersInfo)
	}
	sanityCheck(containerId1, findContainer(containerId1, containersInfo, t), t)
	sanityCheck(containerId2, findContainer(containerId2, containersInfo, t), t)
}
|
||||
|
||||
// Check expected properties of a Docker container.
|
||||
func TestBasicDockerContainer(t *testing.T) {
|
||||
fm := framework.New(t)
|
||||
defer fm.Cleanup()
|
||||
|
||||
containerName := fmt.Sprintf("test-basic-docker-container-%d", os.Getpid())
|
||||
containerId := fm.Docker().Run(framework.DockerRunArgs{
|
||||
Image: "kubernetes/pause",
|
||||
Args: []string{
|
||||
"--name", containerName,
|
||||
},
|
||||
}, "sleep", "inf")
|
||||
|
||||
// Wait for the container to show up.
|
||||
waitForContainer(containerId, fm)
|
||||
|
||||
request := &info.ContainerInfoRequest{
|
||||
NumStats: 1,
|
||||
}
|
||||
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check that the contianer is known by both its name and ID.
|
||||
sanityCheck(containerId, containerInfo, t)
|
||||
sanityCheck(containerName, containerInfo, t)
|
||||
|
||||
if len(containerInfo.Subcontainers) != 0 {
|
||||
t.Errorf("Container has subcontainers: %+v", containerInfo)
|
||||
}
|
||||
|
||||
if len(containerInfo.Stats) != 1 {
|
||||
t.Fatalf("Container has more than 1 stat, has %d: %+v", len(containerInfo.Stats), containerInfo.Stats)
|
||||
}
|
||||
}
|
||||
|
||||
// difference returns the absolute difference |a - b|.
func difference(a, b uint64) uint64 {
	if b > a {
		return b - a
	}
	return a - b
}
|
||||
|
||||
// TODO(vmarmol): Handle if CPU or memory is not isolated on this system.
// Check the ContainerSpec.
func TestDockerContainerSpec(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Start a container with explicit CPU and memory isolation settings so
	// the reported spec has known expected values.
	cpuShares := uint64(2048)
	cpuMask := "0"
	memoryLimit := uint64(1 << 30) // 1GB
	containerId := fm.Docker().Run(framework.DockerRunArgs{
		Image: "kubernetes/pause",
		Args: []string{
			"--cpu-shares", strconv.FormatUint(cpuShares, 10),
			"--cpuset", cpuMask,
			"--memory", strconv.FormatUint(memoryLimit, 10),
		},
	}, "sleep", "inf")

	// Wait for the container to show up.
	waitForContainer(containerId, fm)

	// Request only the latest stat sample.
	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	if err != nil {
		t.Fatal(err)
	}
	sanityCheck(containerId, containerInfo, t)

	// The spec reported by cAdvisor should reflect the isolation settings
	// passed to docker run above.
	if !containerInfo.Spec.HasCpu {
		t.Errorf("CPU should be isolated: %+v", containerInfo)
	}
	if containerInfo.Spec.Cpu.Limit != cpuShares {
		t.Errorf("Container should have %d shares, has %d", cpuShares, containerInfo.Spec.Cpu.Limit)
	}
	if containerInfo.Spec.Cpu.Mask != cpuMask {
		t.Errorf("Cpu mask should be %q, but is %q", cpuMask, containerInfo.Spec.Cpu.Mask)
	}
	if !containerInfo.Spec.HasMemory {
		t.Errorf("Memory should be isolated: %+v", containerInfo)
	}
	if containerInfo.Spec.Memory.Limit != memoryLimit {
		t.Errorf("Container should have memory limit of %d, has %d", memoryLimit, containerInfo.Spec.Memory.Limit)
	}
	if !containerInfo.Spec.HasNetwork {
		t.Errorf("Network should be isolated: %+v", containerInfo)
	}
	if !containerInfo.Spec.HasFilesystem {
		t.Errorf("Filesystem should be isolated: %+v", containerInfo)
	}
}
|
||||
|
||||
// expectNonZero reports a test error if val is negative or zero.
// description names the value in the failure message.
func expectNonZero(val int, description string, t *testing.T) {
	if val < 0 {
		// Fix typo in the original message ("posiive").
		t.Errorf("%s should be positive", description)
	}
	expectNonZeroU(uint64(val), description, t)
}

// expectNonZeroU reports a test error if val is zero.
func expectNonZeroU(val uint64, description string, t *testing.T) {
	if val == 0 {
		t.Errorf("%s should be non-zero", description)
	}
}
|
||||
|
||||
// Check the CPU ContainerStats.
|
||||
func TestDockerContainerCpuStats(t *testing.T) {
|
||||
fm := framework.New(t)
|
||||
defer fm.Cleanup()
|
||||
|
||||
// Wait for the container to show up.
|
||||
containerId := fm.Docker().RunBusybox("ping", "www.google.com")
|
||||
waitForContainer(containerId, fm)
|
||||
|
||||
request := &info.ContainerInfoRequest{
|
||||
NumStats: 1,
|
||||
}
|
||||
containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sanityCheck(containerId, containerInfo, t)
|
||||
stat := containerInfo.Stats[0]
|
||||
|
||||
// Checks for CpuStats.
|
||||
expectNonZeroU(stat.Cpu.Usage.Total, "CPU total usage", t)
|
||||
expectNonZero(len(stat.Cpu.Usage.PerCpu), "per-core CPU usage", t)
|
||||
totalUsage := uint64(0)
|
||||
for _, usage := range stat.Cpu.Usage.PerCpu {
|
||||
totalUsage += usage
|
||||
}
|
||||
dif := difference(totalUsage, stat.Cpu.Usage.Total)
|
||||
if dif > uint64((5 * time.Millisecond).Nanoseconds()) {
|
||||
t.Errorf("Per-core CPU usage (%d) and total usage (%d) are more than 1ms off", totalUsage, stat.Cpu.Usage.Total)
|
||||
}
|
||||
userPlusSystem := stat.Cpu.Usage.User + stat.Cpu.Usage.System
|
||||
dif = difference(totalUsage, userPlusSystem)
|
||||
if dif > uint64((25 * time.Millisecond).Nanoseconds()) {
|
||||
t.Errorf("User + system CPU usage (%d) and total usage (%d) are more than 20ms off", userPlusSystem, stat.Cpu.Usage.Total)
|
||||
}
|
||||
if stat.Cpu.Load != 0 {
|
||||
t.Errorf("Non-zero load is unexpected as it is currently unset. Do we need to update the test?")
|
||||
}
|
||||
}
|
||||
|
||||
// Check the memory ContainerStats.
func TestDockerContainerMemoryStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the container to show up.
	containerId := fm.Docker().RunBusybox("ping", "www.google.com")
	waitForContainer(containerId, fm)

	// Request only the latest stat sample.
	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	if err != nil {
		t.Fatal(err)
	}
	sanityCheck(containerId, containerInfo, t)
	stat := containerInfo.Stats[0]

	// Checks for MemoryStats.
	expectNonZeroU(stat.Memory.Usage, "memory usage", t)
	expectNonZeroU(stat.Memory.WorkingSet, "memory working set", t)
	// The working set is a subset of total usage, so it can never exceed it.
	if stat.Memory.WorkingSet > stat.Memory.Usage {
		t.Errorf("Memory working set (%d) should be at most equal to memory usage (%d)", stat.Memory.WorkingSet, stat.Memory.Usage)
	}
	// TODO(vmarmol): Add checks for ContainerData and HierarchicalData
}
|
||||
|
||||
// Check the network ContainerStats.
func TestDockerContainerNetworkStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the container to show up. ping generates outbound traffic so
	// the tx counters should be non-zero.
	containerId := fm.Docker().RunBusybox("ping", "www.google.com")
	waitForContainer(containerId, fm)

	// Request only the latest stat sample.
	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	if err != nil {
		t.Fatal(err)
	}
	sanityCheck(containerId, containerInfo, t)
	stat := containerInfo.Stats[0]

	// Checks for NetworkStats.
	expectNonZeroU(stat.Network.TxBytes, "network tx bytes", t)
	expectNonZeroU(stat.Network.TxPackets, "network tx packets", t)
	// TODO(vmarmol): Can probably do a better test with two containers pinging each other.
}
|
36
integration/tests/api/machine_test.go
Normal file
36
integration/tests/api/machine_test.go
Normal file
@ -0,0 +1,36 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/cadvisor/integration/framework"
|
||||
)
|
||||
|
||||
// TestMachineInformationIsReturned checks that the machine info endpoint
// returns plausible hardware information.
func TestMachineInformationIsReturned(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	machineInfo, err := fm.Cadvisor().Client().MachineInfo()
	if err != nil {
		t.Fatal(err)
	}

	// Check for "sane" values. Note these can change with time.
	if machineInfo.NumCores <= 0 || machineInfo.NumCores >= 1000000 {
		t.Errorf("Machine info has unexpected number of cores: %v", machineInfo.NumCores)
	}
	if machineInfo.MemoryCapacity <= 0 || machineInfo.MemoryCapacity >= (1<<50 /* 1PB */) {
		t.Errorf("Machine info has unexpected amount of memory: %v", machineInfo.MemoryCapacity)
	}
	if len(machineInfo.Filesystems) == 0 {
		t.Errorf("Expected to have some filesystems, found none")
	}
	for _, fs := range machineInfo.Filesystems {
		if fs.Device == "" {
			t.Errorf("Expected a non-empty device name in: %+v", fs)
		}
		// NOTE(review): if fs.Capacity is an unsigned type, the "< 0" arm is
		// dead code — confirm the field's declared type in the info package.
		if fs.Capacity < 0 || fs.Capacity >= (1<<60 /* 1 EB*/) {
			t.Errorf("Unexpected capacity in device %q: %v", fs.Device, fs.Capacity)
		}
	}
}
|
26
integration/tests/healthz/healthz_test.go
Normal file
26
integration/tests/healthz/healthz_test.go
Normal file
@ -0,0 +1,26 @@
|
||||
package healthz
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/google/cadvisor/integration/framework"
|
||||
)
|
||||
|
||||
func TestHealthzOk(t *testing.T) {
|
||||
fm := framework.New(t)
|
||||
defer fm.Cleanup()
|
||||
|
||||
// Ensure that /heathz returns "ok"
|
||||
resp, err := http.Get(fm.Host().FullHost() + "healthz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
|
||||
if string(body) != "ok" {
|
||||
t.Fatalf("cAdvisor returned unexpected healthz status of %q", body)
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue
Block a user