Re-enable disk usage integration tests with retries instead of an opaque sleep.
Signed-off-by: Vishnu kannan <vishnuk@google.com>
This commit is contained in:
parent
262ceb075c
commit
17a09ffbc5
@@ -39,13 +39,17 @@ import (
|
||||
// must be able to ssh into hosts without password
|
||||
// godep go run ./integration/runner/runner.go --logtostderr --v 2 --ssh-config <.ssh/config file> <list of hosts>
|
||||
|
||||
const cadvisorBinary = "cadvisor"
|
||||
const (
|
||||
cadvisorBinary = "cadvisor"
|
||||
testTimeout = 15 * time.Minute
|
||||
)
|
||||
|
||||
var cadvisorTimeout = flag.Duration("cadvisor_timeout", 15*time.Second, "Time to wait for cAdvisor to come up on the remote host")
|
||||
var port = flag.Int("port", 8080, "Port in which to start cAdvisor in the remote host")
|
||||
var testRetryCount = flag.Int("test-retry-count", 3, "Number of times to retry failed tests before failing.")
|
||||
var testRetryWhitelist = flag.String("test-retry-whitelist", "", "Path to newline-separated list of regexps for test failures that should be retried. If empty, no tests are retried.")
|
||||
var sshOptions = flag.String("ssh-options", "", "Commandline options passed to ssh.")
|
||||
var testArgs = flag.String("test_args", "", "arguments to be passed to the integration tests")
|
||||
var retryRegex *regexp.Regexp
|
||||
|
||||
func getAttributes(ipAddress, portStr string) (*cadvisorApi.Attributes, error) {
|
||||
@@ -158,7 +162,7 @@ func PushAndRunTests(host, testDir string) error {
|
||||
}
|
||||
// Run the command
|
||||
|
||||
err = RunCommand("godep", "go", "test", "github.com/google/cadvisor/integration/tests/...", "--host", host, "--port", portStr, "--ssh-options", *sshOptions)
|
||||
err = RunCommand("godep", "go", "test", "--timeout", testTimeout.String(), *testArgs, "github.com/google/cadvisor/integration/tests/...", "--host", host, "--port", portStr, "--ssh-options", *sshOptions)
|
||||
if err == nil {
|
||||
// On success, break out of retry loop
|
||||
break
|
||||
@@ -166,7 +170,7 @@ func PushAndRunTests(host, testDir string) error {
|
||||
|
||||
// Only retry on test failures caused by these known flaky failure conditions
|
||||
if retryRegex == nil || !retryRegex.Match([]byte(err.Error())) {
|
||||
glog.Warningf("Skipping retry for tests on host %s because error is not whitelisted: %s", host, err.Error())
|
||||
glog.Warningf("Skipping retry for tests on host %s because error is not whitelisted", host)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@@ -292,36 +292,62 @@ func TestDockerContainerNetworkStats(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDockerFilesystemStats(t *testing.T) {
|
||||
t.Skip("enable this once this test does not cause timeouts.")
|
||||
fm := framework.New(t)
|
||||
defer fm.Cleanup()
|
||||
|
||||
storageDriver := fm.Docker().StorageDriver()
|
||||
switch storageDriver {
|
||||
case framework.Aufs:
|
||||
case framework.Overlay:
|
||||
default:
|
||||
t.Skip("skipping filesystem stats test")
|
||||
}
|
||||
const (
|
||||
ddUsage = uint64(1 << 3) // 8 bytes (1 << 3); NOTE(review): comment previously said "1 KB" — confirm intended write size
|
||||
sleepDuration = 10 * time.Second
|
||||
)
|
||||
// Wait for the container to show up.
|
||||
containerId := fm.Docker().RunBusybox("/bin/sh", "-c", "dd if=/dev/zero of=/file count=1 bs=1M & ping www.google.com")
|
||||
containerId := fm.Docker().RunBusybox("/bin/sh", "-c", fmt.Sprintf("'dd if=/dev/zero of=/file count=2 bs=%d & sleep 10000'", ddUsage))
|
||||
|
||||
waitForContainer(containerId, fm)
|
||||
time.Sleep(time.Minute)
|
||||
request := &v2.RequestOptions{
|
||||
IdType: v2.TypeDocker,
|
||||
Count: 1,
|
||||
}
|
||||
containerInfo, err := fm.Cadvisor().ClientV2().Stats(containerId, request)
|
||||
time.Sleep(time.Minute)
|
||||
require.NoError(t, err)
|
||||
require.True(t, len(containerInfo) == 1)
|
||||
var info v2.ContainerInfo
|
||||
for _, cInfo := range containerInfo {
|
||||
info = cInfo
|
||||
needsBaseUsageCheck := false
|
||||
storageDriver := fm.Docker().StorageDriver()
|
||||
switch storageDriver {
|
||||
case framework.Aufs, framework.Overlay:
|
||||
needsBaseUsageCheck = true
|
||||
}
|
||||
pass := false
|
||||
// We need to wait for the `dd` operation to complete.
|
||||
for i := 0; i < 10; i++ {
|
||||
containerInfo, err := fm.Cadvisor().ClientV2().Stats(containerId, request)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(containerInfo), 1)
|
||||
var info v2.ContainerInfo
|
||||
// There is only one container in containerInfo. Since it is a map with unknown key,
|
||||
// use the value blindly.
|
||||
for _, cInfo := range containerInfo {
|
||||
info = cInfo
|
||||
}
|
||||
sanityCheckV2(containerId, info, t)
|
||||
|
||||
require.NotNil(t, info.Stats[0].Filesystem.TotalUsageBytes)
|
||||
if *info.Stats[0].Filesystem.TotalUsageBytes >= ddUsage {
|
||||
if !needsBaseUsageCheck {
|
||||
pass = true
|
||||
break
|
||||
}
|
||||
require.NotNil(t, info.Stats[0].Filesystem.BaseUsageBytes)
|
||||
if *info.Stats[0].Filesystem.BaseUsageBytes >= ddUsage {
|
||||
pass = true
|
||||
break
|
||||
}
|
||||
}
|
||||
t.Logf("expected total usage %d bytes to be greater than %d bytes", *info.Stats[0].Filesystem.TotalUsageBytes, ddUsage)
|
||||
if needsBaseUsageCheck {
|
||||
t.Logf("expected base %d bytes to be greater than %d bytes", *info.Stats[0].Filesystem.BaseUsageBytes, ddUsage)
|
||||
}
|
||||
t.Logf("retrying after %s...", sleepDuration.String())
|
||||
time.Sleep(sleepDuration)
|
||||
}
|
||||
|
||||
if !pass {
|
||||
t.Fail()
|
||||
}
|
||||
sanityCheckV2(containerId, info, t)
|
||||
require.NotNil(t, info.Stats[0].Filesystem.BaseUsageBytes)
|
||||
assert.True(t, *info.Stats[0].Filesystem.BaseUsageBytes > (1<<6), "expected base fs usage to be greater than 1MB")
|
||||
require.NotNil(t, info.Stats[0].Filesystem.TotalUsageBytes)
|
||||
assert.True(t, *info.Stats[0].Filesystem.TotalUsageBytes > (1<<6), "expected total fs usage to be greater than 1MB")
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user