Merge pull request #1070 from ncdc/fix-devicemapper-labeling
Fixes for devicemapper partition reporting
commit 92fff91a38
fs/fs.go (120 changed lines)
@@ -54,6 +54,8 @@ type RealFsInfo struct {
     // Map from label to block device path.
     // Labels are intent-specific tags that are auto-detected.
     labels map[string]string
+
+    dmsetup dmsetupClient
 }
 
 type Context struct {
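
Worth noting for reviewers: the new dmsetup field is an injection seam, so code that used to shell out to dmsetup directly can now be unit tested. A minimal, self-contained sketch of the pattern (type names taken from this diff; the fake mirrors testDmsetup in fs_test.go below):

package main

import (
    "fmt"
    "os/exec"
)

// The seam: callers depend on this small interface instead of exec'ing
// dmsetup directly.
type dmsetupClient interface {
    table(poolName string) ([]byte, error)
}

// Production implementation shells out, as defaultDmsetupClient does in
// the diff.
type defaultDmsetupClient struct{}

func (*defaultDmsetupClient) table(poolName string) ([]byte, error) {
    return exec.Command("dmsetup", "table", poolName).Output()
}

// Test implementation returns canned bytes/errors, mirroring testDmsetup.
type fakeDmsetup struct {
    data []byte
    err  error
}

func (f *fakeDmsetup) table(poolName string) ([]byte, error) {
    return f.data, f.err
}

func main() {
    var client dmsetupClient = &fakeDmsetup{
        data: []byte("0 53870592 thin-pool 253:2 253:3 1024 0 1 skip_block_zeroing"),
    }
    out, err := client.table("vg_vagrant-docker--pool")
    fmt.Println(string(out), err)
}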
@@ -67,9 +69,11 @@ func NewFsInfo(context Context) (FsInfo, error) {
     if err != nil {
         return nil, err
     }
-    partitions := make(map[string]partition, 0)
-    fsInfo := &RealFsInfo{}
-    fsInfo.labels = make(map[string]string, 0)
+    fsInfo := &RealFsInfo{
+        partitions: make(map[string]partition, 0),
+        labels:     make(map[string]string, 0),
+        dmsetup:    &defaultDmsetupClient{},
+    }
     supportedFsType := map[string]bool{
         // all ext systems are checked through prefix.
         "btrfs": true,
@@ -82,49 +86,87 @@ func NewFsInfo(context Context) (FsInfo, error) {
             continue
         }
         // Avoid bind mounts.
-        if _, ok := partitions[mount.Source]; ok {
+        if _, ok := fsInfo.partitions[mount.Source]; ok {
             continue
         }
         if mount.Fstype == "zfs" {
             Fstype = mount.Fstype
         }
-        partitions[mount.Source] = partition{
+        fsInfo.partitions[mount.Source] = partition{
             fsType:     Fstype,
             mountpoint: mount.Mountpoint,
             major:      uint(mount.Major),
             minor:      uint(mount.Minor),
         }
     }
-    if storageDriver, ok := context.DockerInfo["Driver"]; ok && storageDriver == "devicemapper" {
-        dev, major, minor, blockSize, err := dockerDMDevice(context.DockerInfo["DriverStatus"])
-        if err != nil {
-            glog.Warningf("Could not get Docker devicemapper device: %v", err)
-        } else {
-            partitions[dev] = partition{
-                fsType:    "devicemapper",
-                major:     major,
-                minor:     minor,
-                blockSize: blockSize,
-            }
-            fsInfo.labels[LabelDockerImages] = dev
-        }
-    }
-    glog.Infof("Filesystem partitions: %+v", partitions)
-    fsInfo.partitions = partitions
-    fsInfo.addLabels(context)
+
+    // need to call this before the log line below printing out the partitions, as this function may
+    // add a "partition" for devicemapper to fsInfo.partitions
+    fsInfo.addDockerImagesLabel(context)
+
+    glog.Infof("Filesystem partitions: %+v", fsInfo.partitions)
+    fsInfo.addSystemRootLabel()
     return fsInfo, nil
 }
 
-func (self *RealFsInfo) addLabels(context Context) {
-    dockerPaths := getDockerImagePaths(context)
+// getDockerDeviceMapperInfo returns information about the devicemapper device and "partition" if
+// docker is using devicemapper for its storage driver. If a loopback device is being used, don't
+// return any information or error, as we want to report based on the actual partition where the
+// loopback file resides, instead of the loopback file itself.
+func (self *RealFsInfo) getDockerDeviceMapperInfo(dockerInfo map[string]string) (string, *partition, error) {
+    if storageDriver, ok := dockerInfo["Driver"]; ok && storageDriver != "devicemapper" {
+        return "", nil, nil
+    }
+
+    var driverStatus [][]string
+    if err := json.Unmarshal([]byte(dockerInfo["DriverStatus"]), &driverStatus); err != nil {
+        return "", nil, err
+    }
+
+    dataLoopFile := dockerStatusValue(driverStatus, "Data loop file")
+    if len(dataLoopFile) > 0 {
+        return "", nil, nil
+    }
+
+    dev, major, minor, blockSize, err := dockerDMDevice(driverStatus, self.dmsetup)
+    if err != nil {
+        return "", nil, err
+    }
+
+    return dev, &partition{
+        fsType:    "devicemapper",
+        major:     major,
+        minor:     minor,
+        blockSize: blockSize,
+    }, nil
+}
+
+// addSystemRootLabel attempts to determine which device contains the mount for /.
+func (self *RealFsInfo) addSystemRootLabel() {
     for src, p := range self.partitions {
         if p.mountpoint == "/" {
             if _, ok := self.labels[LabelSystemRoot]; !ok {
                 self.labels[LabelSystemRoot] = src
             }
         }
-        self.updateDockerImagesPath(src, p.mountpoint, dockerPaths)
-        // TODO(rjnagal): Add label for docker devicemapper pool.
     }
 }
+
+// addDockerImagesLabel attempts to determine which device contains the mount for docker images.
+func (self *RealFsInfo) addDockerImagesLabel(context Context) {
+    dockerDev, dockerPartition, err := self.getDockerDeviceMapperInfo(context.DockerInfo)
+    if err != nil {
+        glog.Warningf("Could not get Docker devicemapper device: %v", err)
+    }
+    if len(dockerDev) > 0 && dockerPartition != nil {
+        self.partitions[dockerDev] = *dockerPartition
+        self.labels[LabelDockerImages] = dockerDev
+    } else {
+        dockerPaths := getDockerImagePaths(context)
+
+        for src, p := range self.partitions {
+            self.updateDockerImagesPath(src, p.mountpoint, dockerPaths)
+        }
+    }
+}
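
For context, getDockerDeviceMapperInfo branches on the shape of Docker's DriverStatus JSON, a list of key/value pairs reported by docker info. A self-contained sketch of the two interesting shapes (the sample values are illustrative, not taken from a real daemon; statusValue re-implements the unexported dockerStatusValue helper so the snippet runs on its own):

package main

import (
    "encoding/json"
    "fmt"
)

// statusValue scans the [][]string key/value pairs for a key, as
// dockerStatusValue does in fs.go.
func statusValue(status [][]string, target string) string {
    for _, kv := range status {
        if len(kv) == 2 && kv[0] == target {
            return kv[1]
        }
    }
    return ""
}

func main() {
    // Loopback mode: a "Data loop file" entry is present, so the function
    // returns ("", nil, nil) and usage is attributed to the partition
    // holding the loop file rather than to the loop device itself.
    loopback := `[["Pool Name","docker-docker--pool"],["Data loop file","/var/lib/docker/devicemapper/devicemapper/data"]]`

    // Direct (non-loopback) mode: no loop file, so dmsetup is consulted
    // for the pool named here.
    direct := `[["Pool Name","vg_vagrant-docker--pool"],["Pool Blocksize","65536"]]`

    for _, raw := range []string{loopback, direct} {
        var status [][]string
        if err := json.Unmarshal([]byte(raw), &status); err != nil {
            panic(err)
        }
        fmt.Printf("pool=%q loopFile=%q\n",
            statusValue(status, "Pool Name"),
            statusValue(status, "Data loop file"))
    }
}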
@@ -345,20 +387,30 @@ func dockerStatusValue(status [][]string, target string) string {
     return ""
 }
 
+// dmsetupClient knows how to interact with dmsetup to retrieve information about devicemapper.
+type dmsetupClient interface {
+    table(poolName string) ([]byte, error)
+    //TODO add status(poolName string) ([]byte, error) and use it in getDMStats so we can unit test
+}
+
+// defaultDmsetupClient implements the standard behavior for interacting with dmsetup.
+type defaultDmsetupClient struct{}
+
+var _ dmsetupClient = &defaultDmsetupClient{}
+
+func (*defaultDmsetupClient) table(poolName string) ([]byte, error) {
+    return exec.Command("dmsetup", "table", poolName).Output()
+}
+
 // Devicemapper thin provisioning is detailed at
 // https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt
-func dockerDMDevice(driverStatus string) (string, uint, uint, uint, error) {
-    var config [][]string
-    err := json.Unmarshal([]byte(driverStatus), &config)
-    if err != nil {
-        return "", 0, 0, 0, err
-    }
-    poolName := dockerStatusValue(config, "Pool Name")
+func dockerDMDevice(driverStatus [][]string, dmsetup dmsetupClient) (string, uint, uint, uint, error) {
+    poolName := dockerStatusValue(driverStatus, "Pool Name")
     if len(poolName) == 0 {
         return "", 0, 0, 0, fmt.Errorf("Could not get dm pool name")
     }
 
-    out, err := exec.Command("dmsetup", "table", poolName).Output()
+    out, err := dmsetup.table(poolName)
     if err != nil {
         return "", 0, 0, 0, err
     }
 
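
The kernel document linked above defines the thin-pool line that dmsetup table prints: start, length, "thin-pool", metadata device, data device, data block size, low water mark, then feature arguments. The happy-path test below feeds "0 53870592 thin-pool 253:2 253:3 1024 0 1 skip_block_zeroing" and expects major 253, minor 3, block size 1024, i.e. the data-device and block-size fields. A standalone sketch of that extraction (this is not the parseDMTable code from fs.go, just an illustration of the format):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseThinPoolTable pulls the data device's major:minor and the data
// block size out of a dmsetup thin-pool table line.
func parseThinPoolTable(table string) (major, minor, blockSize uint, err error) {
    fields := strings.Fields(table)
    if len(fields) < 6 || fields[2] != "thin-pool" {
        return 0, 0, 0, fmt.Errorf("not a thin-pool table: %q", table)
    }
    devParts := strings.Split(fields[4], ":") // data device, e.g. "253:3"
    if len(devParts) != 2 {
        return 0, 0, 0, fmt.Errorf("bad data device: %q", fields[4])
    }
    maj, err := strconv.ParseUint(devParts[0], 10, 32)
    if err != nil {
        return 0, 0, 0, err
    }
    min, err := strconv.ParseUint(devParts[1], 10, 32)
    if err != nil {
        return 0, 0, 0, err
    }
    bs, err := strconv.ParseUint(fields[5], 10, 32) // data block size
    if err != nil {
        return 0, 0, 0, err
    }
    return uint(maj), uint(min), uint(bs), nil
}

func main() {
    major, minor, bs, err := parseThinPoolTable("0 53870592 thin-pool 253:2 253:3 1024 0 1 skip_block_zeroing")
    fmt.Println(major, minor, bs, err) // 253 3 1024 <nil>
}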
fs/fs_test.go (237 changed lines)
@@ -15,8 +15,10 @@
 package fs
 
 import (
+    "errors"
     "io/ioutil"
     "os"
+    "reflect"
     "testing"
 
     "github.com/stretchr/testify/assert"
@@ -156,3 +158,238 @@ func TestParseDMTable(t *testing.T) {
         }
     }
 }
+
+func TestAddSystemRootLabel(t *testing.T) {
+    fsInfo := &RealFsInfo{
+        labels: map[string]string{},
+        partitions: map[string]partition{
+            "/dev/mapper/vg_vagrant-lv_root": {
+                mountpoint: "/",
+            },
+            "vg_vagrant-docker--pool": {
+                mountpoint: "",
+                fsType:     "devicemapper",
+            },
+        },
+    }
+
+    fsInfo.addSystemRootLabel()
+    if e, a := "/dev/mapper/vg_vagrant-lv_root", fsInfo.labels[LabelSystemRoot]; e != a {
+        t.Errorf("expected %q, got %q", e, a)
+    }
+}
+
+type testDmsetup struct {
+    data []byte
+    err  error
+}
+
+func (t *testDmsetup) table(poolName string) ([]byte, error) {
+    return t.data, t.err
+}
+
+func TestGetDockerDeviceMapperInfo(t *testing.T) {
+    tests := []struct {
+        name              string
+        driver            string
+        driverStatus      string
+        dmsetupTable      string
+        dmsetupTableError error
+        expectedDevice    string
+        expectedPartition *partition
+        expectedError     bool
+    }{
+        {
+            name:              "not devicemapper",
+            driver:            "btrfs",
+            expectedDevice:    "",
+            expectedPartition: nil,
+            expectedError:     false,
+        },
+        {
+            name:              "error unmarshaling driver status",
+            driver:            "devicemapper",
+            driverStatus:      "{[[[asdf",
+            expectedDevice:    "",
+            expectedPartition: nil,
+            expectedError:     true,
+        },
+        {
+            name:              "loopback",
+            driver:            "devicemapper",
+            driverStatus:      `[["Data loop file","/var/lib/docker/devicemapper/devicemapper/data"]]`,
+            expectedDevice:    "",
+            expectedPartition: nil,
+            expectedError:     false,
+        },
+        {
+            name:              "missing pool name",
+            driver:            "devicemapper",
+            driverStatus:      `[[]]`,
+            expectedDevice:    "",
+            expectedPartition: nil,
+            expectedError:     true,
+        },
+        {
+            name:              "error invoking dmsetup",
+            driver:            "devicemapper",
+            driverStatus:      `[["Pool Name", "vg_vagrant-docker--pool"]]`,
+            dmsetupTableError: errors.New("foo"),
+            expectedDevice:    "",
+            expectedPartition: nil,
+            expectedError:     true,
+        },
+        {
+            name:              "unable to parse dmsetup table",
+            driver:            "devicemapper",
+            driverStatus:      `[["Pool Name", "vg_vagrant-docker--pool"]]`,
+            dmsetupTable:      "no data here!",
+            expectedDevice:    "",
+            expectedPartition: nil,
+            expectedError:     true,
+        },
+        {
+            name:           "happy path",
+            driver:         "devicemapper",
+            driverStatus:   `[["Pool Name", "vg_vagrant-docker--pool"]]`,
+            dmsetupTable:   "0 53870592 thin-pool 253:2 253:3 1024 0 1 skip_block_zeroing",
+            expectedDevice: "vg_vagrant-docker--pool",
+            expectedPartition: &partition{
+                fsType:    "devicemapper",
+                major:     253,
+                minor:     3,
+                blockSize: 1024,
+            },
+            expectedError: false,
+        },
+    }
+
+    for _, tt := range tests {
+        fsInfo := &RealFsInfo{
+            dmsetup: &testDmsetup{
+                data: []byte(tt.dmsetupTable),
+            },
+        }
+
+        dockerInfo := map[string]string{
+            "Driver":       tt.driver,
+            "DriverStatus": tt.driverStatus,
+        }
+
+        device, partition, err := fsInfo.getDockerDeviceMapperInfo(dockerInfo)
+
+        if tt.expectedError && err == nil {
+            t.Errorf("%s: expected error but got nil", tt.name)
+            continue
+        }
+        if !tt.expectedError && err != nil {
+            t.Errorf("%s: unexpected error: %v", tt.name, err)
+            continue
+        }
+
+        if e, a := tt.expectedDevice, device; e != a {
+            t.Errorf("%s: device: expected %q, got %q", tt.name, e, a)
+        }
+
+        if e, a := tt.expectedPartition, partition; !reflect.DeepEqual(e, a) {
+            t.Errorf("%s: partition: expected %#v, got %#v", tt.name, e, a)
+        }
+    }
+}
+
+func TestAddDockerImagesLabel(t *testing.T) {
+    tests := []struct {
+        name                           string
+        driver                         string
+        driverStatus                   string
+        dmsetupTable                   string
+        getDockerDeviceMapperInfoError error
+        partitions                     map[string]partition
+        expectedDockerDevice           string
+        expectedPartition              *partition
+    }{
+        {
+            name:         "devicemapper, not loopback",
+            driver:       "devicemapper",
+            driverStatus: `[["Pool Name", "vg_vagrant-docker--pool"]]`,
+            dmsetupTable: "0 53870592 thin-pool 253:2 253:3 1024 0 1 skip_block_zeroing",
+            partitions: map[string]partition{
+                "/dev/mapper/vg_vagrant-lv_root": {
+                    mountpoint: "/",
+                    fsType:     "devicemapper",
+                },
+            },
+            expectedDockerDevice: "vg_vagrant-docker--pool",
+            expectedPartition: &partition{
+                fsType:    "devicemapper",
+                major:     253,
+                minor:     3,
+                blockSize: 1024,
+            },
+        },
+        {
+            name:         "devicemapper, loopback on non-root partition",
+            driver:       "devicemapper",
+            driverStatus: `[["Data loop file","/var/lib/docker/devicemapper/devicemapper/data"]]`,
+            partitions: map[string]partition{
+                "/dev/mapper/vg_vagrant-lv_root": {
+                    mountpoint: "/",
+                    fsType:     "devicemapper",
+                },
+                "/dev/sdb1": {
+                    mountpoint: "/var/lib/docker/devicemapper",
+                },
+            },
+            expectedDockerDevice: "/dev/sdb1",
+        },
+        {
+            name: "multiple mounts - innermost check",
+            partitions: map[string]partition{
+                "/dev/sda1": {
+                    mountpoint: "/",
+                    fsType:     "ext4",
+                },
+                "/dev/sdb1": {
+                    mountpoint: "/var/lib/docker",
+                    fsType:     "ext4",
+                },
+                "/dev/sdb2": {
+                    mountpoint: "/var/lib/docker/btrfs",
+                    fsType:     "btrfs",
+                },
+            },
+            expectedDockerDevice: "/dev/sdb2",
+        },
+    }
+
+    for _, tt := range tests {
+        fsInfo := &RealFsInfo{
+            labels:     map[string]string{},
+            partitions: tt.partitions,
+            dmsetup: &testDmsetup{
+                data: []byte(tt.dmsetupTable),
+            },
+        }
+
+        context := Context{
+            DockerRoot: "/var/lib/docker",
+            DockerInfo: map[string]string{
+                "Driver":       tt.driver,
+                "DriverStatus": tt.driverStatus,
+            },
+        }
+
+        fsInfo.addDockerImagesLabel(context)
+
+        if e, a := tt.expectedDockerDevice, fsInfo.labels[LabelDockerImages]; e != a {
+            t.Errorf("%s: docker device: expected %q, got %q", tt.name, e, a)
+        }
+
+        if tt.expectedPartition == nil {
+            continue
+        }
+        if e, a := *tt.expectedPartition, fsInfo.partitions[tt.expectedDockerDevice]; !reflect.DeepEqual(e, a) {
+            t.Errorf("%s: docker partition: expected %#v, got %#v", tt.name, e, a)
+        }
+    }
+}
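
Because the dmsetup calls now sit behind the dmsetupClient interface, these tests should run on a machine with no devicemapper at all. Assuming the usual repository layout, something like `go test ./fs/ -run 'TestAddSystemRootLabel|TestGetDockerDeviceMapperInfo|TestAddDockerImagesLabel'` would exercise just the new cases.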