Merge pull request #55911 from davidz627/localSSDUUID
Automatic merge from submit-queue (batch tested with PRs 54824, 55911, 55730, 55979, 55961). If you want to cherry-pick this change to another branch, please follow the instructions at https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md.

Add options for mounting SCSI or NVMe local SSDs through Block or Filesystem, and do all of that by UUID.

Fixes: #51431
Fixed version of: #53466

Mounts SCSI local SSDs by UUID under /mnt/disks/by-uuid/ and also allows users to request and mount NVMe disks. Both types of disk are accessible either as a block device or through a filesystem. I have confirmed that nodes no longer crash on initialization on GKE.
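To illustrate the disk layout these e2e tests rely on, here is a minimal, hypothetical Go sketch (not part of this change; the helper name localSSDMounts is invented for illustration). It assumes, as the tests below do, that the node startup scripts mount local SSDs by UUID under /mnt/disks/by-uuid/google-local-ssds-<interface>-<format>/:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// localSSDMounts lists the mount points for local SSDs of the given interface
// ("scsi" or "nvme") and format ("fs" or "block"), assuming they are mounted
// by UUID under /mnt/disks/by-uuid/google-local-ssds-<interface>-<format>/.
func localSSDMounts(ssdInterface, format string) ([]string, error) {
	base := fmt.Sprintf("/mnt/disks/by-uuid/google-local-ssds-%s-%s", ssdInterface, format)
	entries, err := os.ReadDir(base)
	if err != nil {
		// No local SSDs of this interface/format are mounted on this node.
		return nil, err
	}
	mounts := make([]string, 0, len(entries))
	for _, e := range entries {
		mounts = append(mounts, filepath.Join(base, e.Name()))
	}
	return mounts, nil
}

func main() {
	mounts, err := localSSDMounts("scsi", "fs")
	if err != nil {
		fmt.Println("no SCSI local SSDs mounted by UUID:", err)
		return
	}
	for _, m := range mounts {
		fmt.Println(m)
	}
}

The e2e tests follow the same convention: setupLocalVolumeGCELocalSSD picks the first entry under the scsi-fs directory, and SkipUnlessLocalSSDExists skips the test when that directory is empty.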
@@ -21,6 +21,8 @@ import (
"fmt"
"path"
"path/filepath"
"strconv"
"strings"
"time"

. "github.com/onsi/ginkgo"
@@ -54,8 +56,22 @@ const (
DirectoryLocalVolumeType LocalVolumeType = "dir"
// creates a tmpfs and mounts it
TmpfsLocalVolumeType LocalVolumeType = "tmpfs"
// tests based on local ssd at /mnt/disks/by-uuid/
GCELocalSSDVolumeType LocalVolumeType = "gce-localssd-scsi-fs"
)

var setupLocalVolumeMap = map[LocalVolumeType]func(*localTestConfig) *localTestVolume{
GCELocalSSDVolumeType: setupLocalVolumeGCELocalSSD,
TmpfsLocalVolumeType: setupLocalVolumeTmpfs,
DirectoryLocalVolumeType: setupLocalVolumeDirectory,
}

var cleanupLocalVolumeMap = map[LocalVolumeType]func(*localTestConfig, *localTestVolume){
GCELocalSSDVolumeType: cleanupLocalVolumeGCELocalSSD,
TmpfsLocalVolumeType: cleanupLocalVolumeTmpfs,
DirectoryLocalVolumeType: cleanupLocalVolumeDirectory,
}

type localTestVolume struct {
// Node that the volume is on
node *v1.Node
@@ -199,105 +215,100 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S
})
})

LocalVolumeTypes := []LocalVolumeType{DirectoryLocalVolumeType, TmpfsLocalVolumeType}

Context("when two pods mount a local volume at the same time", func() {
It("should be able to write from pod1 and read from pod2", func() {
for _, testVolType := range LocalVolumeTypes {
var testVol *localTestVolume
By(fmt.Sprintf("local-volume-type: %s", testVolType))
testVol = setupLocalVolumePVCPV(config, testVolType)
twoPodsReadWriteTest(config, testVol)
cleanupLocalVolume(config, testVol)
}
})
})

Context("when two pods mount a local volume one after the other", func() {
It("should be able to write from pod1 and read from pod2", func() {
for _, testVolType := range LocalVolumeTypes {
var testVol *localTestVolume
By(fmt.Sprintf("local-volume-type: %s", testVolType))
testVol = setupLocalVolumePVCPV(config, testVolType)
twoPodsReadWriteSerialTest(config, testVol)
cleanupLocalVolume(config, testVol)
}
})
})

Context("when pod using local volume with non-existant path", func() {
|
||||
ep := &eventPatterns{
|
||||
reason: "FailedMount",
|
||||
pattern: make([]string, 2)}
|
||||
ep.pattern = append(ep.pattern, "MountVolume.SetUp failed")
|
||||
ep.pattern = append(ep.pattern, "does not exist")
|
||||
|
||||
It("should not be able to mount", func() {
|
||||
for _, testVolType := range LocalVolumeTypes {
|
||||
By(fmt.Sprintf("local-volume-type: %s", testVolType))
|
||||
testVol := &localTestVolume{
|
||||
node: config.node0,
|
||||
hostDir: "/non-existent/location/nowhere",
|
||||
localVolumeType: testVolType,
|
||||
LocalVolumeTypes := []LocalVolumeType{DirectoryLocalVolumeType, TmpfsLocalVolumeType, GCELocalSSDVolumeType}
|
||||
for _, tempTestVolType := range LocalVolumeTypes {
|
||||
// New variable required for gingko test closures
|
||||
testVolType := tempTestVolType
|
||||
ctxString := fmt.Sprintf("when using volume type %s", testVolType)
|
||||
Context(ctxString, func() {
BeforeEach(func() {
if testVolType == GCELocalSSDVolumeType {
SkipUnlessLocalSSDExists("scsi", "fs", config.node0)
}
By("Creating local PVC and PV")
createLocalPVCPV(config, testVol)
pod, err := createLocalPod(config, testVol)
Expect(err).To(HaveOccurred())
checkPodEvents(config, pod.Name, ep)
}
})
})

Context("when pod's node is different from PV's NodeAffinity", func() {

BeforeEach(func() {
if len(config.nodes.Items) < 2 {
framework.Skipf("Runs only when number of nodes >= 2")
}
})

ep := &eventPatterns{
reason: "FailedScheduling",
pattern: make([]string, 2)}
ep.pattern = append(ep.pattern, "MatchNodeSelector")
ep.pattern = append(ep.pattern, "NoVolumeNodeConflict")
for _, testVolType := range LocalVolumeTypes {

It("should not be able to mount due to different NodeAffinity", func() {

testPodWithNodeName(config, testVolType, ep, config.nodes.Items[1].Name, makeLocalPodWithNodeAffinity)
})

It("should not be able to mount due to different NodeSelector", func() {
Context("when two pods mount a local volume at the same time", func() {
|
||||
It("should be able to write from pod1 and read from pod2", func() {
|
||||
var testVol *localTestVolume
|
||||
testVol = setupLocalVolumePVCPV(config, testVolType)
|
||||
twoPodsReadWriteTest(config, testVol)
|
||||
cleanupLocalVolume(config, testVol)
|
||||
})
|
||||
|
||||
})
|
||||
Context("when two pods mount a local volume one after the other", func() {
|
||||
It("should be able to write from pod1 and read from pod2", func() {
|
||||
var testVol *localTestVolume
|
||||
testVol = setupLocalVolumePVCPV(config, testVolType)
|
||||
twoPodsReadWriteSerialTest(config, testVol)
|
||||
cleanupLocalVolume(config, testVol)
|
||||
})
|
||||
})
|
||||
Context("when pod using local volume with non-existant path", func() {
|
||||
|
||||
ep := &eventPatterns{
|
||||
reason: "FailedMount",
|
||||
pattern: make([]string, 2)}
|
||||
ep.pattern = append(ep.pattern, "MountVolume.SetUp failed")
|
||||
ep.pattern = append(ep.pattern, "does not exist")
|
||||
|
||||
It("should not be able to mount", func() {
|
||||
testVol := &localTestVolume{
|
||||
node: config.node0,
|
||||
hostDir: "/non-existent/location/nowhere",
|
||||
localVolumeType: testVolType,
|
||||
}
|
||||
By("Creating local PVC and PV")
|
||||
createLocalPVCPV(config, testVol)
|
||||
pod, err := createLocalPod(config, testVol)
|
||||
Expect(err).To(HaveOccurred())
|
||||
checkPodEvents(config, pod.Name, ep)
|
||||
})
|
||||
|
||||
})
|
||||
Context("when pod's node is different from PV's NodeAffinity", func() {
|
||||
|
||||
BeforeEach(func() {
|
||||
if len(config.nodes.Items) < 2 {
|
||||
framework.Skipf("Runs only when number of nodes >= 2")
|
||||
}
|
||||
})
|
||||
|
||||
ep := &eventPatterns{
|
||||
reason: "FailedScheduling",
|
||||
pattern: make([]string, 2)}
|
||||
ep.pattern = append(ep.pattern, "MatchNodeSelector")
|
||||
ep.pattern = append(ep.pattern, "NoVolumeNodeConflict")
|
||||
|
||||
It("should not be able to mount due to different NodeAffinity", func() {
|
||||
testPodWithNodeName(config, testVolType, ep, config.nodes.Items[1].Name, makeLocalPodWithNodeAffinity)
|
||||
})
|
||||
It("should not be able to mount due to different NodeSelector", func() {
|
||||
testPodWithNodeName(config, testVolType, ep, config.nodes.Items[1].Name, makeLocalPodWithNodeSelector)
|
||||
})
|
||||
|
||||
})
|
||||
Context("when pod's node is different from PV's NodeName", func() {
|
||||
|
||||
BeforeEach(func() {
|
||||
if len(config.nodes.Items) < 2 {
|
||||
framework.Skipf("Runs only when number of nodes >= 2")
|
||||
}
|
||||
})
|
||||
|
||||
ep := &eventPatterns{
|
||||
reason: "FailedMount",
|
||||
pattern: make([]string, 2)}
|
||||
ep.pattern = append(ep.pattern, "NodeSelectorTerm")
|
||||
ep.pattern = append(ep.pattern, "Storage node affinity check failed")
|
||||
It("should not be able to mount due to different NodeName", func() {
|
||||
testPodWithNodeName(config, testVolType, ep, config.nodes.Items[1].Name, makeLocalPodWithNodeName)
|
||||
})
|
||||
|
||||
testPodWithNodeName(config, testVolType, ep, config.nodes.Items[1].Name, makeLocalPodWithNodeSelector)
|
||||
})
|
||||
|
||||
}
|
||||
})
|
||||
|
||||
Context("when pod's node is different from PV's NodeName", func() {
|
||||
|
||||
BeforeEach(func() {
|
||||
if len(config.nodes.Items) < 2 {
|
||||
framework.Skipf("Runs only when number of nodes >= 2")
|
||||
}
|
||||
})
|
||||
|
||||
ep := &eventPatterns{
|
||||
reason: "FailedMount",
|
||||
pattern: make([]string, 2)}
|
||||
ep.pattern = append(ep.pattern, "NodeSelectorTerm")
|
||||
ep.pattern = append(ep.pattern, "Storage node affinity check failed")
|
||||
for _, testVolType := range LocalVolumeTypes {
|
||||
|
||||
It("should not be able to mount due to different NodeName", func() {
|
||||
|
||||
testPodWithNodeName(config, testVolType, ep, config.nodes.Items[1].Name, makeLocalPodWithNodeName)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
Context("when using local volume provisioner", func() {
|
||||
var volumePath string
|
||||
@@ -362,7 +373,6 @@ type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nod

func testPodWithNodeName(config *localTestConfig, testVolType LocalVolumeType, ep *eventPatterns, nodeName string, makeLocalPodFunc makeLocalPodWith) {
var testVol *localTestVolume
By(fmt.Sprintf("local-volume-type: %s", testVolType))
testVol = setupLocalVolumePVCPV(config, testVolType)

pod := makeLocalPodFunc(config, testVol, nodeName)
@@ -486,16 +496,7 @@ func podNodeName(config *localTestConfig, pod *v1.Pod) (string, error) {
return runtimePod.Spec.NodeName, runtimePodErr
}

// setupLocalVolume sets up a directory to use for the local PV
func setupLocalVolume(config *localTestConfig, localVolumeType LocalVolumeType) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)

if localVolumeType == TmpfsLocalVolumeType {
createAndMountTmpfsLocalVolume(config, hostDir)
}

// populate volume with testFile containing testFileContent
func setupWriteTestFile(hostDir string, config *localTestConfig, localVolumeType LocalVolumeType) *localTestVolume {
writeCmd, _ := createWriteAndReadCmds(hostDir, testFile, testFileContent)
By(fmt.Sprintf("Creating local volume on node %q at path %q", config.node0.Name, hostDir))
err := framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, config.node0)
@@ -507,7 +508,30 @@ func setupLocalVolume(config *localTestConfig, localVolumeType LocalVolumeType)
}
}

// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
func setupLocalVolumeTmpfs(config *localTestConfig) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMountTmpfsLocalVolume(config, hostDir)
// populate volume with testFile containing testFileContent
return setupWriteTestFile(hostDir, config, TmpfsLocalVolumeType)
}

func setupLocalVolumeGCELocalSSD(config *localTestConfig) *localTestVolume {
res, err := framework.IssueSSHCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", framework.TestContext.Provider, config.node0)
Expect(err).NotTo(HaveOccurred())
dirName := strings.Fields(res.Stdout)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
// populate volume with testFile containing testFileContent
return setupWriteTestFile(hostDir, config, GCELocalSSDVolumeType)
}

func setupLocalVolumeDirectory(config *localTestConfig) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
// populate volume with testFile containing testFileContent
return setupWriteTestFile(hostDir, config, DirectoryLocalVolumeType)
}

func cleanupLocalVolume(config *localTestConfig, volume *localTestVolume) {
if volume == nil {
return
@@ -519,10 +543,30 @@ func cleanupLocalVolume(config *localTestConfig, volume *localTestVolume) {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}

if volume.localVolumeType == TmpfsLocalVolumeType {
unmountTmpfsLocalVolume(config, volume.hostDir)
}
cleanup := cleanupLocalVolumeMap[volume.localVolumeType]
cleanup(config, volume)
}

// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
func cleanupLocalVolumeGCELocalSSD(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm %s", volume.hostDir+"/"+testFile)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, config.node0)
Expect(err).NotTo(HaveOccurred())
}

// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
func cleanupLocalVolumeTmpfs(config *localTestConfig, volume *localTestVolume) {
unmountTmpfsLocalVolume(config, volume.hostDir)

By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, config.node0)
Expect(err).NotTo(HaveOccurred())
}

// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
func cleanupLocalVolumeDirectory(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, config.node0)
@@ -703,7 +747,9 @@ func podRWCmdExec(pod *v1.Pod, cmd string) string {
// and create local PVC and PV
func setupLocalVolumePVCPV(config *localTestConfig, localVolumeType LocalVolumeType) *localTestVolume {
By("Initializing test volume")
testVol := setupLocalVolume(config, localVolumeType)
setupLocalVolume, ok := setupLocalVolumeMap[localVolumeType]
Expect(ok).To(BeTrue())
testVol := setupLocalVolume(config)

By("Creating local PVC and PV")
createLocalPVCPV(config, testVol)
@@ -921,3 +967,16 @@ func findLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.Pe
}
return nil, fmt.Errorf("Unable to find local persistent volume with path %v", volumePath)
}

// SkipUnlessLocalSSDExists takes in an ssdInterface (scsi/nvme) and a filesystemType (fs/block)
// and skips if a disk of that type does not exist on the node
func SkipUnlessLocalSSDExists(ssdInterface, filesystemType string, node *v1.Node) {
ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
res, err := framework.IssueSSHCommandWithResult(ssdCmd, framework.TestContext.Provider, node)
Expect(err).NotTo(HaveOccurred())
num, err := strconv.Atoi(strings.TrimSpace(res.Stdout))
Expect(err).NotTo(HaveOccurred())
if num < 1 {
framework.Skipf("Requires at least 1 %s %s localSSD ", ssdInterface, filesystemType)
}
}