Merge pull request #112015 from humblec/glusterfs-gone
Removal of GlusterFS code from the repo
@@ -43,18 +43,14 @@ limitations under the License.
package storage

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	admissionapi "k8s.io/pod-security-admission/api"
)

// TODO(#99468): Check if these tests are still needed.
@@ -123,39 +119,4 @@ var _ = SIGDescribe("Volumes", func() {
			e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// Gluster
	////////////////////////////////////////////////////////////////////////
	ginkgo.Describe("GlusterFS", func() {
		ginkgo.It("should be mountable", func() {
			// create gluster server and endpoints
			config, _, _ := e2evolume.NewGlusterfsServer(c, namespace.Name)
			name := config.Prefix + "-server"
			defer func() {
				e2evolume.TestServerCleanup(f, config)
				err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
				if !apierrors.IsNotFound(err) {
					framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
				}
			}()

			tests := []e2evolume.Test{
				{
					Volume: v1.VolumeSource{
						Glusterfs: &v1.GlusterfsVolumeSource{
							EndpointsName: name,
							// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
							Path:     "test_vol",
							ReadOnly: true,
						},
					},
					File: "index.html",
					// Must match content of test/images/volumes-tester/gluster/index.html
					ExpectedContent: "Hello from GlusterFS!",
				},
			}
			e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
		})
	})
})
@@ -166,65 +166,6 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
	return config, pod, host
}

// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
	config = TestConfig{
		Namespace:   namespace,
		Prefix:      "gluster",
		ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
		ServerPorts: []int{24007, 24008, 49152},
	}
	pod, ip = CreateStorageServer(cs, config)

	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-server",
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{
					Protocol: v1.ProtocolTCP,
					Port:     24007,
				},
			},
		},
	}

	_, err := cs.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
	framework.ExpectNoError(err, "failed to create service for Gluster server")

	ginkgo.By("creating Gluster endpoints")
	endpoints := &v1.Endpoints{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Endpoints",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-server",
		},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{
					{
						IP: ip,
					},
				},
				Ports: []v1.EndpointPort{
					{
						Name:     "gluster",
						Port:     24007,
						Protocol: v1.ProtocolTCP,
					},
				},
			},
		},
	}
	_, err = cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
	framework.ExpectNoError(err, "failed to create endpoints for Gluster server")

	return config, pod, ip
}
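// Usage sketch (illustrative, not part of this file): callers paired the
// returned config with the in-tree Glusterfs volume source, as the GlusterFS
// e2e test above does:
//
//	config, _, _ := e2evolume.NewGlusterfsServer(cs, ns)
//	name := config.Prefix + "-server" // matches the service/endpoints created here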
// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
@@ -46,7 +46,6 @@ import (
	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
@@ -219,143 +218,6 @@ func (v *nfsVolume) DeleteVolume() {
	cleanUpVolumeServer(v.f, v.serverPod)
}

// Gluster
type glusterFSDriver struct {
	driverInfo storageframework.DriverInfo
}

type glusterVolume struct {
	prefix    string
	serverPod *v1.Pod
	f         *framework.Framework
}

var _ storageframework.TestDriver = &glusterFSDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &glusterFSDriver{}
var _ storageframework.InlineVolumeTestDriver = &glusterFSDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &glusterFSDriver{}

// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface
func InitGlusterFSDriver() storageframework.TestDriver {
	return &glusterFSDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "gluster",
			InTreePluginName: "kubernetes.io/glusterfs",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: sets.NewString(
				"", // Default fsType
			),
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence: true,
				storageframework.CapExec:        true,
				storageframework.CapRWX:         true,
				storageframework.CapMultiPODs:   true,
			},
		},
	}
}

func (g *glusterFSDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &g.driverInfo
}

func (g *glusterFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
}

func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
	gv, ok := e2evolume.(*glusterVolume)
	if !ok {
		framework.Failf("failed to cast test volume type %T to the Gluster test volume", e2evolume)
	}

	name := gv.prefix + "-server"
	return &v1.VolumeSource{
		Glusterfs: &v1.GlusterfsVolumeSource{
			EndpointsName: name,
			// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
			Path:     "test_vol",
			ReadOnly: readOnly,
		},
	}
}

func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
	gv, ok := e2evolume.(*glusterVolume)
	if !ok {
		framework.Failf("failed to cast test volume of type %T to the Gluster test volume", e2evolume)
	}

	name := gv.prefix + "-server"
	return &v1.PersistentVolumeSource{
		Glusterfs: &v1.GlusterfsPersistentVolumeSource{
			EndpointsName: name,
			// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
			Path:     "test_vol",
			ReadOnly: readOnly,
		},
	}, nil
}

func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
	return &storageframework.PerTestConfig{
		Driver:    g,
		Prefix:    "gluster",
		Framework: f,
	}, func() {}
}

func (g *glusterFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	f := config.Framework
	cs := f.ClientSet
	ns := f.Namespace

	c, serverPod, _ := e2evolume.NewGlusterfsServer(cs, ns.Name)
	config.ServerConfig = &c
	return &glusterVolume{
		prefix:    config.Prefix,
		serverPod: serverPod,
		f:         f,
	}
}

func (v *glusterVolume) DeleteVolume() {
	f := v.f
	cs := f.ClientSet
	ns := f.Namespace

	name := v.prefix + "-server"

	nameSpaceName := fmt.Sprintf("%s/%s", ns.Name, name)

	framework.Logf("Deleting Gluster endpoints %s...", nameSpaceName)
	err := cs.CoreV1().Endpoints(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			framework.Failf("Gluster deleting endpoint %s failed: %v", nameSpaceName, err)
		}
		framework.Logf("Gluster endpoints %q not found, assuming deleted", nameSpaceName)
	}

	framework.Logf("Deleting Gluster service %s...", nameSpaceName)
	err = cs.CoreV1().Services(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			framework.Failf("Gluster deleting service %s failed: %v", nameSpaceName, err)
		}
		framework.Logf("Gluster service %q not found, assuming deleted", nameSpaceName)
	}

	framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
	err = e2epod.DeletePodWithWait(cs, v.serverPod)
	if err != nil {
		framework.Failf("Gluster server pod delete failed: %v", err)
	}
}
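// Wiring sketch (assumed; mirrors how the testDrivers list below consumes
// these Init* constructors): each driver is instantiated once and handed to
// the shared storage suites, roughly:
//
//	curDriver := drivers.InitGlusterFSDriver()
//	ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(curDriver), func() {
//		storageframework.DefineTestSuites(curDriver, testSuites)
//	})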
// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
type iSCSIDriver struct {
@@ -30,7 +30,6 @@ import (
// List of testDrivers to be executed in below loop
var testDrivers = []func() storageframework.TestDriver{
	drivers.InitNFSDriver,
	drivers.InitGlusterFSDriver,
	drivers.InitISCSIDriver,
	drivers.InitRbdDriver,
	drivers.InitCephFSDriver,
@@ -19,7 +19,6 @@ package storage
import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"
@@ -51,7 +50,6 @@ import (
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
)
@@ -680,37 +678,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
		})
	})

	ginkgo.Describe("GlusterDynamicProvisioner", func() {
		ginkgo.It("should create and delete persistent volumes [fast]", func() {
			e2eskipper.SkipIfProviderIs("gke")
			ginkgo.By("creating a Gluster DP server Pod")
			pod := startGlusterDpServerPod(c, ns)
			serverURL := "http://" + net.JoinHostPort(pod.Status.PodIP, "8081")
			ginkgo.By("creating a StorageClass")
			test := testsuites.StorageClassTest{
				Client:       c,
				Name:         "Gluster Dynamic provisioner test",
				Provisioner:  "kubernetes.io/glusterfs",
				Timeouts:     f.Timeouts,
				ClaimSize:    "2Gi",
				ExpectedSize: "2Gi",
				Parameters:   map[string]string{"resturl": serverURL},
			}
			storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "glusterdptest"))
			defer clearStorageClass()
			test.Class = storageClass

			ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
			test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
				ClaimSize:        test.ClaimSize,
				StorageClassName: &test.Class.Name,
				VolumeMode:       &test.VolumeMode,
			}, ns)

			test.TestDynamicProvisioning()
		})
	})
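	// For reference (sketch; newStorageClass is the helper used above): the
	// class under test resolved to roughly
	//
	//	&storagev1.StorageClass{
	//		Provisioner: "kubernetes.io/glusterfs",
	//		Parameters:  map[string]string{"resturl": serverURL},
	//	}
	//
	// so provisioning exercised the heketi-style REST endpoint on port 8081.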
ginkgo.Describe("Invalid AWS KMS key", func() {
|
||||
ginkgo.It("should report an error and create no PV", func() {
|
||||
e2eskipper.SkipUnlessProviderIs("aws")
|
||||
@@ -880,55 +847,6 @@ func getStorageClass(
	}
}

func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
	podClient := c.CoreV1().Pods(ns)

	provisionerPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "glusterdynamic-provisioner-",
		},

		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "glusterdynamic-provisioner",
					Image: imageutils.GetE2EImage(imageutils.GlusterDynamicProvisioner),
					Args: []string{
						"-config=" + "/etc/heketi/heketi.json",
					},
					Ports: []v1.ContainerPort{
						{Name: "heketi", ContainerPort: 8081},
					},
					Env: []v1.EnvVar{
						{
							Name: "POD_IP",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "status.podIP",
								},
							},
						},
					},
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
		},
	}
	provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)

	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))

	ginkgo.By("locating the provisioner pod")
	pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
	return pod
}

// waitForProvisionedVolumesDeleted is a polling wrapper to scan all PersistentVolumes for any associated to the test's
// StorageClass. Returns either an error, or the list of remaining PVs.
func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) {
@@ -1,4 +0,0 @@
linux/amd64=fedora:36
linux/arm64=arm64v8/fedora:36
linux/ppc64le=ppc64le/fedora:36
linux/s390x=s390x/fedora:36
@@ -1,28 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG BASEIMAGE
FROM $BASEIMAGE

CROSS_BUILD_COPY qemu-QEMUARCH-static /usr/bin/

RUN yum -y install hostname glusterfs-server && yum clean all
ADD glusterd.vol /etc/glusterfs/
ADD run_gluster.sh /usr/local/bin/
ADD index.html /vol/
RUN chmod 644 /vol/index.html

EXPOSE 24007/tcp 49152/tcp

ENTRYPOINT ["/usr/local/bin/run_gluster.sh"]
@@ -1,6 +0,0 @@
# Gluster server container for testing

This container exports the test_vol volume with an index.html inside.

Used by test/e2e/* to test GlusterfsVolumeSource. Not for production use!
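A sketch of the inline volume source the e2e tests pointed at this server
(illustrative; the endpoints name assumes the tests' "gluster" prefix
convention, and "test_vol" is the volume created by run_gluster.sh):

```go
volume := v1.VolumeSource{
	Glusterfs: &v1.GlusterfsVolumeSource{
		EndpointsName: "gluster-server", // created by NewGlusterfsServer
		Path:          "test_vol",       // exported by run_gluster.sh
		ReadOnly:      true,
	},
}
```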
@@ -1 +0,0 @@
1.4
@@ -1,14 +0,0 @@
# This is default glusterd.vol (incl. commented out base-port),
# with added "rpc-auth-allow-insecure on" to allow connection
# from non-privileged ports.

volume management
    type mgmt/glusterd
    option working-directory /var/lib/glusterd
    option transport-type socket,rdma
    option transport.socket.keepalive-time 10
    option transport.socket.keepalive-interval 2
    option transport.socket.read-fail-log off
#   option base-port 49152
    option rpc-auth-allow-insecure on
end-volume
@@ -1 +0,0 @@
Hello from GlusterFS!
@@ -1,46 +0,0 @@
#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

DIR="$(mktemp -d)"

function start()
{
    mount -t tmpfs test "$DIR"
    chmod 755 "$DIR"
    cp /vol/* "$DIR/"
    /usr/sbin/glusterd -p /run/glusterd.pid
    gluster volume create test_vol "$(hostname -i):$DIR" force
    gluster volume start test_vol
}

function stop()
{
    gluster --mode=script volume stop test_vol force
    kill "$(cat /run/glusterd.pid)"
    umount "$DIR"
    rm -rf "$DIR"
    exit 0
}


trap stop TERM

start "$@"

while true; do
    sleep 5
done