Move network plugin teardown to DockerManager.
The teardown is Docker-specific; moving it into DockerManager also paves the way for doing the setup in the manager in future cleanups.
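
For context, the shape of the change looks roughly like the sketch below: the Docker manager is constructed with the network plugin and calls the plugin's teardown hook itself when it tears down a pod's infra container, rather than the kubelet doing so. The names here (netPlugin, dockerManager, killPodInfraContainer) are hypothetical stand-ins for illustration, not the actual kubelet/dockertools API.

package main

import "fmt"

// netPlugin is a hypothetical stand-in for the kubelet network plugin
// interface; only the teardown hook matters for this sketch.
type netPlugin interface {
	TearDownPod(namespace, name, infraContainerID string) error
}

// noopPlugin does nothing, similar in spirit to running with no plugin configured.
type noopPlugin struct{}

func (noopPlugin) TearDownPod(namespace, name, infraContainerID string) error { return nil }

// dockerManager sketches the idea of the change: the manager is constructed
// with the network plugin, so it owns the Docker-specific network teardown.
type dockerManager struct {
	networkPlugin netPlugin
}

func newDockerManager(plugin netPlugin) *dockerManager {
	return &dockerManager{networkPlugin: plugin}
}

// killPodInfraContainer tears down the pod's network before the infra
// container is removed; previously this call lived in the kubelet itself.
func (dm *dockerManager) killPodInfraContainer(namespace, name, infraContainerID string) error {
	if err := dm.networkPlugin.TearDownPod(namespace, name, infraContainerID); err != nil {
		return fmt.Errorf("network teardown for %s/%s failed: %v", namespace, name, err)
	}
	// ... stop and remove the infra container via the Docker client here ...
	return nil
}

func main() {
	dm := newDockerManager(noopPlugin{})
	fmt.Println(dm.killPodInfraContainer("default", "mypod", "infra-123"))
}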
@@ -25,6 +25,7 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
 	kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 )
 
@@ -40,7 +41,8 @@ func newPod(uid, name string) *api.Pod {
 func createPodWorkers() (*podWorkers, map[types.UID][]string) {
 	fakeDocker := &dockertools.FakeDockerClient{}
 	fakeRecorder := &record.FakeRecorder{}
-	dockerManager := dockertools.NewDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{})
+	np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
+	dockerManager := dockertools.NewDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np)
 	fakeRuntimeCache := kubecontainer.NewFakeRuntimeCache(dockerManager)
 
 	lock := sync.Mutex{}