From 5834c9337a62090155e55361590eaa0c614dc89a Mon Sep 17 00:00:00 2001 From: Pingan2017 Date: Tue, 8 Aug 2017 20:20:37 +0800 Subject: [PATCH 001/264] fix the typo of wtih --- pkg/quota/resources.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/quota/resources.go b/pkg/quota/resources.go index aaf56b72a60..8c3d3d30231 100644 --- a/pkg/quota/resources.go +++ b/pkg/quota/resources.go @@ -225,7 +225,7 @@ func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardL for _, evaluator := range evaluators { potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) } - // NOTE: the intersection just removes duplicates since the evaluator match intersects wtih hard + // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard matchedResources := Intersection(hardResources, potentialResources) // sum the observed usage from each evaluator From c578542ad7fac65a1b6c8c020dd1b5a99b4fe76c Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 31 Aug 2017 12:13:59 +0200 Subject: [PATCH 002/264] git: Use VolumeHost.GetExec() to execute stuff in volume plugins This prepares volume plugins to run things in containers instead of running them on the host. --- pkg/volume/git_repo/BUILD | 5 +- pkg/volume/git_repo/git_repo.go | 22 ++-- pkg/volume/git_repo/git_repo_test.go | 182 +++++++++------------------ 3 files changed, 73 insertions(+), 136 deletions(-) diff --git a/pkg/volume/git_repo/BUILD b/pkg/volume/git_repo/BUILD index be57c22489e..d73f11b3fe1 100644 --- a/pkg/volume/git_repo/BUILD +++ b/pkg/volume/git_repo/BUILD @@ -13,12 +13,12 @@ go_library( "git_repo.go", ], deps = [ + "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -27,14 +27,13 @@ go_test( srcs = ["git_repo_test.go"], library = ":go_default_library", deps = [ + "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/empty_dir:go_default_library", "//pkg/volume/testing:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - "//vendor/k8s.io/utils/exec/testing:go_default_library", ], ) diff --git a/pkg/volume/git_repo/git_repo.go b/pkg/volume/git_repo/git_repo.go index 1948971e94e..779bdc5e93c 100644 --- a/pkg/volume/git_repo/git_repo.go +++ b/pkg/volume/git_repo/git_repo.go @@ -24,10 +24,10 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/utils/exec" ) // This is the primary entrypoint for volume plugins. 
@@ -100,7 +100,8 @@ func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts vol source: spec.Volume.GitRepo.Repository, revision: spec.Volume.GitRepo.Revision, target: spec.Volume.GitRepo.Directory, - exec: exec.New(), + mounter: plugin.host.GetMounter(plugin.GetPluginName()), + exec: plugin.host.GetExec(plugin.GetPluginName()), opts: opts, }, nil } @@ -149,7 +150,8 @@ type gitRepoVolumeMounter struct { source string revision string target string - exec exec.Interface + mounter mount.Interface + exec mount.Exec opts volume.VolumeOptions } @@ -195,7 +197,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if len(b.target) != 0 { args = append(args, b.target) } - if output, err := b.execCommand("git", args, dir); err != nil { + if output, err := b.execGit(args, dir); err != nil { return fmt.Errorf("failed to exec 'git %s': %s: %v", strings.Join(args, " "), output, err) } @@ -225,10 +227,10 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("unexpected directory contents: %v", files) } - if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil { + if output, err := b.execGit([]string{"checkout", b.revision}, subdir); err != nil { return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err) } - if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil { + if output, err := b.execGit([]string{"reset", "--hard"}, subdir); err != nil { return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err) } @@ -242,10 +244,10 @@ func (b *gitRepoVolumeMounter) getMetaDir() string { return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(gitRepoPluginName)), b.volName) } -func (b *gitRepoVolumeMounter) execCommand(command string, args []string, dir string) ([]byte, error) { - cmd := b.exec.Command(command, args...) - cmd.SetDir(dir) - return cmd.CombinedOutput() +func (b *gitRepoVolumeMounter) execGit(args []string, dir string) ([]byte, error) { + // run git -C + fullArgs := append([]string{"-C", dir}, args...) + return b.exec.Run("git", fullArgs...) } // gitRepoVolumeUnmounter cleans git repo volumes. 
diff --git a/pkg/volume/git_repo/git_repo_test.go b/pkg/volume/git_repo/git_repo_test.go index 1bee3ad2185..4a4d857da72 100644 --- a/pkg/volume/git_repo/git_repo_test.go +++ b/pkg/volume/git_repo/git_repo_test.go @@ -28,11 +28,16 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/empty_dir" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "k8s.io/utils/exec" - fakeexec "k8s.io/utils/exec/testing" +) + +const ( + gitUrl = "https://github.com/kubernetes/kubernetes.git" + revision = "2a30ce65c5ab586b98916d83385c5983edd353a1" + gitRepositoryName = "kubernetes" ) func newTestHost(t *testing.T) (string, volume.VolumeHost) { @@ -62,23 +67,18 @@ func TestCanSupport(t *testing.T) { } // Expected command -type expectedCommand struct { - // The git command - cmd []string - // The dir of git command is executed - dir string +type expectedCommand []string + +type testScenario struct { + name string + vol *v1.Volume + repositoryDir string + expecteds []expectedCommand + isExpectedFailure bool } func TestPlugin(t *testing.T) { - gitUrl := "https://github.com/kubernetes/kubernetes.git" - revision := "2a30ce65c5ab586b98916d83385c5983edd353a1" - - scenarios := []struct { - name string - vol *v1.Volume - expecteds []expectedCommand - isExpectedFailure bool - }{ + scenarios := []testScenario{ { name: "target-dir", vol: &v1.Volume{ @@ -91,19 +91,11 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "target_dir", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl, "target_dir"}, - dir: "", - }, - { - cmd: []string{"git", "checkout", revision}, - dir: "/target_dir", - }, - { - cmd: []string{"git", "reset", "--hard"}, - dir: "/target_dir", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"}, + []string{"git", "-C", "volume-dir/target_dir", "checkout", revision}, + []string{"git", "-C", "volume-dir/target_dir", "reset", "--hard"}, }, isExpectedFailure: false, }, @@ -118,11 +110,9 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "target_dir", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl, "target_dir"}, - dir: "", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"}, }, isExpectedFailure: false, }, @@ -136,11 +126,9 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "kubernetes", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl}, - dir: "", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl}, }, isExpectedFailure: false, }, @@ -156,19 +144,11 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "kubernetes", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl}, - dir: "", - }, - { - cmd: []string{"git", "checkout", revision}, - dir: "/kubernetes", - }, - { - cmd: []string{"git", "reset", "--hard"}, - dir: "/kubernetes", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl}, + []string{"git", "-C", "volume-dir/kubernetes", "checkout", revision}, + []string{"git", "-C", "volume-dir/kubernetes", "reset", "--hard"}, }, isExpectedFailure: false, }, @@ -184,19 +164,11 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl, "."}, - dir: "", - }, - { - cmd: []string{"git", "checkout", revision}, - dir: "", - }, - { - cmd: []string{"git", "reset", "--hard"}, - dir: "", - }, + []string{"git", "-C", 
"volume-dir", "clone", gitUrl, "."}, + []string{"git", "-C", "volume-dir", "checkout", revision}, + []string{"git", "-C", "volume-dir", "reset", "--hard"}, }, isExpectedFailure: false, }, @@ -214,12 +186,7 @@ func TestPlugin(t *testing.T) { } -func doTestPlugin(scenario struct { - name string - vol *v1.Volume - expecteds []expectedCommand - isExpectedFailure bool -}, t *testing.T) []error { +func doTestPlugin(scenario testScenario, t *testing.T) []error { allErrs := []error{} plugMgr := volume.VolumePluginMgr{} @@ -311,73 +278,42 @@ func doTestPlugin(scenario struct { return allErrs } -func doTestSetUp(scenario struct { - name string - vol *v1.Volume - expecteds []expectedCommand - isExpectedFailure bool -}, mounter volume.Mounter) []error { +func doTestSetUp(scenario testScenario, mounter volume.Mounter) []error { expecteds := scenario.expecteds allErrs := []error{} - // Construct combined outputs from expected commands - var fakeOutputs []fakeexec.FakeCombinedOutputAction - var fcmd fakeexec.FakeCmd - for _, expected := range expecteds { - if expected.cmd[1] == "clone" { - fakeOutputs = append(fakeOutputs, func() ([]byte, error) { - // git clone, it creates new dir/files - os.MkdirAll(path.Join(fcmd.Dirs[0], expected.dir), 0750) - return []byte{}, nil - }) - } else { - // git checkout || git reset, they create nothing - fakeOutputs = append(fakeOutputs, func() ([]byte, error) { - return []byte{}, nil - }) + var commandLog []expectedCommand + execCallback := func(cmd string, args ...string) ([]byte, error) { + if len(args) < 2 { + return nil, fmt.Errorf("expected at least 2 arguments, got %q", args) } + if args[0] != "-C" { + return nil, fmt.Errorf("expected the first argument to be \"-C\", got %q", args[0]) + } + // command is 'git -C + gitDir := args[1] + gitCommand := args[2] + if gitCommand == "clone" { + // Clone creates a directory + if scenario.repositoryDir != "" { + os.MkdirAll(path.Join(gitDir, scenario.repositoryDir), 0750) + } + } + // add the command to log with de-randomized gitDir + args[1] = strings.Replace(gitDir, mounter.GetPath(), "volume-dir", 1) + cmdline := append([]string{cmd}, args...) + commandLog = append(commandLog, cmdline) + return []byte{}, nil } - fcmd = fakeexec.FakeCmd{ - CombinedOutputScript: fakeOutputs, - } - - // Construct fake exec outputs from fcmd - var fakeAction []fakeexec.FakeCommandAction - for i := 0; i < len(expecteds); i++ { - fakeAction = append(fakeAction, func(cmd string, args ...string) exec.Cmd { - return fakeexec.InitFakeCmd(&fcmd, cmd, args...) 
- }) - - } - fake := fakeexec.FakeExec{ - CommandScript: fakeAction, - } - g := mounter.(*gitRepoVolumeMounter) - g.exec = &fake + g.mounter = &mount.FakeMounter{} + g.exec = mount.NewFakeExec(execCallback) g.SetUp(nil) - if fake.CommandCalls != len(expecteds) { + if !reflect.DeepEqual(expecteds, commandLog) { allErrs = append(allErrs, - fmt.Errorf("unexpected command calls in scenario: expected %d, saw: %d", len(expecteds), fake.CommandCalls)) - } - var expectedCmds [][]string - for _, expected := range expecteds { - expectedCmds = append(expectedCmds, expected.cmd) - } - if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) { - allErrs = append(allErrs, - fmt.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds)) - } - - var expectedPaths []string - for _, expected := range expecteds { - expectedPaths = append(expectedPaths, g.GetPath()+expected.dir) - } - if len(fcmd.Dirs) != len(expectedPaths) || !reflect.DeepEqual(expectedPaths, fcmd.Dirs) { - allErrs = append(allErrs, - fmt.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedPaths)) + fmt.Errorf("unexpected commands: %v, expected: %v", commandLog, expecteds)) } return allErrs From b4afb09ab91573aff59c9447064cbd3f2062eae5 Mon Sep 17 00:00:00 2001 From: Ma Shimiao Date: Tue, 12 Sep 2017 11:17:50 +0800 Subject: [PATCH 003/264] small tfix in cmd factory comment Signed-off-by: Ma Shimiao --- pkg/kubectl/cmd/util/factory.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 992916dc5e4..a3f0c45bb39 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -69,10 +69,10 @@ var ( // Factory provides abstractions that allow the Kubectl command to be extended across multiple types // of resources and different API sets. -// The rings are here for a reason. In order for composers to be able to provide alternative factory implementations +// The rings are here for a reason. In order for composers to be able to provide alternative factory implementations // they need to provide low level pieces of *certain* functions so that when the factory calls back into itself -// it uses the custom version of the function. Rather than try to enumerate everything that someone would want to override -// we split the factory into rings, where each ring can depend on methods an earlier ring, but cannot depend +// it uses the custom version of the function. Rather than try to enumerate everything that someone would want to override +// we split the factory into rings, where each ring can depend on methods in an earlier ring, but cannot depend // upon peer methods in its own ring. // TODO: make the functions interfaces // TODO: pass the various interfaces on the factory directly into the command constructors (so the @@ -198,7 +198,7 @@ type ClientAccessFactory interface { PrintObjectSpecificMessage(obj runtime.Object, out io.Writer) } -// ObjectMappingFactory holds the second level of factory methods. These functions depend upon ClientAccessFactory methods. +// ObjectMappingFactory holds the second level of factory methods. These functions depend upon ClientAccessFactory methods. // Generally they provide object typing and functions that build requests based on the negotiated clients. type ObjectMappingFactory interface { // Returns interfaces for dealing with arbitrary runtime.Objects. 
@@ -240,7 +240,7 @@ type ObjectMappingFactory interface { OpenAPISchema() (openapi.Resources, error) } -// BuilderFactory holds the second level of factory methods. These functions depend upon ObjectMappingFactory and ClientAccessFactory methods. +// BuilderFactory holds the third level of factory methods. These functions depend upon ObjectMappingFactory and ClientAccessFactory methods. // Generally they depend upon client mapper functions type BuilderFactory interface { // PrinterForCommand returns the default printer for the command. It requires that certain options From 75cc26fb65ad92a604160f5cef22e852c8024803 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 19 Oct 2017 14:29:58 +0200 Subject: [PATCH 004/264] Allow Ceph server some time to start Ceph server needs to create our "foo" volume on startup. It keeps the image small, however it makes the server container start slow. Add sleep before the server is usable. Without this PR, all pods that use Ceph fail to start for couple of seconds with cryptic "image foo not found" error and it clutters logs and pod logs and makes it harder to spot real errors. --- test/e2e/framework/volume_util.go | 11 +++++++++++ test/e2e/storage/volumes.go | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/test/e2e/framework/volume_util.go b/test/e2e/framework/volume_util.go index 84214a15ab4..08d392b3a2b 100644 --- a/test/e2e/framework/volume_util.go +++ b/test/e2e/framework/volume_util.go @@ -72,6 +72,9 @@ const ( MiB int64 = 1024 * KiB GiB int64 = 1024 * MiB TiB int64 = 1024 * GiB + + // Waiting period for volume server (Ceph, ...) to initialize itself. + VolumeServerPodStartupSleep = 20 * time.Second ) // Configuration of one tests. The test consist of: @@ -196,6 +199,14 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo }, } pod, ip = CreateStorageServer(cs, config) + + // Ceph server container needs some time to start. Tests continue working if + // this sleep is removed, however kubelet logs (and kubectl describe + // ) would be cluttered with error messages about non-existing + // image. 
+ Logf("sleeping a bit to give ceph server time to initialize") + time.Sleep(VolumeServerPodStartupSleep) + return config, pod, ip } diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index f3bfc0f2be4..c8b505fb360 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -288,7 +288,7 @@ var _ = SIGDescribe("Volumes", func() { }() _, serverIP := framework.CreateStorageServer(cs, config) By("sleeping a bit to give ceph server time to initialize") - time.Sleep(20 * time.Second) + time.Sleep(framework.VolumeServerPodStartupSleep) // create ceph secret secret := &v1.Secret{ From 10751e54e6f01ed771a8fb75abd90346ee4ab501 Mon Sep 17 00:00:00 2001 From: YuxiJin-tobeyjin Date: Wed, 25 Oct 2017 16:20:32 +0800 Subject: [PATCH 005/264] Should use Fatalf while need to format the output --- staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go index d7c7f0f10d3..dbf97feeb4e 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go @@ -1161,7 +1161,7 @@ func testPropogateStore(ctx context.Context, t *testing.T, store *store, obj *ex key := "/testkey" err := store.unconditionalDelete(ctx, key, &example.Pod{}) if err != nil && !storage.IsNotFound(err) { - t.Fatal("Cleanup failed: %v", err) + t.Fatalf("Cleanup failed: %v", err) } setOutput := &example.Pod{} if err := store.Create(ctx, key, obj, setOutput, 0); err != nil { From f6af1904cd0e017c6181c23d41e1281fd4a9b198 Mon Sep 17 00:00:00 2001 From: Tomas Nozicka Date: Thu, 9 Nov 2017 12:23:37 +0100 Subject: [PATCH 006/264] Make StatefulSet report an event when recreating failed pod --- pkg/controller/statefulset/stateful_set.go | 1 + pkg/controller/statefulset/stateful_set_control.go | 10 +++++++--- .../statefulset/stateful_set_control_test.go | 7 +++++-- pkg/controller/statefulset/stateful_set_test.go | 4 +++- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index e9db4c10b44..d1cb293abe8 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -99,6 +99,7 @@ func NewStatefulSetController( recorder), NewRealStatefulSetStatusUpdater(kubeClient, setInformer.Lister()), history.NewHistory(kubeClient, revInformer.Lister()), + recorder, ), pvcListerSynced: pvcInformer.Informer().HasSynced, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"), diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index 5102808c447..bddf1d6e55f 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -25,6 +25,7 @@ import ( apps "k8s.io/api/apps/v1beta1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/controller/history" ) @@ -53,14 +54,16 @@ type StatefulSetControlInterface interface { func NewDefaultStatefulSetControl( podControl StatefulPodControlInterface, statusUpdater StatefulSetStatusUpdaterInterface, - controllerHistory history.Interface) StatefulSetControlInterface { - return &defaultStatefulSetControl{podControl, statusUpdater, controllerHistory} + 
controllerHistory history.Interface, + recorder record.EventRecorder) StatefulSetControlInterface { + return &defaultStatefulSetControl{podControl, statusUpdater, controllerHistory, recorder} } type defaultStatefulSetControl struct { podControl StatefulPodControlInterface statusUpdater StatefulSetStatusUpdaterInterface controllerHistory history.Interface + recorder record.EventRecorder } // UpdateStatefulSet executes the core logic loop for a stateful set, applying the predictable and @@ -367,7 +370,8 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( for i := range replicas { // delete and recreate failed pods if isFailed(replicas[i]) { - glog.V(4).Infof("StatefulSet %s/%s is recreating failed Pod %s", + ssc.recorder.Eventf(set, v1.EventTypeWarning, "RecreatingFailedPod", + "StatefulSet %s/%s is recreating failed Pod %s", set.Namespace, set.Name, replicas[i].Name) diff --git a/pkg/controller/statefulset/stateful_set_control_test.go b/pkg/controller/statefulset/stateful_set_control_test.go index a18995d5059..12d926b15eb 100644 --- a/pkg/controller/statefulset/stateful_set_control_test.go +++ b/pkg/controller/statefulset/stateful_set_control_test.go @@ -41,6 +41,7 @@ import ( appslisters "k8s.io/client-go/listers/apps/v1beta1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/history" @@ -52,7 +53,8 @@ func setupController(client clientset.Interface) (*fakeStatefulPodControl, *fake informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets()) ssu := newFakeStatefulSetStatusUpdater(informerFactory.Apps().V1beta1().StatefulSets()) - ssc := NewDefaultStatefulSetControl(spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions())) + recorder := record.NewFakeRecorder(10) + ssc := NewDefaultStatefulSetControl(spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions()), recorder) stop := make(chan struct{}) informerFactory.Start(stop) @@ -452,7 +454,8 @@ func TestStatefulSetControl_getSetRevisions(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets()) ssu := newFakeStatefulSetStatusUpdater(informerFactory.Apps().V1beta1().StatefulSets()) - ssc := defaultStatefulSetControl{spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions())} + recorder := record.NewFakeRecorder(10) + ssc := defaultStatefulSetControl{spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions()), recorder} stop := make(chan struct{}) defer close(stop) diff --git a/pkg/controller/statefulset/stateful_set_test.go b/pkg/controller/statefulset/stateful_set_test.go index f43d7b70145..632f799c531 100644 --- a/pkg/controller/statefulset/stateful_set_test.go +++ b/pkg/controller/statefulset/stateful_set_test.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/history" ) @@ -585,7 +586,8 @@ func newFakeStatefulSetController(initialObjects 
...runtime.Object) (*StatefulSe ssh := history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions()) ssc.podListerSynced = alwaysReady ssc.setListerSynced = alwaysReady - ssc.control = NewDefaultStatefulSetControl(fpc, ssu, ssh) + recorder := record.NewFakeRecorder(10) + ssc.control = NewDefaultStatefulSetControl(fpc, ssu, ssh, recorder) return ssc, fpc } From 0fab7c1bec589537fba9f69ae1364474c911b9f5 Mon Sep 17 00:00:00 2001 From: zhengjiajin Date: Tue, 14 Nov 2017 11:45:14 +0800 Subject: [PATCH 007/264] bug(cli):fix kubectl rollout status not recoginze resource namespace --- pkg/kubectl/cmd/rollout/rollout_status.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index d817a81a6d1..52340f6ca6d 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -124,7 +124,7 @@ func RunStatus(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []stri } // check if deployment's has finished the rollout - status, done, err := statusViewer.Status(cmdNamespace, info.Name, revision) + status, done, err := statusViewer.Status(info.Namespace, info.Name, revision) if err != nil { return err } @@ -149,7 +149,7 @@ func RunStatus(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []stri return intr.Run(func() error { _, err := watch.Until(0, w, func(e watch.Event) (bool, error) { // print deployment's status - status, done, err := statusViewer.Status(cmdNamespace, info.Name, revision) + status, done, err := statusViewer.Status(info.Namespace, info.Name, revision) if err != nil { return false, err } From da33d6f34f9424095e16d4881aa7f96fce5f8c66 Mon Sep 17 00:00:00 2001 From: Klaus Ma Date: Mon, 31 Jul 2017 21:33:52 +0800 Subject: [PATCH 008/264] Added nodeAffinity in validation error msg. --- pkg/apis/core/validation/validation.go | 31 ++++++++++++--------- pkg/apis/core/validation/validation_test.go | 6 ++-- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 8fe283ddfeb..8f124caf0ae 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -2323,19 +2323,8 @@ func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorL allErrs := field.ErrorList{} if affinity != nil { - if na := affinity.NodeAffinity; na != nil { - // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - // } - - if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - - if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } + if affinity.NodeAffinity != nil { + allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...) 
} if affinity.PodAffinity != nil { allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...) @@ -2751,6 +2740,22 @@ func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *fie return allErrs } +// validateNodeAffinity tests that the specified nodeAffinity fields have valid data +func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + // } + if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + // validatePodAffinity tests that the specified podAffinity fields have valid data func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 3a6121715ce..01c08591044 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -5449,7 +5449,7 @@ func TestValidatePod(t *testing.T) { }, }, "invalid node selector requirement in node affinity, operator can't be null": { - expectedError: "spec.affinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator", + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", @@ -5500,7 +5500,7 @@ func TestValidatePod(t *testing.T) { }, }, "invalid requiredDuringSchedulingIgnoredDuringExecution node selector, nodeSelectorTerms must have at least one term": { - expectedError: "spec.affinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", @@ -5516,7 +5516,7 @@ func TestValidatePod(t *testing.T) { }, }, "invalid requiredDuringSchedulingIgnoredDuringExecution node selector term, matchExpressions must have at least one node selector requirement": { - expectedError: "spec.affinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions", + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", From 6b05ac10f07b9203b5aac5cdbcd50a1162a757d7 Mon Sep 17 00:00:00 2001 From: Connor Doyle Date: Wed, 22 Nov 2017 10:01:14 -0800 Subject: [PATCH 009/264] Add balajismaniam, ConnorDoyle node-e2e approvers --- test/e2e_node/OWNERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/e2e_node/OWNERS 
b/test/e2e_node/OWNERS index 1827549d6e0..c625c03e736 100644 --- a/test/e2e_node/OWNERS +++ b/test/e2e_node/OWNERS @@ -4,5 +4,7 @@ approvers: - vishh - derekwaynecarr - yujuhong +- balajismaniam +- ConnorDoyle reviewers: - sig-node-reviewers From 2e177ef9b28dab0b6eccc5d673d6ab32fdeddf46 Mon Sep 17 00:00:00 2001 From: Fabian Ruff Date: Wed, 22 Nov 2017 23:44:07 +0100 Subject: [PATCH 010/264] return routes for unknown next hops --- pkg/cloudprovider/providers/openstack/openstack_routes.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go b/pkg/cloudprovider/providers/openstack/openstack_routes.go index c5f0974dadd..a73001b0086 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -77,15 +77,9 @@ func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) var routes []*cloudprovider.Route for _, item := range router.Routes { - nodeName, ok := nodeNamesByAddr[item.NextHop] - if !ok { - // Not one of our routes? - glog.V(4).Infof("Skipping route with unknown nexthop %v", item.NextHop) - continue - } route := cloudprovider.Route{ Name: item.DestinationCIDR, - TargetNode: nodeName, + TargetNode: nodeNamesByAddr[item.NextHop], //empty if NextHop is unknown DestinationCIDR: item.DestinationCIDR, } routes = append(routes, &route) From f42f79edb073798e1f40ad9867653087e66a842c Mon Sep 17 00:00:00 2001 From: Shiyang Wang Date: Fri, 24 Nov 2017 18:06:20 +0800 Subject: [PATCH 011/264] fix spaces around the / --- pkg/printers/internalversion/printers.go | 8 ++++---- pkg/printers/internalversion/printers_test.go | 18 +++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 2c52c5e22fe..9811a64f348 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -1483,20 +1483,20 @@ func formatHPAMetrics(specs []autoscaling.MetricSpec, statuses []autoscaling.Met if len(statuses) > i && statuses[i].Pods != nil { current = statuses[i].Pods.CurrentAverageValue.String() } - list = append(list, fmt.Sprintf("%s / %s", current, spec.Pods.TargetAverageValue.String())) + list = append(list, fmt.Sprintf("%s/%s", current, spec.Pods.TargetAverageValue.String())) case autoscaling.ObjectMetricSourceType: current := "" if len(statuses) > i && statuses[i].Object != nil { current = statuses[i].Object.CurrentValue.String() } - list = append(list, fmt.Sprintf("%s / %s", current, spec.Object.TargetValue.String())) + list = append(list, fmt.Sprintf("%s/%s", current, spec.Object.TargetValue.String())) case autoscaling.ResourceMetricSourceType: if spec.Resource.TargetAverageValue != nil { current := "" if len(statuses) > i && statuses[i].Resource != nil { current = statuses[i].Resource.CurrentAverageValue.String() } - list = append(list, fmt.Sprintf("%s / %s", current, spec.Resource.TargetAverageValue.String())) + list = append(list, fmt.Sprintf("%s/%s", current, spec.Resource.TargetAverageValue.String())) } else { current := "" if len(statuses) > i && statuses[i].Resource != nil && statuses[i].Resource.CurrentAverageUtilization != nil { @@ -1507,7 +1507,7 @@ func formatHPAMetrics(specs []autoscaling.MetricSpec, statuses []autoscaling.Met if spec.Resource.TargetAverageUtilization != nil { target = fmt.Sprintf("%d%%", *spec.Resource.TargetAverageUtilization) } - list = append(list, fmt.Sprintf("%s 
/ %s", current, target)) + list = append(list, fmt.Sprintf("%s/%s", current, target)) } default: list = append(list, "") diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 8662c91fc37..f20c4191815 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -2182,7 +2182,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/100m\t2\t10\t4\t\n", }, // pods source type { @@ -2219,7 +2219,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m\t2\t10\t4\t\n", }, // object source type (no current) { @@ -2251,7 +2251,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/100m\t2\t10\t4\t\n", }, // object source type { @@ -2296,7 +2296,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m\t2\t10\t4\t\n", }, // resource source type, targetVal (no current) { @@ -2324,7 +2324,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/100m\t2\t10\t4\t\n", }, // resource source type, targetVal { @@ -2361,7 +2361,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m\t2\t10\t4\t\n", }, // resource source type, targetUtil (no current) { @@ -2389,7 +2389,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 80%\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/80%\t2\t10\t4\t\n", }, // resource source type, targetUtil { @@ -2427,7 +2427,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50% / 80%\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50%/80%\t2\t10\t4\t\n", }, // multiple specs { @@ -2486,7 +2486,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m, 50% / 80% + 1 more...\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m, 50%/80% + 1 more...\t2\t10\t4\t\n", }, } From 51a367f8061f638354023474e7d31eb34dc99ac7 Mon Sep 17 00:00:00 2001 From: Fabian Ruff Date: Wed, 29 Nov 2017 16:24:00 +0100 Subject: [PATCH 012/264] relax server list option, set Blackhole field --- pkg/cloudprovider/providers/openstack/openstack_routes.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go b/pkg/cloudprovider/providers/openstack/openstack_routes.go index a73001b0086..7b2796799c7 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -53,7 +53,7 @@ func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) glog.V(4).Infof("ListRoutes(%v)", clusterName) nodeNamesByAddr := make(map[string]types.NodeName) - err := foreachServer(r.compute, servers.ListOpts{Status: "ACTIVE"}, func(srv *servers.Server) (bool, error) { + err := 
foreachServer(r.compute, servers.ListOpts{}, func(srv *servers.Server) (bool, error) { addrs, err := nodeAddresses(srv) if err != nil { return false, err @@ -77,9 +77,11 @@ func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) var routes []*cloudprovider.Route for _, item := range router.Routes { + nodeName, foundNode := nodeNamesByAddr[item.NextHop] route := cloudprovider.Route{ Name: item.DestinationCIDR, - TargetNode: nodeNamesByAddr[item.NextHop], //empty if NextHop is unknown + TargetNode: nodeName, //empty if NextHop is unknown + Blackhole: !foundNode, DestinationCIDR: item.DestinationCIDR, } routes = append(routes, &route) From 623d7c42ac2b0c24a609c48fa83905f4f36360b6 Mon Sep 17 00:00:00 2001 From: dhilipkumars Date: Sat, 2 Dec 2017 12:47:31 +0530 Subject: [PATCH 013/264] Move some tests to use go sub-test --- pkg/controller/deployment/progress_test.go | 70 ++++++------ pkg/controller/deployment/recreate_test.go | 8 +- pkg/controller/deployment/sync_test.go | 122 +++++++++++---------- 3 files changed, 104 insertions(+), 96 deletions(-) diff --git a/pkg/controller/deployment/progress_test.go b/pkg/controller/deployment/progress_test.go index 9677728e7d0..978b21469fb 100644 --- a/pkg/controller/deployment/progress_test.go +++ b/pkg/controller/deployment/progress_test.go @@ -163,13 +163,15 @@ func TestRequeueStuckDeployment(t *testing.T) { dc.enqueueDeployment = dc.enqueue for _, test := range tests { - if test.nowFn != nil { - nowFn = test.nowFn - } - got := dc.requeueStuckDeployment(test.d, test.status) - if got != test.expected { - t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected) - } + t.Run(test.name, func(t *testing.T) { + if test.nowFn != nil { + nowFn = test.nowFn + } + got := dc.requeueStuckDeployment(test.d, test.status) + if got != test.expected { + t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected) + } + }) } } @@ -310,32 +312,34 @@ func TestSyncRolloutStatus(t *testing.T) { } for _, test := range tests { - fake := fake.Clientset{} - dc := &DeploymentController{ - client: &fake, - } - - if test.newRS != nil { - test.allRSs = append(test.allRSs, test.newRS) - } - - err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d) - if err != nil { - t.Error(err) - } - - newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType) - switch { - case newCond == nil: - if test.d.Spec.ProgressDeadlineSeconds != nil { - t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType) + t.Run(test.name, func(t *testing.T) { + fake := fake.Clientset{} + dc := &DeploymentController{ + client: &fake, } - case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason: - t.Errorf("%s: DeploymentProgressing has status %s with reason %s. 
Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason) - case !test.lastUpdate.IsZero() && test.lastUpdate != testTime: - t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime) - case !test.lastTransition.IsZero() && test.lastTransition != testTime: - t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime) - } + + if test.newRS != nil { + test.allRSs = append(test.allRSs, test.newRS) + } + + err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d) + if err != nil { + t.Error(err) + } + + newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType) + switch { + case newCond == nil: + if test.d.Spec.ProgressDeadlineSeconds != nil { + t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType) + } + case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason: + t.Errorf("%s: DeploymentProgressing has status %s with reason %s. Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason) + case !test.lastUpdate.IsZero() && test.lastUpdate != testTime: + t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime) + case !test.lastTransition.IsZero() && test.lastTransition != testTime: + t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime) + } + }) } } diff --git a/pkg/controller/deployment/recreate_test.go b/pkg/controller/deployment/recreate_test.go index 2cf8661780a..d557b5633ab 100644 --- a/pkg/controller/deployment/recreate_test.go +++ b/pkg/controller/deployment/recreate_test.go @@ -115,9 +115,11 @@ func TestOldPodsRunning(t *testing.T) { } for _, test := range tests { - if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got { - t.Errorf("%s: expected %t, got %t", test.name, expected, got) - } + t.Run(test.name, func(t *testing.T) { + if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got { + t.Errorf("%s: expected %t, got %t", test.name, expected, got) + } + }) } } diff --git a/pkg/controller/deployment/sync_test.go b/pkg/controller/deployment/sync_test.go index ce74a3eead6..6f5cc96b344 100644 --- a/pkg/controller/deployment/sync_test.go +++ b/pkg/controller/deployment/sync_test.go @@ -267,72 +267,74 @@ func TestScale(t *testing.T) { } for _, test := range tests { - _ = olderTimestamp - t.Log(test.name) - fake := fake.Clientset{} - dc := &DeploymentController{ - client: &fake, - eventRecorder: &record.FakeRecorder{}, - } - - if test.newRS != nil { - desiredReplicas := *(test.oldDeployment.Spec.Replicas) - if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok { - desiredReplicas = desired + t.Run(test.name, func(t *testing.T) { + _ = olderTimestamp + t.Log(test.name) + fake := fake.Clientset{} + dc := &DeploymentController{ + client: &fake, + eventRecorder: &record.FakeRecorder{}, } - deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) - } - for i := range test.oldRSs { - rs := test.oldRSs[i] - if rs == nil { - continue - } - desiredReplicas := *(test.oldDeployment.Spec.Replicas) - if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok { - desiredReplicas = desired - } - deploymentutil.SetReplicasAnnotations(rs, 
desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) - } - if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil { - t.Errorf("%s: unexpected error: %v", test.name, err) - continue - } + if test.newRS != nil { + desiredReplicas := *(test.oldDeployment.Spec.Replicas) + if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok { + desiredReplicas = desired + } + deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) + } + for i := range test.oldRSs { + rs := test.oldRSs[i] + if rs == nil { + continue + } + desiredReplicas := *(test.oldDeployment.Spec.Replicas) + if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok { + desiredReplicas = desired + } + deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) + } - // Construct the nameToSize map that will hold all the sizes we got our of tests - // Skip updating the map if the replica set wasn't updated since there will be - // no update action for it. - nameToSize := make(map[string]int32) - if test.newRS != nil { - nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas) - } - for i := range test.oldRSs { - rs := test.oldRSs[i] - nameToSize[rs.Name] = *(rs.Spec.Replicas) - } - // Get all the UPDATE actions and update nameToSize with all the updated sizes. - for _, action := range fake.Actions() { - rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) - if !test.wasntUpdated[rs.Name] { + if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil { + t.Errorf("%s: unexpected error: %v", test.name, err) + return + } + + // Construct the nameToSize map that will hold all the sizes we got our of tests + // Skip updating the map if the replica set wasn't updated since there will be + // no update action for it. + nameToSize := make(map[string]int32) + if test.newRS != nil { + nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas) + } + for i := range test.oldRSs { + rs := test.oldRSs[i] nameToSize[rs.Name] = *(rs.Spec.Replicas) } - } - - if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] { - t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name]) - continue - } - if len(test.expectedOld) != len(test.oldRSs) { - t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs)) - continue - } - for n := range test.oldRSs { - rs := test.oldRSs[n] - expected := test.expectedOld[n] - if *(expected.Spec.Replicas) != nameToSize[rs.Name] { - t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name]) + // Get all the UPDATE actions and update nameToSize with all the updated sizes. 
+ for _, action := range fake.Actions() { + rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) + if !test.wasntUpdated[rs.Name] { + nameToSize[rs.Name] = *(rs.Spec.Replicas) + } } - } + + if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] { + t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name]) + return + } + if len(test.expectedOld) != len(test.oldRSs) { + t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs)) + return + } + for n := range test.oldRSs { + rs := test.oldRSs[n] + expected := test.expectedOld[n] + if *(expected.Spec.Replicas) != nameToSize[rs.Name] { + t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name]) + } + } + }) } } From d66d8f053dabb141ebfa89224d0ccc76354ceb4c Mon Sep 17 00:00:00 2001 From: Di Xu Date: Thu, 7 Dec 2017 13:30:20 +0800 Subject: [PATCH 014/264] refactor getting uninitialized in kubectl get --- pkg/kubectl/cmd/resource/get.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index b3cd8eb2580..a7a442ff9ef 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -65,6 +65,8 @@ type GetOptions struct { ShowKind bool LabelColumns []string Export bool + + IncludeUninitialized bool } var ( @@ -190,9 +192,13 @@ func (options *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args options.ExplicitNamespace = false } + options.IncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, false) + switch { case options.Watch || options.WatchOnly: - + // include uninitialized objects when watching on a single object + // unless explicitly set --include-uninitialized=false + options.IncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, len(args) == 2) default: if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(options.Filenames) { fmt.Fprint(options.ErrOut, "You must specify the type of resource to get. ", cmdutil.ValidResourceTypeList(f)) @@ -240,7 +246,7 @@ func (options *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []str FieldSelectorParam(options.FieldSelector). ExportParam(options.Export). RequestChunksOf(options.ChunkSize). - IncludeUninitialized(cmdutil.ShouldIncludeUninitialized(cmd, false)). // TODO: this needs to be better factored + IncludeUninitialized(options.IncludeUninitialized). ResourceTypeOrNameArgs(true, args...). ContinueOnError(). Latest(). @@ -442,11 +448,6 @@ func (options *GetOptions) raw(f cmdutil.Factory) error { // watch starts a client-side watch of one or more resources. // TODO: remove the need for arguments here. func (options *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []string) error { - // TODO: this could be better factored - // include uninitialized objects when watching on a single object - // unless explicitly set --include-uninitialized=false - includeUninitialized := cmdutil.ShouldIncludeUninitialized(cmd, len(args) == 2) - r := f.NewBuilder(). Unstructured(). NamespaceParam(options.Namespace).DefaultNamespace().AllNamespaces(options.AllNamespaces). @@ -455,7 +456,7 @@ func (options *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []s FieldSelectorParam(options.FieldSelector). ExportParam(options.Export). RequestChunksOf(options.ChunkSize). 
- IncludeUninitialized(includeUninitialized). + IncludeUninitialized(options.IncludeUninitialized). ResourceTypeOrNameArgs(true, args...). SingleResourceType(). Latest(). From 9ab98d9f69a9bc8c0c31bd69d5932e8e61df3126 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Thu, 7 Dec 2017 21:22:49 -0800 Subject: [PATCH 015/264] Remove ExternalTrafficLocalOnly from kube_feature gate --- pkg/features/kube_features.go | 5 ---- pkg/proxy/iptables/BUILD | 2 -- pkg/proxy/iptables/proxier.go | 9 +----- pkg/proxy/ipvs/BUILD | 2 -- pkg/proxy/ipvs/proxier.go | 9 +----- pkg/proxy/winkernel/BUILD | 2 -- pkg/proxy/winkernel/proxier.go | 9 +----- pkg/registry/core/service/BUILD | 4 --- pkg/registry/core/service/rest.go | 39 +++++++++++--------------- pkg/registry/core/service/rest_test.go | 6 ---- 10 files changed, 19 insertions(+), 68 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 80599039bc4..640d7f9c4aa 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -33,10 +33,6 @@ const ( // beta: v1.4 AppArmor utilfeature.Feature = "AppArmor" - // owner: @girishkalele - // alpha: v1.4 - ExternalTrafficLocalOnly utilfeature.Feature = "AllowExtTrafficLocalEndpoints" - // owner: @mtaufen // alpha: v1.4 DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig" @@ -222,7 +218,6 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - ExternalTrafficLocalOnly: {Default: true, PreRelease: utilfeature.GA}, AppArmor: {Default: true, PreRelease: utilfeature.Beta}, DynamicKubeletConfig: {Default: false, PreRelease: utilfeature.Alpha}, KubeletConfigFile: {Default: false, PreRelease: utilfeature.Alpha}, diff --git a/pkg/proxy/iptables/BUILD b/pkg/proxy/iptables/BUILD index cd7e76524e6..052111719a7 100644 --- a/pkg/proxy/iptables/BUILD +++ b/pkg/proxy/iptables/BUILD @@ -16,7 +16,6 @@ go_library( "//pkg/api/service:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", - "//pkg/features:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/healthcheck:go_default_library", "//pkg/proxy/metrics:go_default_library", @@ -30,7 +29,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 1710d989f97..353346d8dd8 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -39,12 +39,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" apiservice "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/proxy/metrics" @@ -191,8 +189,7 @@ func (e *endpointsInfo) String() string { // returns a new serviceInfo struct func newServiceInfo(svcPortName 
proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo { onlyNodeLocalEndpoints := false - if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) && - apiservice.RequestsOnlyLocalTraffic(service) { + if apiservice.RequestsOnlyLocalTraffic(service) { onlyNodeLocalEndpoints = true } var stickyMaxAgeSeconds int @@ -773,10 +770,6 @@ func updateEndpointsMap( changes.items = make(map[types.NamespacedName]*endpointsChange) }() - if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { - return - } - // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to endpointsMap. result.hcEndpoints = make(map[types.NamespacedName]int) diff --git a/pkg/proxy/ipvs/BUILD b/pkg/proxy/ipvs/BUILD index 30945e44334..6369e1321ee 100644 --- a/pkg/proxy/ipvs/BUILD +++ b/pkg/proxy/ipvs/BUILD @@ -53,7 +53,6 @@ go_library( "//pkg/api/service:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", - "//pkg/features:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/healthcheck:go_default_library", "//pkg/proxy/metrics:go_default_library", @@ -70,7 +69,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 5f5a09d2447..bfbdd83d88c 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -38,12 +38,10 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" apiservice "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/proxy/metrics" @@ -367,8 +365,7 @@ func updateServiceMap( // returns a new serviceInfo struct func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo { onlyNodeLocalEndpoints := false - if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) && - apiservice.RequestsOnlyLocalTraffic(service) { + if apiservice.RequestsOnlyLocalTraffic(service) { onlyNodeLocalEndpoints = true } var stickyMaxAgeSeconds int @@ -589,10 +586,6 @@ func updateEndpointsMap( changes.items = make(map[types.NamespacedName]*endpointsChange) }() - if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { - return - } - // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to endpointsMap. 
result.hcEndpoints = make(map[types.NamespacedName]int) diff --git a/pkg/proxy/winkernel/BUILD b/pkg/proxy/winkernel/BUILD index 42494391696..2788e09c3cc 100644 --- a/pkg/proxy/winkernel/BUILD +++ b/pkg/proxy/winkernel/BUILD @@ -19,7 +19,6 @@ go_library( "//pkg/api/service:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", - "//pkg/features:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/healthcheck:go_default_library", "//pkg/util/async:go_default_library", @@ -29,7 +28,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ], "//conditions:default": [], diff --git a/pkg/proxy/winkernel/proxier.go b/pkg/proxy/winkernel/proxier.go index 5d56561c422..7b75e4a947c 100644 --- a/pkg/proxy/winkernel/proxier.go +++ b/pkg/proxy/winkernel/proxier.go @@ -35,12 +35,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" apiservice "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/util/async" @@ -160,8 +158,7 @@ func (ep *endpointsInfo) Cleanup() { // returns a new serviceInfo struct func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo { onlyNodeLocalEndpoints := false - if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) && - apiservice.RequestsOnlyLocalTraffic(service) { + if apiservice.RequestsOnlyLocalTraffic(service) { onlyNodeLocalEndpoints = true } @@ -825,10 +822,6 @@ func (proxier *Proxier) updateEndpointsMap() (result updateEndpointMapResult) { changes.items = make(map[types.NamespacedName]*endpointsChange) }() - if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { - return - } - // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to endpointsMap. 
result.hcEndpoints = make(map[types.NamespacedName]int) diff --git a/pkg/registry/core/service/BUILD b/pkg/registry/core/service/BUILD index ca6517abf27..f53b19b13e4 100644 --- a/pkg/registry/core/service/BUILD +++ b/pkg/registry/core/service/BUILD @@ -23,7 +23,6 @@ go_library( "//pkg/apis/core/helper:go_default_library", "//pkg/apis/core/validation:go_default_library", "//pkg/capabilities:go_default_library", - "//pkg/features:go_default_library", "//pkg/registry/core/endpoint:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/registry/core/service/portallocator:go_default_library", @@ -40,7 +39,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -56,7 +54,6 @@ go_test( "//pkg/api/service:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", - "//pkg/features:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/registry/core/service/portallocator:go_default_library", "//pkg/registry/registrytest:go_default_library", @@ -69,7 +66,6 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/pkg/registry/core/service/rest.go b/pkg/registry/core/service/rest.go index 689850c1b86..3c8fca7a750 100644 --- a/pkg/registry/core/service/rest.go +++ b/pkg/registry/core/service/rest.go @@ -35,12 +35,10 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - utilfeature "k8s.io/apiserver/pkg/util/feature" apiservice "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/registry/core/endpoint" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator" @@ -131,17 +129,15 @@ func (rs *REST) Create(ctx genericapirequest.Context, obj runtime.Object, create } } - // Handle ExternalTraiffc related fields during service creation. - if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { - if apiservice.NeedsHealthCheck(service) { - if err := rs.allocateHealthCheckNodePort(service, nodePortOp); err != nil { - return nil, errors.NewInternalError(err) - } - } - if errs := validation.ValidateServiceExternalTrafficFieldsCombination(service); len(errs) > 0 { - return nil, errors.NewInvalid(api.Kind("Service"), service.Name, errs) + // Handle ExternalTraffic related fields during service creation. 
+ if apiservice.NeedsHealthCheck(service) { + if err := rs.allocateHealthCheckNodePort(service, nodePortOp); err != nil { + return nil, errors.NewInternalError(err) } } + if errs := validation.ValidateServiceExternalTrafficFieldsCombination(service); len(errs) > 0 { + return nil, errors.NewInvalid(api.Kind("Service"), service.Name, errs) + } out, err := rs.registry.CreateService(ctx, service, createValidation) if err != nil { @@ -191,8 +187,7 @@ func (rs *REST) Delete(ctx genericapirequest.Context, id string) (runtime.Object } } - if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) && - apiservice.NeedsHealthCheck(service) { + if apiservice.NeedsHealthCheck(service) { nodePort := service.Spec.HealthCheckNodePort if nodePort > 0 { err := rs.serviceNodePorts.Release(int(nodePort)) @@ -358,16 +353,14 @@ func (rs *REST) Update(ctx genericapirequest.Context, name string, objInfo rest. service.Status.LoadBalancer = api.LoadBalancerStatus{} } - // Handle ExternalTraiffc related updates. - if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { - success, err := rs.healthCheckNodePortUpdate(oldService, service, nodePortOp) - if !success || err != nil { - return nil, false, err - } - externalTrafficPolicyUpdate(oldService, service) - if errs := validation.ValidateServiceExternalTrafficFieldsCombination(service); len(errs) > 0 { - return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, errs) - } + // Handle ExternalTraffic related updates. + success, err := rs.healthCheckNodePortUpdate(oldService, service, nodePortOp) + if !success || err != nil { + return nil, false, err + } + externalTrafficPolicyUpdate(oldService, service) + if errs := validation.ValidateServiceExternalTrafficFieldsCombination(service); len(errs) > 0 { + return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, errs) } out, err := rs.registry.UpdateService(ctx, service, createValidation, updateValidation) diff --git a/pkg/registry/core/service/rest_test.go b/pkg/registry/core/service/rest_test.go index edc1a334d44..fa87951abd0 100644 --- a/pkg/registry/core/service/rest_test.go +++ b/pkg/registry/core/service/rest_test.go @@ -31,20 +31,14 @@ import ( "k8s.io/apimachinery/pkg/util/rand" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator" "k8s.io/kubernetes/pkg/registry/registrytest" ) -func init() { - utilfeature.DefaultFeatureGate.Set(string(features.ExternalTrafficLocalOnly) + "=true") -} - // TODO(wojtek-t): Cleanup this file. // It is now testing mostly the same things as other resources but // in a completely different way. We should unify it. 
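The same simplification is applied in the iptables, ipvs and winkernel proxiers above: with AllowExtTrafficLocalEndpoints GA, the feature-gate guard disappears and onlyNodeLocalEndpoints is driven purely by the Service spec. A minimal, self-contained sketch of the resulting check, using stand-in types (the real helpers are api.Service and apiservice.RequestsOnlyLocalTraffic; they are only referenced in comments here):

package main

import "fmt"

// service is a stand-in for api.Service; only the field relevant to the
// check is modelled.
type service struct {
	externalTrafficPolicy string
}

// requestsOnlyLocalTraffic is a stand-in for apiservice.RequestsOnlyLocalTraffic.
func requestsOnlyLocalTraffic(svc *service) bool {
	return svc.externalTrafficPolicy == "Local"
}

func main() {
	svc := &service{externalTrafficPolicy: "Local"}

	// Before this patch the assignment was additionally guarded by
	// utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly);
	// with the gate removed, only the spec-level predicate remains.
	onlyNodeLocalEndpoints := requestsOnlyLocalTraffic(svc)
	fmt.Println("onlyNodeLocalEndpoints:", onlyNodeLocalEndpoints)
}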
From 9a47249180ce15d55f0a58394603d74120be2de9 Mon Sep 17 00:00:00 2001 From: zoues Date: Sun, 10 Dec 2017 11:11:37 +0800 Subject: [PATCH 016/264] typo --- pkg/controller/statefulset/stateful_pod_control.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index fff08046297..d189f05ce05 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go @@ -172,7 +172,7 @@ func (spc *realStatefulPodControl) recordClaimEvent(verb string, set *apps.State } } -// createPersistentVolumeClaims creates all of the required PersistentVolumeClaims for pod, which mush be a member of +// createPersistentVolumeClaims creates all of the required PersistentVolumeClaims for pod, which must be a member of // set. If all of the claims for Pod are successfully created, the returned error is nil. If creation fails, this method // may be called again until no error is returned, indicating the PersistentVolumeClaims for pod are consistent with // set's Spec. From 4f400e5d2f35f47f98dcb57635c80e86dbe6484a Mon Sep 17 00:00:00 2001 From: Di Xu Date: Mon, 11 Dec 2017 15:20:55 +0800 Subject: [PATCH 017/264] ignore images in used by running containers when GC --- pkg/kubelet/images/image_gc_manager.go | 25 +++++++++------ pkg/kubelet/images/image_gc_manager_test.go | 35 ++++++++++++--------- 2 files changed, 36 insertions(+), 24 deletions(-) diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index 344e0156a49..b8503968bef 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -168,7 +168,7 @@ func (im *realImageGCManager) Start() { if im.initialized { ts = time.Now() } - err := im.detectImages(ts) + _, err := im.detectImages(ts) if err != nil { glog.Warningf("[imageGCManager] Failed to monitor images: %v", err) } else { @@ -194,18 +194,19 @@ func (im *realImageGCManager) GetImageList() ([]kubecontainer.Image, error) { return im.imageCache.get(), nil } -func (im *realImageGCManager) detectImages(detectTime time.Time) error { +func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) { + imagesInUse := sets.NewString() + images, err := im.runtime.ListImages() if err != nil { - return err + return imagesInUse, err } pods, err := im.runtime.GetPods(true) if err != nil { - return err + return imagesInUse, err } // Make a set of images in use by containers. - imagesInUse := sets.NewString() for _, pod := range pods { for _, container := range pod.Containers { glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID) @@ -231,7 +232,7 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) error { } // Set last used time to now if the image is being used. - if isImageUsed(image, imagesInUse) { + if isImageUsed(image.ID, imagesInUse) { glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now) im.imageRecords[image.ID].lastUsed = now } @@ -248,7 +249,7 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) error { } } - return nil + return imagesInUse, nil } func (im *realImageGCManager) GarbageCollect() error { @@ -309,7 +310,7 @@ func (im *realImageGCManager) DeleteUnusedImages() (int64, error) { // Note that error may be nil and the number of bytes free may be less // than bytesToFree. 
func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) { - err := im.detectImages(freeTime) + imagesInUse, err := im.detectImages(freeTime) if err != nil { return 0, err } @@ -320,6 +321,10 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( // Get all images in eviction order. images := make([]evictionInfo, 0, len(im.imageRecords)) for image, record := range im.imageRecords { + if isImageUsed(image, imagesInUse) { + glog.V(5).Infof("Image ID %s is being used", image) + continue + } images = append(images, evictionInfo{ id: image, imageRecord: *record, @@ -385,9 +390,9 @@ func (ev byLastUsedAndDetected) Less(i, j int) bool { } } -func isImageUsed(image container.Image, imagesInUse sets.String) bool { +func isImageUsed(imageID string, imagesInUse sets.String) bool { // Check the image ID. - if _, ok := imagesInUse[image.ID]; ok { + if _, ok := imagesInUse[imageID]; ok { return true } return false diff --git a/pkg/kubelet/images/image_gc_manager_test.go b/pkg/kubelet/images/image_gc_manager_test.go index aac3bad0f47..fe680f45d8e 100644 --- a/pkg/kubelet/images/image_gc_manager_test.go +++ b/pkg/kubelet/images/image_gc_manager_test.go @@ -112,7 +112,7 @@ func TestDetectImagesInitialDetect(t *testing.T) { } startTime := time.Now().Add(-time.Millisecond) - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 3) @@ -145,7 +145,7 @@ func TestDetectImagesWithNewImage(t *testing.T) { }}, } - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) @@ -159,7 +159,7 @@ func TestDetectImagesWithNewImage(t *testing.T) { detectedTime := zero.Add(time.Second) startTime := time.Now().Add(-time.Millisecond) - err = manager.detectImages(detectedTime) + _, err = manager.detectImages(detectedTime) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 3) noContainer, ok := manager.getImageRecord(imageID(0)) @@ -190,7 +190,7 @@ func TestDetectImagesContainerStopped(t *testing.T) { }}, } - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) @@ -199,7 +199,7 @@ func TestDetectImagesContainerStopped(t *testing.T) { // Simulate container being stopped. fakeRuntime.AllPodList = []*containertest.FakePod{} - err = manager.detectImages(time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) container1, ok := manager.getImageRecord(imageID(0)) @@ -226,14 +226,14 @@ func TestDetectImagesWithRemovedImages(t *testing.T) { }}, } - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) // Simulate both images being removed. fakeRuntime.ImageList = []container.Image{} - err = manager.detectImages(time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 0) } @@ -297,7 +297,8 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { } // Make 1 be more recently used than 0. 
- require.NoError(t, manager.detectImages(zero)) + _, err := manager.detectImages(zero) + require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{ {Pod: &container.Pod{ Containers: []*container.Container{ @@ -305,13 +306,15 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { }, }}, } - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{ {Pod: &container.Pod{ Containers: []*container.Container{}, }}, } - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) spaceFreed, err := manager.freeSpace(1024, time.Now()) @@ -335,14 +338,17 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { } // Make 1 more recently detected but used at the same time as 0. - require.NoError(t, manager.detectImages(zero)) + _, err := manager.detectImages(zero) + require.NoError(t, err) fakeRuntime.ImageList = []container.Image{ makeImage(0, 1024), makeImage(1, 2048), } - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{} - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) spaceFreed, err := manager.freeSpace(1024, time.Now()) @@ -448,7 +454,8 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) t.Log(fakeClock.Now()) - require.NoError(t, manager.detectImages(fakeClock.Now())) + _, err := manager.detectImages(fakeClock.Now()) + require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) // no space freed since one image is in used, and another one is not old enough spaceFreed, err := manager.freeSpace(1024, fakeClock.Now()) From 7c4c321c9899ebe0c8cb21477f47d9f9743ddf19 Mon Sep 17 00:00:00 2001 From: Yu Liao Date: Tue, 12 Dec 2017 12:55:20 -0800 Subject: [PATCH 018/264] added defaults for --watch-cache-sizes description. --- staging/src/k8s.io/apiserver/pkg/server/options/etcd.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index d522cde9bcb..9bfa3a0a778 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -120,7 +120,8 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ "The individual override format: resource#size, where size is a number. It takes effect "+ - "when watch-cache is enabled.") + "when watch-cache is enabled. Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices) "+ + "have system defaults set by heuristics, others default to default-watch-cache-size") fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, "The storage backend for persistence. 
Options: 'etcd3' (default), 'etcd2'.") From 18d24d8303e2078260ec34ca8fb4b1a83bb585d7 Mon Sep 17 00:00:00 2001 From: Yu Liao Date: Wed, 13 Dec 2017 12:39:37 -0800 Subject: [PATCH 019/264] added more description for flag '--watch-cache-sizes' to make the format of the flag clearer. --- staging/src/k8s.io/apiserver/pkg/server/options/etcd.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index 9bfa3a0a778..4d5d1bc22ad 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -119,8 +119,9 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ - "The individual override format: resource#size, where size is a number. It takes effect "+ - "when watch-cache is enabled. Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices) "+ + "The individual override format: resource[.group]#size, where resource is lowercase plural (no version), "+ + "group is optional, and size is a number. It takes effect when watch-cache is enabled. "+ + "Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) "+ "have system defaults set by heuristics, others default to default-watch-cache-size") fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, From 6149df089e2667fefb740e408ece883fd76dd40e Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Fri, 1 Dec 2017 11:07:28 +0800 Subject: [PATCH 020/264] add admission into RecommendedOption --- .../pkg/cmd/server/start.go | 2 +- .../test/integration/testserver/start.go | 3 +- .../src/k8s.io/apiserver/pkg/server/config.go | 5 +++ .../apiserver/pkg/server/options/admission.go | 4 ++ .../pkg/server/options/recommended.go | 42 ++++++++++++++++++- .../kube-aggregator/pkg/cmd/server/start.go | 2 +- .../plugin/banflunder/admission_test.go | 5 +-- .../wardleinitializer/wardleinitializer.go | 4 +- .../wardleinitializer_test.go | 6 +-- .../sample-apiserver/pkg/cmd/server/start.go | 37 ++++++++-------- 10 files changed, 76 insertions(+), 34 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go index 6b41a2e06de..d2096412bfd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go @@ -98,7 +98,7 @@ func (o CustomResourceDefinitionsServerOptions) Config() (*apiserver.Config, err } serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - if err := o.RecommendedOptions.ApplyTo(serverConfig); err != nil { + if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go index 99cbe9b3f85..81314842be3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go @@ -45,6 +45,7 @@ func DefaultServerConfig() (*extensionsapiserver.Config, error) { 
options.RecommendedOptions.SecureServing.BindPort = port options.RecommendedOptions.Authentication = nil // disable options.RecommendedOptions.Authorization = nil // disable + options.RecommendedOptions.Admission = nil // disable options.RecommendedOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") etcdURL, ok := os.LookupEnv("KUBE_INTEGRATION_ETCD_URL") if !ok { @@ -58,7 +59,7 @@ func DefaultServerConfig() (*extensionsapiserver.Config, error) { if err := options.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil { return nil, fmt.Errorf("error creating self-signed certificates: %v", err) } - if err := options.RecommendedOptions.ApplyTo(genericConfig); err != nil { + if err := options.RecommendedOptions.ApplyTo(genericConfig, nil); err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index 877071ad3b2..cd98717d2c0 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -188,9 +188,13 @@ type Config struct { PublicAddress net.IP } +type AdmissionInitializersInitFunc func() (admission.PluginInitializer, error) + type RecommendedConfig struct { Config + ExtraAdmissionInitializersInitFunc []AdmissionInitializersInitFunc + // SharedInformerFactory provides shared informers for Kubernetes resources. This value is set by // RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config // by default, or the kubeconfig given with kubeconfig command line flag. @@ -259,6 +263,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config { func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig { return &RecommendedConfig{ Config: *NewConfig(codecs), + ExtraAdmissionInitializersInitFunc: make([]AdmissionInitializersInitFunc, 0), } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index 6232567f7a4..30716869146 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -100,6 +100,10 @@ func (a *AdmissionOptions) ApplyTo( scheme *runtime.Scheme, pluginInitializers ...admission.PluginInitializer, ) error { + if a == nil { + return nil + } + pluginNames := a.PluginNames if len(a.PluginNames) == 0 { pluginNames = a.enabledPluginNames() diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index 21c3dd76159..eff7cde33d3 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -17,9 +17,12 @@ limitations under the License. 
package options import ( + "fmt" + "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/storage/storagebackend" ) @@ -35,6 +38,7 @@ type RecommendedOptions struct { Audit *AuditOptions Features *FeatureOptions CoreAPI *CoreAPIOptions + Admission *AdmissionOptions } func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { @@ -46,6 +50,7 @@ func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptio Audit: NewAuditOptions(), Features: NewFeatureOptions(), CoreAPI: NewCoreAPIOptions(), + Admission: NewAdmissionOptions(), } } @@ -57,9 +62,13 @@ func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) { o.Audit.AddFlags(fs) o.Features.AddFlags(fs) o.CoreAPI.AddFlags(fs) + o.Admission.AddFlags(fs) } -func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { +// ApplyTo adds RecommendedOptions to the server configuration. +// scheme is the scheme of the apiserver types that are sent to the admission chain. +// pluginInitializers can be empty, it is only needed for additional initializers. +func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *runtime.Scheme) error { if err := o.Etcd.ApplyTo(&config.Config); err != nil { return err } @@ -81,6 +90,36 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err := o.CoreAPI.ApplyTo(config); err != nil { return err } + if o.Admission != nil { + // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. + if o.CoreAPI == nil { + return fmt.Errorf("admission depends on CoreAPI, so it must be set") + } + // Admission needs scheme to construct admission initializer. + if scheme == nil { + return fmt.Errorf("admission depends on scheme, so it must be set") + } + + pluginInitializers := []admission.PluginInitializer{} + for _, initFunc := range config.ExtraAdmissionInitializersInitFunc { + initializer, err := initFunc() + if err != nil { + return err + } + pluginInitializers = append(pluginInitializers, initializer) + } + + err := o.Admission.ApplyTo( + &config.Config, + config.SharedInformerFactory, + config.ClientConfig, + scheme, + pluginInitializers...) + if err != nil { + return err + } + } + return nil } @@ -93,6 +132,7 @@ func (o *RecommendedOptions) Validate() []error { errors = append(errors, o.Audit.Validate()...) errors = append(errors, o.Features.Validate()...) errors = append(errors, o.CoreAPI.Validate()...) + errors = append(errors, o.Admission.Validate()...)
return errors } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go b/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go index 3be7856966b..1b1a652febe 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go @@ -109,7 +109,7 @@ func (o AggregatorOptions) RunAggregator(stopCh <-chan struct{}) error { serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - if err := o.RecommendedOptions.ApplyTo(serverConfig); err != nil { + if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return err } serverConfig.LongRunningFunc = filters.BasicLongRunningRequestCheck( diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go index 5b08387b0e7..4e21833e046 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go @@ -113,10 +113,7 @@ func TestBanflunderAdmissionPlugin(t *testing.T) { t.Fatalf("scenario %d: failed to create banflunder admission plugin due to = %v", index, err) } - targetInitializer, err := wardleinitializer.New(informersFactory) - if err != nil { - t.Fatalf("scenario %d: failed to crate wardle plugin initializer due to = %v", index, err) - } + targetInitializer := wardleinitializer.New(informersFactory) targetInitializer.Initialize(target) err = admission.ValidateInitialization(target) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go index c53c8a4944a..b41e3dfba09 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go @@ -28,10 +28,10 @@ type pluginInitializer struct { var _ admission.PluginInitializer = pluginInitializer{} // New creates an instance of wardle admission plugins initializer. 
-func New(informers informers.SharedInformerFactory) (pluginInitializer, error) { +func New(informers informers.SharedInformerFactory) pluginInitializer { return pluginInitializer{ informers: informers, - }, nil + } } // Initialize checks the initialization interfaces implemented by a plugin diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go index 221876a617c..c64ed3ab3e1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go @@ -31,10 +31,8 @@ import ( func TestWantsInternalWardleInformerFactory(t *testing.T) { cs := &fake.Clientset{} sf := informers.NewSharedInformerFactory(cs, time.Duration(1)*time.Second) - target, err := wardleinitializer.New(sf) - if err != nil { - t.Fatalf("expected to create an instance of initializer but got an error = %s", err.Error()) - } + target := wardleinitializer.New(sf) + wantWardleInformerFactory := &wantInternalWardleInformerFactory{} target.Initialize(wantWardleInformerFactory) if wantWardleInformerFactory.sf != sf { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index ae1e12dc75e..b375d46e609 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" genericoptions "k8s.io/apiserver/pkg/server/options" "k8s.io/sample-apiserver/pkg/admission/plugin/banflunder" @@ -38,16 +39,15 @@ const defaultEtcdPathPrefix = "/registry/wardle.kubernetes.io" type WardleServerOptions struct { RecommendedOptions *genericoptions.RecommendedOptions - Admission *genericoptions.AdmissionOptions - StdOut io.Writer - StdErr io.Writer + SharedInformerFactory informers.SharedInformerFactory + StdOut io.Writer + StdErr io.Writer } func NewWardleServerOptions(out, errOut io.Writer) *WardleServerOptions { o := &WardleServerOptions{ RecommendedOptions: genericoptions.NewRecommendedOptions(defaultEtcdPathPrefix, apiserver.Codecs.LegacyCodec(v1alpha1.SchemeGroupVersion)), - Admission: genericoptions.NewAdmissionOptions(), StdOut: out, StdErr: errOut, @@ -79,7 +79,6 @@ func NewCommandStartWardleServer(out, errOut io.Writer, stopCh <-chan struct{}) flags := cmd.Flags() o.RecommendedOptions.AddFlags(flags) - o.Admission.AddFlags(flags) return cmd } @@ -87,7 +86,6 @@ func NewCommandStartWardleServer(out, errOut io.Writer, stopCh <-chan struct{}) func (o WardleServerOptions) Validate(args []string) error { errors := []error{} errors = append(errors, o.RecommendedOptions.Validate()...) - errors = append(errors, o.Admission.Validate()...) 
return utilerrors.NewAggregate(errors) } @@ -95,9 +93,9 @@ func (o *WardleServerOptions) Complete() error { return nil } -func (o WardleServerOptions) Config() (*apiserver.Config, error) { +func (o *WardleServerOptions) Config() (*apiserver.Config, error) { // register admission plugins - banflunder.Register(o.Admission.Plugins) + banflunder.Register(o.RecommendedOptions.Admission.Plugins) // TODO have a "real" external address if err := o.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil { @@ -105,21 +103,20 @@ func (o WardleServerOptions) Config() (*apiserver.Config, error) { } serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - if err := o.RecommendedOptions.ApplyTo(serverConfig); err != nil { - return nil, err + + admissionInitializerInitFunc := func() (admission.PluginInitializer, error) { + client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) + if err != nil { + return nil, err + } + informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) + o.SharedInformerFactory = informerFactory + return wardleinitializer.New(informerFactory), nil } - client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) - if err != nil { - return nil, err - } - informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) - admissionInitializer, err := wardleinitializer.New(informerFactory) - if err != nil { - return nil, err - } + serverConfig.ExtraAdmissionInitializersInitFunc = []genericapiserver.AdmissionInitializersInitFunc{admissionInitializerInitFunc} - if err := o.Admission.ApplyTo(&serverConfig.Config, serverConfig.SharedInformerFactory, serverConfig.ClientConfig, apiserver.Scheme, admissionInitializer); err != nil { + if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } From 5dab6bc40a86bf3633bb1f09a048bcc0206b460f Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 1 Dec 2017 19:39:50 +0800 Subject: [PATCH 021/264] update bazel --- staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD index c969d4b0519..6e54da37f05 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder:go_default_library", From d1eb8a6163c0065c23d8d04084bcd85ad5f15964 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Pab=C3=B3n?= Date: Mon, 20 Nov 2017 23:43:22 -0500 Subject: [PATCH 022/264] e2e: CSI Volume tests This e2e test tests the CSI volume plugin in kubernetes with a CSI hostPath driver. It is also setup to be able to be tested with more drivers in the future. 
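Adding another driver to this suite amounts to one more Describe block that deploys that driver's pod and feeds a storageClassTest entry into testDynamicProvisioning, mirroring the hostPath block introduced below. A rough, self-contained sketch of that table shape; the struct only mirrors a few fields of the real storageClassTest in volume_provisioning.go, and "csi-foo" and "node-1" are hypothetical placeholders, not part of this patch:

package main

import "fmt"

// storageClassTest mirrors a subset of the struct of the same name in
// test/e2e/storage/volume_provisioning.go; the real one also carries
// cloudProviders, parameters and a pvCheck callback.
type storageClassTest struct {
	name         string
	provisioner  string
	claimSize    string
	expectedSize string
	nodeName     string
}

func main() {
	// One entry per CSI driver under test. Only the csi-hostpath values come
	// from this patch; "csi-foo" and "node-1" are hypothetical.
	drivers := []storageClassTest{
		{name: "csi-hostpath", provisioner: "csi-hostpath", claimSize: "1Gi", expectedSize: "1Gi", nodeName: "node-1"},
		{name: "csi-foo", provisioner: "csi-foo", claimSize: "1Gi", expectedSize: "1Gi", nodeName: "node-1"},
	}
	for _, d := range drivers {
		fmt.Printf("provision %s via %s, expect %s on node %s\n", d.claimSize, d.provisioner, d.expectedSize, d.nodeName)
	}
}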
--- test/e2e/storage/BUILD | 3 + test/e2e/storage/csi_hostpath.go | 199 +++++++++++++++++++ test/e2e/storage/csi_volumes.go | 243 ++++++++++++++++++++++++ test/e2e/storage/volume_provisioning.go | 193 ++++++++++--------- 4 files changed, 544 insertions(+), 94 deletions(-) create mode 100644 test/e2e/storage/csi_hostpath.go create mode 100644 test/e2e/storage/csi_volumes.go diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index f7c76328535..12f2f24cd06 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -8,6 +8,8 @@ load( go_library( name = "go_default_library", srcs = [ + "csi_hostpath.go", + "csi_volumes.go", "empty_dir_wrapper.go", "flexvolume.go", "pd.go", @@ -47,6 +49,7 @@ go_library( "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/api/storage/v1beta1:go_default_library", diff --git a/test/e2e/storage/csi_hostpath.go b/test/e2e/storage/csi_hostpath.go new file mode 100644 index 00000000000..ddf38f12322 --- /dev/null +++ b/test/e2e/storage/csi_hostpath.go @@ -0,0 +1,199 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file is used to deploy the CSI hostPath plugin +// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath + +package storage + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + csiHostPathPluginImage string = "docker.io/k8scsi/hostpathplugin:0.1" +) + +func csiHostPathPod( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, + f *framework.Framework, + sa *v1.ServiceAccount, +) *v1.Pod { + podClient := client.CoreV1().Pods(config.Namespace) + + priv := true + mountPropagation := v1.MountPropagationBidirectional + hostPathType := v1.HostPathDirectoryOrCreate + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-pod", + Namespace: config.Namespace, + Labels: map[string]string{ + "app": "hostpath-driver", + }, + }, + Spec: v1.PodSpec{ + ServiceAccountName: sa.GetName(), + NodeName: config.ServerNodeName, + RestartPolicy: v1.RestartPolicyNever, + Containers: []v1.Container{ + { + Name: "external-provisioner", + Image: csiExternalProvisionerImage, + ImagePullPolicy: v1.PullAlways, + Args: []string{ + "--v=5", + "--provisioner=csi-hostpath", + "--csi-address=/csi/csi.sock", + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + }, + }, + { + Name: "driver-registrar", + Image: csiDriverRegistrarImage, + ImagePullPolicy: v1.PullAlways, + Args: []string{ + "--v=5", + "--csi-address=/csi/csi.sock", + }, + Env: []v1.EnvVar{ + { + Name: "KUBE_NODE_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + }, + }, + { + Name: "external-attacher", + Image: csiExternalAttacherImage, + ImagePullPolicy: v1.PullAlways, + Args: []string{ + "--v=5", + "--csi-address=$(ADDRESS)", + }, + Env: []v1.EnvVar{ + { + Name: "ADDRESS", + Value: "/csi/csi.sock", + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + }, + }, + { + Name: "hostpath-driver", + Image: csiHostPathPluginImage, + ImagePullPolicy: v1.PullAlways, + SecurityContext: &v1.SecurityContext{ + Privileged: &priv, + }, + Args: []string{ + "--v=5", + "--endpoint=$(CSI_ENDPOINT)", + "--nodeid=$(KUBE_NODE_NAME)", + }, + Env: []v1.EnvVar{ + { + Name: "CSI_ENDPOINT", + Value: "unix://" + "/csi/csi.sock", + }, + { + Name: "KUBE_NODE_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + { + Name: "mountpoint-dir", + MountPath: "/var/lib/kubelet/pods", + MountPropagation: &mountPropagation, + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: "socket-dir", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/var/lib/kubelet/plugins/csi-hostpath", + Type: &hostPathType, + }, + }, + }, + { + Name: "mountpoint-dir", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/var/lib/kubelet/pods", + Type: &hostPathType, + }, + }, + }, + }, + }, + } + + err := framework.DeletePodWithWait(f, client, pod) + framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v", + pod.GetNamespace(), pod.GetName(), err) + + if teardown { + return nil + } + + ret, err := podClient.Create(pod) + if err != nil { + 
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err) + } + + // Wait for pod to come up + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret)) + return ret +} diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go new file mode 100644 index 00000000000..3e764fad429 --- /dev/null +++ b/test/e2e/storage/csi_volumes.go @@ -0,0 +1,243 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "math/rand" + "time" + + "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" + + . "github.com/onsi/ginkgo" +) + +const ( + csiExternalAttacherImage string = "docker.io/k8scsi/csi-attacher:0.1" + csiExternalProvisionerImage string = "docker.io/k8scsi/csi-provisioner:0.1" + csiDriverRegistrarImage string = "docker.io/k8scsi/driver-registrar" +) + +func externalAttacherServiceAccount( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, +) *v1.ServiceAccount { + serviceAccountName := config.Prefix + "-service-account" + serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace) + sa := &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceAccountName, + }, + } + + serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { + _, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + return nil + } + + ret, err := serviceAccountClient.Create(sa) + if err != nil { + framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err) + } + + return ret +} + +func externalAttacherClusterRole( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, +) *rbacv1.ClusterRole { + clusterRoleClient := client.RbacV1().ClusterRoles() + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-cluster-role", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"persistentvolumes"}, + Verbs: []string{"create", "delete", "get", "list", "watch", "update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"persistentvolumesclaims"}, + Verbs: []string{"get", "list", "watch", "update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{"get", "list", "watch", "update"}, + }, + { + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"volumeattachments"}, + Verbs: []string{"get", "list", "watch", "update"}, + }, + { + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"storageclasses"}, + 
Verbs: []string{"get", "list", "watch"}, + }, + }, + } + + clusterRoleClient.Delete(role.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { + _, err := clusterRoleClient.Get(role.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + return nil + } + + ret, err := clusterRoleClient.Create(role) + if err != nil { + framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err) + } + + return ret +} + +func externalAttacherClusterRoleBinding( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, + sa *v1.ServiceAccount, + clusterRole *rbacv1.ClusterRole, +) *rbacv1.ClusterRoleBinding { + clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings() + binding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-role-binding", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa.GetName(), + Namespace: sa.GetNamespace(), + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: clusterRole.GetName(), + APIGroup: "rbac.authorization.k8s.io", + }, + } + + clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { + _, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + return nil + } + + ret, err := clusterRoleBindingClient.Create(binding) + if err != nil { + framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err) + } + + return ret +} + +var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() { + f := framework.NewDefaultFramework("csi-mock-plugin") + + var ( + cs clientset.Interface + ns *v1.Namespace + node v1.Node + config framework.VolumeTestConfig + suffix string + ) + + BeforeEach(func() { + cs = f.ClientSet + ns = f.Namespace + nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + node = nodes.Items[rand.Intn(len(nodes.Items))] + config = framework.VolumeTestConfig{ + Namespace: ns.Name, + Prefix: "csi", + ClientNodeName: node.Name, + ServerNodeName: node.Name, + WaitForCompletion: true, + } + suffix = ns.Name + }) + + // Create one of these for each of the drivers to be tested + // CSI hostPath driver test + Describe("Sanity CSI plugin test using hostPath CSI driver", func() { + + var ( + clusterRole *rbacv1.ClusterRole + serviceAccount *v1.ServiceAccount + ) + + BeforeEach(func() { + By("deploying csi hostpath driver") + clusterRole = externalAttacherClusterRole(cs, config, false) + serviceAccount = externalAttacherServiceAccount(cs, config, false) + externalAttacherClusterRoleBinding(cs, config, false, serviceAccount, clusterRole) + csiHostPathPod(cs, config, false, f, serviceAccount) + }) + + AfterEach(func() { + By("uninstalling csi hostpath driver") + csiHostPathPod(cs, config, true, f, serviceAccount) + externalAttacherClusterRoleBinding(cs, config, true, serviceAccount, clusterRole) + serviceAccount = externalAttacherServiceAccount(cs, config, true) + clusterRole = externalAttacherClusterRole(cs, config, true) + }) + + It("should provision storage with a hostPath CSI driver", func() { + t := storageClassTest{ + name: "csi-hostpath", + provisioner: "csi-hostpath", + parameters: 
map[string]string{}, + claimSize: "1Gi", + expectedSize: "1Gi", + nodeName: node.Name, + } + + claim := newClaim(t, ns.GetName(), "") + class := newStorageClass(t, ns.GetName(), "") + testDynamicProvisioning(t, cs, claim, class) + }) + }) +}) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index e87cc902201..41107da5344 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -56,6 +56,7 @@ type storageClassTest struct { claimSize string expectedSize string pvCheck func(volume *v1.PersistentVolume) error + nodeName string } const ( @@ -139,10 +140,10 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla // Get entry, get mount options at 6th word, replace brackets with commas command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) } - runInPodWithVolume(client, claim.Namespace, claim.Name, command) + runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command) By("checking the created volume is readable and retains data") - runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data") + runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data") By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) @@ -250,140 +251,140 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // that can be used to persist data among pods. tests := []storageClassTest{ { - "SSD PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "SSD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-ssd", "zone": cloudZone, }, - "1.5G", - "2G", - func(volume *v1.PersistentVolume) error { + claimSize: "1.5G", + expectedSize: "2G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-ssd") }, }, { - "HDD PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "HDD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-standard", }, - "1.5G", - "2G", - func(volume *v1.PersistentVolume) error { + claimSize: "1.5G", + expectedSize: "2G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, }, // AWS { - "gp2 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "gp2 EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "gp2", "zone": cloudZone, }, - "1.5Gi", - "2Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "1.5Gi", + expectedSize: "2Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "gp2", false) }, }, { - "io1 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "io1 EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "io1", "iopsPerGB": "50", }, - "3.5Gi", - "4Gi", // 4 GiB is minimum for io1 - func(volume *v1.PersistentVolume) error { + claimSize: "3.5Gi", + expectedSize: "4Gi", // 4 GiB is minimum for io1 + pvCheck: 
func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "io1", false) }, }, { - "sc1 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "sc1 EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "sc1", }, - "500Gi", // minimum for sc1 - "500Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "500Gi", // minimum for sc1 + expectedSize: "500Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "sc1", false) }, }, { - "st1 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "st1 EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "st1", }, - "500Gi", // minimum for st1 - "500Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "500Gi", // minimum for st1 + expectedSize: "500Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "st1", false) }, }, { - "encrypted EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "encrypted EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "encrypted": "true", }, - "1Gi", - "1Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "1Gi", + expectedSize: "1Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "gp2", true) }, }, // OpenStack generic tests (works on all OpenStack deployments) { - "generic Cinder volume on OpenStack", - []string{"openstack"}, - "kubernetes.io/cinder", - map[string]string{}, - "1.5Gi", - "2Gi", - nil, // there is currently nothing to check on OpenStack + name: "generic Cinder volume on OpenStack", + cloudProviders: []string{"openstack"}, + provisioner: "kubernetes.io/cinder", + parameters: map[string]string{}, + claimSize: "1.5Gi", + expectedSize: "2Gi", + pvCheck: nil, // there is currently nothing to check on OpenStack }, { - "Cinder volume with empty volume type and zone on OpenStack", - []string{"openstack"}, - "kubernetes.io/cinder", - map[string]string{ + name: "Cinder volume with empty volume type and zone on OpenStack", + cloudProviders: []string{"openstack"}, + provisioner: "kubernetes.io/cinder", + parameters: map[string]string{ "type": "", "availability": "", }, - "1.5Gi", - "2Gi", - nil, // there is currently nothing to check on OpenStack + claimSize: "1.5Gi", + expectedSize: "2Gi", + pvCheck: nil, // there is currently nothing to check on OpenStack }, // vSphere generic test { - "generic vSphere volume", - []string{"vsphere"}, - "kubernetes.io/vsphere-volume", - map[string]string{}, - "1.5Gi", - "1.5Gi", - nil, + name: "generic vSphere volume", + cloudProviders: []string{"vsphere"}, + provisioner: "kubernetes.io/vsphere-volume", + parameters: map[string]string{}, + claimSize: "1.5Gi", + expectedSize: "1.5Gi", + pvCheck: nil, }, { - "Azure disk volume with empty sku and location", - []string{"azure"}, - "kubernetes.io/azure-disk", - map[string]string{}, - "1Gi", - "1Gi", - nil, + name: "Azure disk volume with empty sku and location", + cloudProviders: []string{"azure"}, + provisioner: "kubernetes.io/azure-disk", + parameters: map[string]string{}, + claimSize: "1Gi", + expectedSize: "1Gi", + pvCheck: nil, }, } @@ -430,15 +431,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("gce", "gke") test := storageClassTest{ - "HDD 
PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "HDD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-standard", }, - "1G", - "1G", - func(volume *v1.PersistentVolume) error { + claimSize: "1G", + expectedSize: "1G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, } @@ -464,15 +465,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("gce", "gke") test := storageClassTest{ - "HDD PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "HDD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-standard", }, - "1G", - "1G", - func(volume *v1.PersistentVolume) error { + claimSize: "1G", + expectedSize: "1G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, } @@ -791,7 +792,7 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim { } // runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. -func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) { +func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -829,6 +830,10 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) { }, }, } + + if len(nodeName) != 0 { + pod.Spec.NodeName = nodeName + } pod, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) defer func() { From a0a69a35830e60ef9f87ddf3f13391c4c3e39077 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 19 Dec 2017 15:37:21 +0800 Subject: [PATCH 023/264] Add more validate conditions when run kubectl get with --raw --- pkg/kubectl/cmd/resource/get.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index 4013cb42874..4e830342af4 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -25,6 +25,8 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + "net/url" + kapierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -213,6 +215,12 @@ func (options *GetOptions) Validate(cmd *cobra.Command) error { if len(options.Raw) > 0 && (options.Watch || options.WatchOnly || len(options.LabelSelector) > 0 || options.Export) { return fmt.Errorf("--raw may not be specified with other flags that filter the server request or alter the output") } + if len(cmdutil.GetFlagString(cmd, "output")) > 0 { + return cmdutil.UsageErrorf(cmd, "--raw and --output are mutually exclusive") + } + if _, err := url.ParseRequestURI(options.Raw); err != nil { + return cmdutil.UsageErrorf(cmd, "--raw must be a valid URL path: %v", err) + } if cmdutil.GetFlagBool(cmd, "show-labels") { outputOption := cmd.Flags().Lookup("output").Value.String() if outputOption != "" && outputOption != "wide" { From ce14bdfc7cc57264e8fadd234338582788c3bb47 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Tue, 19 Dec 2017 10:47:27 +0100 Subject: [PATCH 024/264] apimachinery: remove dead code from roundtrip tester --- .../apimachinery/pkg/api/testing/roundtrip/roundtrip.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go b/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go index 479a6a50a3e..0c032b81555 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go @@ -277,12 +277,6 @@ func roundTrip(t *testing.T, scheme *runtime.Scheme, codec runtime.Codec, object return } - // catch deepcopy errors early - if !apiequality.Semantic.DeepEqual(original, object) { - t.Errorf("%v: DeepCopy did not lead to equal object, diff: %v", name, diff.ObjectReflectDiff(original, object)) - return - } - // encode (serialize) the deep copy using the provided codec data, err := runtime.Encode(codec, object) if err != nil { From 8a7f8bc0462afb34a53542ce1deb4256e644bb0b Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 19 Dec 2017 17:51:30 +0800 Subject: [PATCH 025/264] Move output and url checks under raw flag condition --- pkg/kubectl/cmd/resource/get.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index 4e830342af4..6e928a65445 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -212,14 +212,16 @@ func (options *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args // Validate checks the set of flags provided by the user. func (options *GetOptions) Validate(cmd *cobra.Command) error { - if len(options.Raw) > 0 && (options.Watch || options.WatchOnly || len(options.LabelSelector) > 0 || options.Export) { - return fmt.Errorf("--raw may not be specified with other flags that filter the server request or alter the output") - } - if len(cmdutil.GetFlagString(cmd, "output")) > 0 { - return cmdutil.UsageErrorf(cmd, "--raw and --output are mutually exclusive") - } - if _, err := url.ParseRequestURI(options.Raw); err != nil { - return cmdutil.UsageErrorf(cmd, "--raw must be a valid URL path: %v", err) + if len(options.Raw) > 0 { + if options.Watch || options.WatchOnly || len(options.LabelSelector) > 0 || options.Export { + return fmt.Errorf("--raw may not be specified with other flags that filter the server request or alter the output") + } + if len(cmdutil.GetFlagString(cmd, "output")) > 0 { + return cmdutil.UsageErrorf(cmd, "--raw and --output are mutually exclusive") + } + if _, err := url.ParseRequestURI(options.Raw); err != nil { + return cmdutil.UsageErrorf(cmd, "--raw must be a valid URL path: %v", err) + } } if cmdutil.GetFlagBool(cmd, "show-labels") { outputOption := cmd.Flags().Lookup("output").Value.String() From b6e1fecde7f43f8a0284b56c291fd7fa08391ea1 Mon Sep 17 00:00:00 2001 From: zouyee Date: Tue, 19 Dec 2017 23:24:35 +0800 Subject: [PATCH 026/264] check function return err --- pkg/volume/cephfs/cephfs.go | 6 +++++- pkg/volume/glusterfs/glusterfs.go | 5 +++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index bbe681d71b5..103766e7e22 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -232,7 +232,10 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { if !notMnt { return nil } - os.MkdirAll(dir, 0750) + + 
if err := os.MkdirAll(dir, 0750); err != nil { + return err + } // check whether it belongs to fuse, if not, default to use kernel mount. if cephfsVolume.checkFuseMount() { @@ -253,6 +256,7 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { } } glog.V(4).Infof("CephFS kernel mount.") + err = cephfsVolume.execMount(dir) if err != nil { // cleanup upon failure. diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index d90ea04cb73..6e3c4545fb3 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -259,8 +259,9 @@ func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error { if !notMnt { return nil } - - os.MkdirAll(dir, 0750) + if err := os.MkdirAll(dir, 0750); err != nil { + return err + } err = b.setUpAtInternal(dir) if err == nil { return nil From 63e2eacd22b5050140f9630d8ec430a54361d228 Mon Sep 17 00:00:00 2001 From: Ryan Phillips Date: Tue, 19 Dec 2017 15:04:56 -0600 Subject: [PATCH 027/264] add semver metadata regex * supports v1.9.0+stable.0 --- hack/lib/version.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/lib/version.sh b/hack/lib/version.sh index 1e819b6e51a..f8cf2ea2b9c 100644 --- a/hack/lib/version.sh +++ b/hack/lib/version.sh @@ -89,7 +89,7 @@ kube::version::get_version_vars() { # Try to match the "git describe" output to a regex to try to extract # the "major" and "minor" versions and whether this is the exact tagged # version or whether the tree is between two tagged versions. - if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?$ ]]; then + if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then KUBE_GIT_MAJOR=${BASH_REMATCH[1]} KUBE_GIT_MINOR=${BASH_REMATCH[2]} if [[ -n "${BASH_REMATCH[4]}" ]]; then From ffba27d72ee3fc85766dbd0971fccaa61fe4736c Mon Sep 17 00:00:00 2001 From: Josh Horwitz Date: Sat, 25 Nov 2017 13:44:39 -0500 Subject: [PATCH 028/264] Refactor service controller to common controller pattern --- pkg/controller/service/service_controller.go | 144 +++++++----------- .../service/service_controller_test.go | 76 +++------ 2 files changed, 73 insertions(+), 147 deletions(-) diff --git a/pkg/controller/service/service_controller.go b/pkg/controller/service/service_controller.go index 3496f0681ca..4854fc4f1cf 100644 --- a/pkg/controller/service/service_controller.go +++ b/pkg/controller/service/service_controller.go @@ -60,11 +60,6 @@ const ( clientRetryCount = 5 clientRetryInterval = 5 * time.Second - retryable = true - notRetryable = false - - doNotRetry = time.Duration(0) - // LabelNodeRoleMaster specifies that a node is a master // It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 LabelNodeRoleMaster = "node-role.kubernetes.io/master" @@ -77,8 +72,6 @@ const ( type cachedService struct { // The cached state of the service state *v1.Service - // Controls error back-off - lastRetryDelay time.Duration } type serviceCache struct { @@ -86,6 +79,8 @@ type serviceCache struct { serviceMap map[string]*cachedService } +// ServiceController keeps cloud provider service resources +// (like load balancers) in sync with the registry. 
type ServiceController struct { cloud cloudprovider.Interface knownHosts []*v1.Node @@ -101,7 +96,7 @@ type ServiceController struct { nodeLister corelisters.NodeLister nodeListerSynced cache.InformerSynced // services that need to be synced - workingQueue workqueue.DelayingInterface + queue workqueue.RateLimitingInterface } // New returns a new service controller to keep cloud provider service resources @@ -134,7 +129,7 @@ func New( eventRecorder: recorder, nodeLister: nodeInformer.Lister(), nodeListerSynced: nodeInformer.Informer().HasSynced, - workingQueue: workqueue.NewNamedDelayingQueue("service"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"), } serviceInformer.Informer().AddEventHandlerWithResyncPeriod( @@ -167,7 +162,7 @@ func (s *ServiceController) enqueueService(obj interface{}) { glog.Errorf("Couldn't get key for object %#v: %v", obj, err) return } - s.workingQueue.Add(key) + s.queue.Add(key) } // Run starts a background goroutine that watches for changes to services that @@ -182,7 +177,7 @@ func (s *ServiceController) enqueueService(obj interface{}) { // object. func (s *ServiceController) Run(stopCh <-chan struct{}, workers int) { defer runtime.HandleCrash() - defer s.workingQueue.ShutDown() + defer s.queue.ShutDown() glog.Info("Starting service controller") defer glog.Info("Shutting down service controller") @@ -203,21 +198,28 @@ func (s *ServiceController) Run(stopCh <-chan struct{}, workers int) { // worker runs a worker thread that just dequeues items, processes them, and marks them done. // It enforces that the syncHandler is never invoked concurrently with the same key. func (s *ServiceController) worker() { - for { - func() { - key, quit := s.workingQueue.Get() - if quit { - return - } - defer s.workingQueue.Done(key) - err := s.syncService(key.(string)) - if err != nil { - glog.Errorf("Error syncing service: %v", err) - } - }() + for s.processNextWorkItem() { } } +func (s *ServiceController) processNextWorkItem() bool { + key, quit := s.queue.Get() + if quit { + return false + } + defer s.queue.Done(key) + + err := s.syncService(key.(string)) + if err == nil { + s.queue.Forget(key) + return true + } + + runtime.HandleError(fmt.Errorf("error processing service %v (will retry): %v", key, err)) + s.queue.AddRateLimited(key) + return true +} + func (s *ServiceController) init() error { if s.cloud == nil { return fmt.Errorf("WARNING: no cloud provider provided, services of type LoadBalancer will fail") @@ -235,31 +237,21 @@ func (s *ServiceController) init() error { // Returns an error if processing the service update failed, along with a time.Duration // indicating whether processing should be retried; zero means no-retry; otherwise // we should retry in that Duration. 
-func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service, key string) (error, time.Duration) { +func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service, key string) error { if cachedService.state != nil { if cachedService.state.UID != service.UID { - err, retry := s.processLoadBalancerDelete(cachedService, key) + err := s.processLoadBalancerDelete(cachedService, key) if err != nil { - return err, retry + return err } } } // cache the service, we need the info for service deletion cachedService.state = service - err, retry := s.createLoadBalancerIfNeeded(key, service) + err := s.createLoadBalancerIfNeeded(key, service) if err != nil { - message := "Error creating load balancer" - var retryToReturn time.Duration - if retry { - message += " (will retry): " - retryToReturn = cachedService.nextRetryDelay() - } else { - message += " (will not retry): " - retryToReturn = doNotRetry - } - message += err.Error() - s.eventRecorder.Event(service, v1.EventTypeWarning, "CreatingLoadBalancerFailed", message) - return err, retryToReturn + s.eventRecorder.Eventf(service, v1.EventTypeWarning, "CreatingLoadBalancerFailed", "Error creating load balancer (will retry): %v", err) + return err } // Always update the cache upon success. // NOTE: Since we update the cached service if and only if we successfully @@ -267,13 +259,12 @@ func (s *ServiceController) processServiceUpdate(cachedService *cachedService, s // been successfully processed. s.cache.set(key, cachedService) - cachedService.resetRetryDelay() - return nil, doNotRetry + return nil } // Returns whatever error occurred along with a boolean indicator of whether it // should be retried. -func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.Service) (error, bool) { +func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.Service) error { // Note: It is safe to just call EnsureLoadBalancer. But, on some clouds that requires a delete & create, // which may involve service interruption. Also, we would like user-friendly events. 
@@ -285,13 +276,13 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S if !wantsLoadBalancer(service) { _, exists, err := s.balancer.GetLoadBalancer(s.clusterName, service) if err != nil { - return fmt.Errorf("error getting LB for service %s: %v", key, err), retryable + return fmt.Errorf("error getting LB for service %s: %v", key, err) } if exists { glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key) s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") if err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service); err != nil { - return err, retryable + return err } s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer") } @@ -305,7 +296,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuringLoadBalancer", "Ensuring load balancer") newState, err = s.ensureLoadBalancer(service) if err != nil { - return fmt.Errorf("failed to ensure load balancer for service %s: %v", key, err), retryable + return fmt.Errorf("failed to ensure load balancer for service %s: %v", key, err) } s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuredLoadBalancer", "Ensured load balancer") } @@ -320,13 +311,14 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S service.Status.LoadBalancer = *newState if err := s.persistUpdate(service); err != nil { - return fmt.Errorf("failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable + runtime.HandleError(fmt.Errorf("failed to persist service %q updated status to apiserver, even after retries. Giving up: %v", key, err)) + return nil } } else { glog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key) } - return nil, notRetryable + return nil } func (s *ServiceController) persistUpdate(service *v1.Service) error { @@ -721,31 +713,12 @@ func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool { return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP } -// Computes the next retry, using exponential backoff -// mutex must be held. -func (s *cachedService) nextRetryDelay() time.Duration { - s.lastRetryDelay = s.lastRetryDelay * 2 - if s.lastRetryDelay < minRetryDelay { - s.lastRetryDelay = minRetryDelay - } - if s.lastRetryDelay > maxRetryDelay { - s.lastRetryDelay = maxRetryDelay - } - return s.lastRetryDelay -} - -// Resets the retry exponential backoff. mutex must be held. -func (s *cachedService) resetRetryDelay() { - s.lastRetryDelay = time.Duration(0) -} - // syncService will sync the Service with the given key if it has had its expectations fulfilled, // meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be // invoked concurrently with the same key. 
func (s *ServiceController) syncService(key string) error { startTime := time.Now() var cachedService *cachedService - var retryDelay time.Duration defer func() { glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime)) }() @@ -760,59 +733,44 @@ func (s *ServiceController) syncService(key string) error { switch { case errors.IsNotFound(err): // service absence in store means watcher caught the deletion, ensure LB info is cleaned - glog.Infof("Service has been deleted %v", key) - err, retryDelay = s.processServiceDeletion(key) + glog.Infof("Service has been deleted %v. Attempting to cleanup load balancer resources", key) + err = s.processServiceDeletion(key) case err != nil: glog.Infof("Unable to retrieve service %v from store: %v", key, err) - s.workingQueue.Add(key) - return err default: cachedService = s.cache.getOrCreate(key) - err, retryDelay = s.processServiceUpdate(cachedService, service, key) + err = s.processServiceUpdate(cachedService, service, key) } - if retryDelay != 0 { - // Add the failed service back to the queue so we'll retry it. - glog.Errorf("Failed to process service %v. Retrying in %s: %v", key, retryDelay, err) - go func(obj interface{}, delay time.Duration) { - // put back the service key to working queue, it is possible that more entries of the service - // were added into the queue during the delay, but it does not mess as when handling the retry, - // it always get the last service info from service store - s.workingQueue.AddAfter(obj, delay) - }(key, retryDelay) - } else if err != nil { - runtime.HandleError(fmt.Errorf("failed to process service %v. Not retrying: %v", key, err)) - } - return nil + return err } // Returns an error if processing the service deletion failed, along with a time.Duration // indicating whether processing should be retried; zero means no-retry; otherwise // we should retry after that Duration. -func (s *ServiceController) processServiceDeletion(key string) (error, time.Duration) { +func (s *ServiceController) processServiceDeletion(key string) error { cachedService, ok := s.cache.get(key) if !ok { - return fmt.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key), doNotRetry + glog.Errorf("service %s not in cache even though the watcher thought it was. 
Ignoring the deletion", key) + return nil } return s.processLoadBalancerDelete(cachedService, key) } -func (s *ServiceController) processLoadBalancerDelete(cachedService *cachedService, key string) (error, time.Duration) { +func (s *ServiceController) processLoadBalancerDelete(cachedService *cachedService, key string) error { service := cachedService.state // delete load balancer info only if the service type is LoadBalancer if !wantsLoadBalancer(service) { - return nil, doNotRetry + return nil } s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service) if err != nil { - message := "Error deleting load balancer (will retry): " + err.Error() - s.eventRecorder.Event(service, v1.EventTypeWarning, "DeletingLoadBalancerFailed", message) - return err, cachedService.nextRetryDelay() + s.eventRecorder.Eventf(service, v1.EventTypeWarning, "DeletingLoadBalancerFailed", "Error deleting load balancer (will retry): %v", err) + return err } s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer") s.cache.delete(key) - cachedService.resetRetryDelay() - return nil, doNotRetry + return nil } diff --git a/pkg/controller/service/service_controller_test.go b/pkg/controller/service/service_controller_test.go index 0c4990adb1a..0241cb1d22d 100644 --- a/pkg/controller/service/service_controller_test.go +++ b/pkg/controller/service/service_controller_test.go @@ -20,7 +20,6 @@ import ( "fmt" "reflect" "testing" - "time" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -129,7 +128,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) { for _, item := range table { controller, cloud, client := newController() - err, _ := controller.createLoadBalancerIfNeeded("foo/bar", item.service) + err := controller.createLoadBalancerIfNeeded("foo/bar", item.service) if !item.expectErr && err != nil { t.Errorf("unexpected error: %v", err) } else if item.expectErr && err == nil { @@ -320,7 +319,7 @@ func TestProcessServiceUpdate(t *testing.T) { key string updateFn func(*v1.Service) *v1.Service //Manipulate the structure svc *v1.Service - expectedFn func(*v1.Service, error, time.Duration) error //Error comparision function + expectedFn func(*v1.Service, error) error //Error comparision function }{ { testName: "If updating a valid service", @@ -333,15 +332,8 @@ func TestProcessServiceUpdate(t *testing.T) { return svc }, - expectedFn: func(svc *v1.Service, err error, retryDuration time.Duration) error { - - if err != nil { - return err - } - if retryDuration != doNotRetry { - return fmt.Errorf("retryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration) - } - return nil + expectedFn: func(svc *v1.Service, err error) error { + return err }, }, { @@ -358,9 +350,9 @@ func TestProcessServiceUpdate(t *testing.T) { cachedServiceTest.state = svc controller.cache.set(keyExpected, cachedServiceTest) - keyGot, quit := controller.workingQueue.Get() + keyGot, quit := controller.queue.Get() if quit { - t.Fatalf("get no workingQueue element") + t.Fatalf("get no queue element") } if keyExpected != keyGot.(string) { t.Fatalf("get service key error, expected: %s, got: %s", keyExpected, keyGot.(string)) @@ -372,20 +364,17 @@ func TestProcessServiceUpdate(t *testing.T) { return newService }, - expectedFn: func(svc *v1.Service, err error, retryDuration time.Duration) error { + expectedFn: func(svc *v1.Service, err error) error { if err != nil { return err } - if 
retryDuration != doNotRetry { - return fmt.Errorf("retryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration) - } keyExpected := svc.GetObjectMeta().GetNamespace() + "/" + svc.GetObjectMeta().GetName() cachedServiceGot, exist := controller.cache.get(keyExpected) if !exist { - return fmt.Errorf("update service error, workingQueue should contain service: %s", keyExpected) + return fmt.Errorf("update service error, queue should contain service: %s", keyExpected) } if cachedServiceGot.state.Spec.LoadBalancerIP != newLBIP { return fmt.Errorf("update LoadBalancerIP error, expected: %s, got: %s", newLBIP, cachedServiceGot.state.Spec.LoadBalancerIP) @@ -398,8 +387,8 @@ func TestProcessServiceUpdate(t *testing.T) { for _, tc := range testCases { newSvc := tc.updateFn(tc.svc) svcCache := controller.cache.getOrCreate(tc.key) - obtErr, retryDuration := controller.processServiceUpdate(svcCache, newSvc, tc.key) - if err := tc.expectedFn(newSvc, obtErr, retryDuration); err != nil { + obtErr := controller.processServiceUpdate(svcCache, newSvc, tc.key) + if err := tc.expectedFn(newSvc, obtErr); err != nil { t.Errorf("%v processServiceUpdate() %v", tc.testName, err) } } @@ -491,33 +480,21 @@ func TestProcessServiceDeletion(t *testing.T) { var controller *ServiceController var cloud *fakecloud.FakeCloud - //Add a global svcKey name + // Add a global svcKey name svcKey := "external-balancer" testCases := []struct { testName string - updateFn func(*ServiceController) //Update function used to manupulate srv and controller values - expectedFn func(svcErr error, retryDuration time.Duration) error //Function to check if the returned value is expected + updateFn func(*ServiceController) // Update function used to manupulate srv and controller values + expectedFn func(svcErr error) error // Function to check if the returned value is expected }{ { testName: "If an non-existant service is deleted", updateFn: func(controller *ServiceController) { - //Does not do anything + // Does not do anything }, - expectedFn: func(svcErr error, retryDuration time.Duration) error { - - expectedError := "service external-balancer not in cache even though the watcher thought it was. 
Ignoring the deletion" - if svcErr == nil || svcErr.Error() != expectedError { - //cannot be nil or Wrong error message - return fmt.Errorf("Expected=%v Obtained=%v", expectedError, svcErr) - } - - if retryDuration != doNotRetry { - //Retry duration should match - return fmt.Errorf("RetryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration) - } - - return nil + expectedFn: func(svcErr error) error { + return svcErr }, }, { @@ -529,7 +506,7 @@ func TestProcessServiceDeletion(t *testing.T) { cloud.Err = fmt.Errorf("Error Deleting the Loadbalancer") }, - expectedFn: func(svcErr error, retryDuration time.Duration) error { + expectedFn: func(svcErr error) error { expectedError := "Error Deleting the Loadbalancer" @@ -537,9 +514,6 @@ func TestProcessServiceDeletion(t *testing.T) { return fmt.Errorf("Expected=%v Obtained=%v", expectedError, svcErr) } - if retryDuration != minRetryDelay { - return fmt.Errorf("RetryDuration Expected=%v Obtained=%v", minRetryDelay, retryDuration) - } return nil }, }, @@ -554,21 +528,15 @@ func TestProcessServiceDeletion(t *testing.T) { controller.cache.set(svcKey, svc) }, - expectedFn: func(svcErr error, retryDuration time.Duration) error { - + expectedFn: func(svcErr error) error { if svcErr != nil { return fmt.Errorf("Expected=nil Obtained=%v", svcErr) } - if retryDuration != doNotRetry { - //Retry duration should match - return fmt.Errorf("RetryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration) - } - - //It should no longer be in the workqueue. + // It should no longer be in the workqueue. _, exist := controller.cache.get(svcKey) if exist { - return fmt.Errorf("delete service error, workingQueue should not contain service: %s any more", svcKey) + return fmt.Errorf("delete service error, queue should not contain service: %s any more", svcKey) } return nil @@ -580,8 +548,8 @@ func TestProcessServiceDeletion(t *testing.T) { //Create a new controller. 
controller, cloud, _ = newController() tc.updateFn(controller) - obtainedErr, retryDuration := controller.processServiceDeletion(svcKey) - if err := tc.expectedFn(obtainedErr, retryDuration); err != nil { + obtainedErr := controller.processServiceDeletion(svcKey) + if err := tc.expectedFn(obtainedErr); err != nil { t.Errorf("%v processServiceDeletion() %v", tc.testName, err) } } From 5800c32fb67849c44a404f371dd70e1c9122d31e Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Sat, 23 Dec 2017 17:17:35 +0800 Subject: [PATCH 029/264] [quota controller] remove extra queue.Add() requeue immediately after an error may end-up with hot-loop --- pkg/controller/resourcequota/resource_quota_controller.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index b2ae6d1f6e2..ccd43de8ab0 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -308,7 +308,6 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err } if err != nil { glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err) - rq.queue.Add(key) return err } return rq.syncResourceQuota(quota) From 4dcc92e472a365814e676260c0deaeedc5dca270 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Sat, 23 Dec 2017 17:56:21 +0800 Subject: [PATCH 030/264] remove dead code in pkg/api --- pkg/api/endpoints/util.go | 8 -------- pkg/api/v1/endpoints/util.go | 8 -------- 2 files changed, 16 deletions(-) diff --git a/pkg/api/endpoints/util.go b/pkg/api/endpoints/util.go index 3d7b6e514f6..8fa72b56819 100644 --- a/pkg/api/endpoints/util.go +++ b/pkg/api/endpoints/util.go @@ -168,14 +168,6 @@ func LessEndpointAddress(a, b *api.EndpointAddress) bool { return a.TargetRef.UID < b.TargetRef.UID } -type addrPtrsByIpAndUID []*api.EndpointAddress - -func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } -func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrPtrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(sl[i], sl[j]) -} - // SortSubsets sorts an array of EndpointSubset objects in place. For ease of // use it returns the input slice. func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { diff --git a/pkg/api/v1/endpoints/util.go b/pkg/api/v1/endpoints/util.go index 89b8d9e16ae..833af440c32 100644 --- a/pkg/api/v1/endpoints/util.go +++ b/pkg/api/v1/endpoints/util.go @@ -169,14 +169,6 @@ func LessEndpointAddress(a, b *v1.EndpointAddress) bool { return a.TargetRef.UID < b.TargetRef.UID } -type addrPtrsByIpAndUID []*v1.EndpointAddress - -func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } -func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrPtrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(sl[i], sl[j]) -} - // SortSubsets sorts an array of EndpointSubset objects in place. For ease of // use it returns the input slice. 
func SortSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset { From ddf97084f5a7259656084094a2ffb81a4f58d869 Mon Sep 17 00:00:00 2001 From: Di Xu Date: Mon, 20 Nov 2017 12:40:51 +0800 Subject: [PATCH 031/264] update vendor spf13/cobra to enforce required flags --- Godeps/Godeps.json | 4 +- .../Godeps/Godeps.json | 2 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 2 +- .../sample-apiserver/Godeps/Godeps.json | 2 +- vendor/github.com/spf13/cobra/.travis.yml | 13 +- vendor/github.com/spf13/cobra/BUILD | 2 + vendor/github.com/spf13/cobra/README.md | 851 +++++++--------- vendor/github.com/spf13/cobra/args.go | 89 ++ .../spf13/cobra/bash_completions.go | 315 +++--- .../spf13/cobra/bash_completions.md | 4 +- vendor/github.com/spf13/cobra/cobra.go | 74 +- vendor/github.com/spf13/cobra/command.go | 936 ++++++++++-------- vendor/github.com/spf13/cobra/command_win.go | 8 +- vendor/github.com/spf13/cobra/doc/BUILD | 3 + vendor/github.com/spf13/cobra/doc/man_docs.go | 75 +- vendor/github.com/spf13/cobra/doc/man_docs.md | 11 +- vendor/github.com/spf13/cobra/doc/md_docs.go | 94 +- vendor/github.com/spf13/cobra/doc/md_docs.md | 23 +- .../github.com/spf13/cobra/doc/rest_docs.go | 185 ++++ .../github.com/spf13/cobra/doc/rest_docs.md | 114 +++ vendor/github.com/spf13/cobra/doc/util.go | 17 +- .../github.com/spf13/cobra/doc/yaml_docs.go | 169 ++++ .../github.com/spf13/cobra/doc/yaml_docs.md | 112 +++ .../github.com/spf13/cobra/zsh_completions.go | 126 +++ 24 files changed, 1977 insertions(+), 1254 deletions(-) create mode 100644 vendor/github.com/spf13/cobra/args.go create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.go create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.md create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.go create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.md create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 691bd76f46e..592545de505 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2470,11 +2470,11 @@ }, { "ImportPath": "github.com/spf13/cobra", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + "Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f" }, { "ImportPath": "github.com/spf13/cobra/doc", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + "Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f" }, { "ImportPath": "github.com/spf13/jwalterweatherman", diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 08607c1ddcf..f792000d584 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -328,7 +328,7 @@ }, { "ImportPath": "github.com/spf13/cobra", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + "Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f" }, { "ImportPath": "github.com/spf13/pflag", diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index ec78f699e49..32d1debb00a 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -308,7 +308,7 @@ }, { "ImportPath": "github.com/spf13/cobra", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + "Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f" }, { "ImportPath": "github.com/spf13/pflag", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json 
b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 3281bf2fb31..a8c8d2a5cad 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -296,7 +296,7 @@ }, { "ImportPath": "github.com/spf13/cobra", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + "Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f" }, { "ImportPath": "github.com/spf13/pflag", diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml index 6e84be54d16..68efa136331 100644 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -1,11 +1,10 @@ language: go -go: - - 1.4.3 - - 1.5.4 - - 1.6.3 - - tip matrix: + include: + - go: 1.7.6 + - go: 1.8.3 + - go: tip allow_failures: - go: tip @@ -16,3 +15,7 @@ before_install: script: - PATH=$PATH:$PWD/bin go test -v ./... - go build + - diff -u <(echo -n) <(gofmt -d -s .) + - if [ -z $NOVET ]; then + diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); + fi diff --git a/vendor/github.com/spf13/cobra/BUILD b/vendor/github.com/spf13/cobra/BUILD index 5c16bacbce2..158c8f10692 100644 --- a/vendor/github.com/spf13/cobra/BUILD +++ b/vendor/github.com/spf13/cobra/BUILD @@ -3,9 +3,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "args.go", "bash_completions.go", "cobra.go", "command.go", + "zsh_completions.go", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "command_notwin.go", diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index b338a0e4424..d7279e4f9f9 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -8,6 +8,7 @@ Many of the most widely used Go projects are built using Cobra including: * [Hugo](http://gohugo.io) * [rkt](https://github.com/coreos/rkt) * [etcd](https://github.com/coreos/etcd) +* [Moby (former Docker)](https://github.com/moby/moby) * [Docker (distribution)](https://github.com/docker/distribution) * [OpenShift](https://www.openshift.com/) * [Delve](https://github.com/derekparker/delve) @@ -15,16 +16,36 @@ Many of the most widely used Go projects are built using Cobra including: * [CockroachDB](http://www.cockroachlabs.com/) * [Bleve](http://www.blevesearch.com/) * [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [Parse (CLI)](https://parse.com/) * [GiantSwarm's swarm](https://github.com/giantswarm/cli) * [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) - +* [rclone](http://rclone.org/) +* [nehm](https://github.com/bogem/nehm) [![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) [![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) +[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) -![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif) +# Table of Contents + +- [Overview](#overview) +- [Concepts](#concepts) + * [Commands](#commands) + * [Flags](#flags) +- [Installing](#installing) +- [Getting Started](#getting-started) + * [Using the Cobra Generator](#using-the-cobra-generator) + * [Using 
the Cobra Library](#using-the-cobra-library) + * [Working with Flags](#working-with-flags) + * [Positional and Custom Arguments](#positional-and-custom-arguments) + * [Example](#example) + * [Help Command](#help-command) + * [Usage Message](#usage-message) + * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) + * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) + * [Generating documentation for your command](#generating-documentation-for-your-command) + * [Generating bash completions](#generating-bash-completions) +- [Contributing](#contributing) +- [License](#license) # Overview @@ -39,27 +60,16 @@ Cobra provides: * Fully POSIX-compliant flags (including short & long versions) * Nested subcommands * Global, local and cascading flags -* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname` +* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` * Intelligent suggestions (`app srver`... did you mean `app server`?) * Automatic help generation for commands and flags -* Automatic detailed help for `app help [command]` * Automatic help flag recognition of `-h`, `--help`, etc. * Automatically generated bash autocomplete for your application * Automatically generated man pages for your application * Command aliases so you can change things without breaking them -* The flexibilty to define your own help, usage, etc. +* The flexibility to define your own help, usage, etc. * Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps -Cobra has an exceptionally clean interface and simple design without needless -constructors or initialization methods. - -Applications built with Cobra commands are designed to be as user-friendly as -possible. Flags can be placed before or after the command (as long as a -confusing space isn’t provided). Both short and long flags can be used. A -command need not even be fully typed. Help is automatically generated and -available for the application or for a specific command using either the help -command or the `--help` flag. - # Concepts Cobra is built on a structure of commands, arguments & flags. @@ -78,11 +88,11 @@ A few good real world examples may better illustrate this point. In the following example, 'server' is a command, and 'port' is a flag: - > hugo server --port=1313 + hugo server --port=1313 In this command we are telling Git to clone the url bare. - > git clone URL --bare + git clone URL --bare ## Commands @@ -92,20 +102,11 @@ have children commands and optionally run an action. In the example above, 'server' is the command. -A Command has the following structure: - -```go -type Command struct { - Use string // The one-line usage message. - Short string // The short description shown in the 'help' output. - Long string // The long message shown in the 'help ' output. - Run func(cmd *Command, args []string) // Run runs the command. -} -``` +[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) ## Flags -A Flag is a way to modify the behavior of a command. Cobra supports +A flag is a way to modify the behavior of a command. Cobra supports fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). A Cobra command can define flags that persist through to children commands and flags that are only available to that command. @@ -113,23 +114,15 @@ and flags that are only available to that command. In the example above, 'port' is the flag. 
Flag functionality is provided by the [pflag -library](https://github.com/ogier/pflag), a fork of the flag standard library +library](https://github.com/spf13/pflag), a fork of the flag standard library which maintains the same interface while adding POSIX compliance. -## Usage - -Cobra works by creating a set of commands and then organizing them into a tree. -The tree defines the structure of the application. - -Once each command is defined with its corresponding flags, then the -tree is assigned to the commander which is finally executed. - # Installing Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executible -along with the library: +of the library. This command will install the `cobra` generator executable +along with the library and its dependencies: - > go get -v github.com/spf13/cobra/cobra + go get -u github.com/spf13/cobra/cobra Next, include Cobra in your application: @@ -139,8 +132,8 @@ import "github.com/spf13/cobra" # Getting Started -While you are welcome to provide your own organization, typically a Cobra based -application will follow the following organizational structure. +While you are welcome to provide your own organization, typically a Cobra-based +application will follow the following organizational structure: ``` ▾ appName/ @@ -152,18 +145,23 @@ application will follow the following organizational structure. main.go ``` -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. +In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. ```go package main -import "{pathToYourApp}/cmd" +import ( + "fmt" + "os" + + "{pathToYourApp}/cmd" +) func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } } ``` @@ -172,99 +170,14 @@ func main() { Cobra provides its own program that will create your application and add any commands you want. It's the easiest way to incorporate Cobra into your application. -In order to use the cobra command, compile it using the following command: +[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - > go install github.com/spf13/cobra/cobra +## Using the Cobra Library -This will create the cobra executable under your go path bin directory! - -### cobra init - -The `cobra init [yourApp]` command will create your initial application code -for you. It is a very powerful application that will populate your program with -the right structure so you can immediately enjoy all the benefits of Cobra. It -will also automatically apply the license you specify to your application. - -Cobra init is pretty smart. You can provide it a full path, or simply a path -similar to what is expected in the import. - -``` -cobra init github.com/spf13/newAppName -``` - -### cobra add - -Once an application is initialized Cobra can create additional commands for you. 
-Let's say you created an app and you wanted the following commands for it: - -* app serve -* app config -* app config create - -In your project directory (where your main.go file is) you would run the following: - -``` -cobra add serve -cobra add config -cobra add create -p 'configCmd' -``` - -Once you have run these three commands you would have an app structure that would look like: - -``` - ▾ app/ - ▾ cmd/ - serve.go - config.go - create.go - main.go -``` - -at this point you can run `go run main.go` and it would run your app. `go run -main.go serve`, `go run main.go config`, `go run main.go config create` along -with `go run main.go help serve`, etc would all work. - -Obviously you haven't added your own code to these yet, the commands are ready -for you to give them their tasks. Have fun. - -### Configuring the cobra generator - -The cobra generator will be easier to use if you provide a simple configuration -file which will help you eliminate providing a bunch of repeated information in -flags over and over. - -An example ~/.cobra.yaml file: - -```yaml -author: Steve Francia -license: MIT -``` - -You can specify no license by setting `license` to `none` or you can specify -a custom license: - -```yaml -license: - header: This file is part of {{ .appName }}. - text: | - {{ .copyright }} - - This is my license. There are many like it, but this one is mine. - My license is my best friend. It is my life. I must master it as I must - master my life. -``` - -## Manually implementing Cobra - -To manually implement cobra you need to create a bare main.go file and a RootCmd file. +To manually implement Cobra you need to create a bare main.go file and a RootCmd file. You will optionally provide additional commands as you see fit. -### Create the root command - -The root command represents your binary itself. - - -#### Manually create rootCmd +### Create rootCmd Cobra doesn't require any special constructors. Simply create your commands. @@ -272,34 +185,67 @@ Ideally you place this in app/cmd/root.go: ```go var RootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with love by spf13 and friends in Go. Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, } ``` You will additionally define flags and handle configuration in your init() function. -for example cmd/root.go: +For example cmd/root.go: ```go +import ( + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + func init() { - cobra.OnInitialize(initConfig) - RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. 
github.com/spf13/") - RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") + cobra.OnInitialize(initConfig) + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") + RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") + RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") +} + +func initConfig() { + // Don't forget to read config either from cfgFile or from home directory! + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := homedir.Dir() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Search config in home directory with name ".cobra" (without extension). + viper.AddConfigPath(home) + viper.SetConfigName(".cobra") + } + + if err := viper.ReadInConfig(); err != nil { + fmt.Println("Can't read config:", err) + os.Exit(1) + } } ``` @@ -313,17 +259,21 @@ In a Cobra app, typically the main.go file is very bare. It serves, one purpose, ```go package main -import "{pathToYourApp}/cmd" +import ( + "fmt" + "os" + + "{pathToYourApp}/cmd" +) func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } } ``` - ### Create additional commands Additional commands can be defined and typically are each given their own file @@ -336,47 +286,24 @@ populate it with the following: package cmd import ( - "github.com/spf13/cobra" + "github.com/spf13/cobra" + "fmt" ) func init() { - RootCmd.AddCommand(versionCmd) + RootCmd.AddCommand(versionCmd) } var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, } ``` -### Attach command to its parent - - -If you notice in the above example we attach the command to its parent. In -this case the parent is the rootCmd. 
In this example we are attaching it to the -root, but commands can be attached at any level. - -```go -RootCmd.AddCommand(versionCmd) -``` - -### Remove a command from its parent - -Removing a command is not a common action in simple programs, but it allows 3rd -parties to customize an existing command tree. - -In this example, we remove the existing `VersionCmd` command of an existing -root command, and we replace it with our own version: - -```go -mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) -mainlib.RootCmd.AddCommand(versionCmd) -``` - ## Working with Flags Flags provide modifiers to control how the action command operates. @@ -412,6 +339,71 @@ A flag can also be assigned locally which will only apply to that specific comma RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") ``` +### Local Flag on Parent Commands + +By default Cobra only parses local flags on the target command, any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will +parse local flags on each command before executing the target command. + +```go +command := cobra.Command{ + Use: "print [OPTIONS] [COMMANDS]", + TraverseChildren: true, +} +``` + +### Bind Flags with Config + +You can also bind your flags with [viper](https://github.com/spf13/viper): +```go +var author string + +func init() { + RootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) +} +``` + +In this example the persistent flag `author` is bound with `viper`. +**Note**, that the variable `author` will not be set to the value from config, +when the `--author` flag is not provided by user. + +More in [viper documentation](https://github.com/spf13/viper#working-with-flags). + +## Positional and Custom Arguments + +Validation of positional arguments can be specified using the `Args` field +of `Command`. + +The following validators are built in: + +- `NoArgs` - the command will report an error if there are any positional args. +- `ArbitraryArgs` - the command will accept any args. +- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. +- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. +- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. +- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. +- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. 
+ +An example of setting the custom validator: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("requires at least one arg") + } + if myapp.IsValidColor(args[0]) { + return nil + } + return fmt.Errorf("invalid color specified: %s", args[0]) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` ## Example @@ -428,62 +420,62 @@ More documentation about flags is available at https://github.com/spf13/pflag package main import ( - "fmt" - "strings" + "fmt" + "strings" - "github.com/spf13/cobra" + "github.com/spf13/cobra" ) func main() { + var echoTimes int - var echoTimes int + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. +For many years people have printed back to the screen.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. - For many years people have printed back to the screen. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. +Echo works a lot like print, except it has a child command.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. - Echo works a lot like print, except it has a child command. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing +a count and a string.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing - a count and a string.`, - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() } ``` For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). -## The Help Command +## Help Command Cobra automatically adds a help command to your application when you have subcommands. 
This will be called when a user runs 'app help'. Additionally, help will also @@ -496,60 +488,28 @@ create' is called. Every command will automatically have the '--help' flag adde The following output is automatically generated by Cobra. Nothing beyond the command and flag definitions are needed. - > hugo help + $ cobra help - hugo is the main command, used to build your Hugo site. - - Hugo is a Fast and Flexible Static Site Generator - built with love by spf13 and friends in Go. - - Complete documentation is available at http://gohugo.io/. + Cobra is a CLI library for Go that empowers applications. + This application is a tool to generate the needed files + to quickly create a Cobra application. Usage: - hugo [flags] - hugo [command] + cobra [command] Available Commands: - server Hugo runs its own webserver to render the files - version Print the version number of Hugo - config Print the site configuration - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times. - convert Convert your content to different formats - new Create new content for your site - list Listing out various types of content - undraft Undraft changes the content's draft status from 'True' to 'False' - genautocomplete Generate shell autocompletion script for Hugo - gendoc Generate Markdown documentation for the Hugo CLI. - genman Generate man page for Hugo - import Import your site from others. + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application Flags: - -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ - -D, --buildDrafts[=false]: include content marked as draft - -F, --buildFuture[=false]: include content with publishdate in the future - --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/ - --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - --disableRSS[=false]: Do not build RSS files - --disableSitemap[=false]: Do not build Sitemap file - --editor="": edit new content with this editor, if provided - --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it - --log[=false]: Enable Logging - --logFile="": Log File path (if set, logging enabled automatically) - --noTimes[=false]: Don't sync modification time of files - --pluralizeListTitles[=true]: Pluralize titles in lists using inflect - --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") - -s, --source="": filesystem path to read files relative from - --stepAnalysis[=false]: display memory and timing of different steps of the program - -t, --theme="": theme to use (located in /themes/THEMENAME/) - --uglyURLs[=false]: if true, use /filename.html instead of /filename/ - -v, --verbose[=false]: verbose output - --verboseLog[=false]: verbose logging - -w, --watch[=false]: watch filesystem for changes and recreate as needed + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) - Use "hugo [command] --help" for more information about a command. + Use "cobra [command] --help" for more information about a command. 
Help is just a command like any other. There is no special logic or behavior @@ -557,38 +517,18 @@ around it. In fact, you can provide your own if you want. ### Defining your own help -You can provide your own Help command or your own template for the default command to use. - -The default help command is +You can provide your own Help command or your own template for the default command to use +with following functions: ```go -func (c *Command) initHelp() { - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), - } - } - c.AddCommand(c.helpCommand) -} -``` - -You can provide your own command, function or template through the following methods: - -```go -command.SetHelpCommand(cmd *Command) - -command.SetHelpFunc(f func(*Command, []string)) - -command.SetHelpTemplate(s string) +cmd.SetHelpCommand(cmd *Command) +cmd.SetHelpFunc(f func(*Command, []string)) +cmd.SetHelpTemplate(s string) ``` The latter two will also apply to any children commands. -## Usage +## Usage Message When the user provides an invalid flag or invalid command, Cobra responds by showing the user the 'usage'. @@ -597,73 +537,37 @@ showing the user the 'usage'. You may recognize this from the help above. That's because the default help embeds the usage as part of its output. + $ cobra --invalid + Error: unknown flag: --invalid Usage: - hugo [flags] - hugo [command] + cobra [command] Available Commands: - server Hugo runs its own webserver to render the files - version Print the version number of Hugo - config Print the site configuration - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times. - convert Convert your content to different formats - new Create new content for your site - list Listing out various types of content - undraft Undraft changes the content's draft status from 'True' to 'False' - genautocomplete Generate shell autocompletion script for Hugo - gendoc Generate Markdown documentation for the Hugo CLI. - genman Generate man page for Hugo - import Import your site from others. + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application Flags: - -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ - -D, --buildDrafts[=false]: include content marked as draft - -F, --buildFuture[=false]: include content with publishdate in the future - --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ - --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - --disableRSS[=false]: Do not build RSS files - --disableSitemap[=false]: Do not build Sitemap file - --editor="": edit new content with this editor, if provided - --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it - --log[=false]: Enable Logging - --logFile="": Log File path (if set, logging enabled automatically) - --noTimes[=false]: Don't sync modification time of files - --pluralizeListTitles[=true]: Pluralize titles in lists using inflect - --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") - -s, --source="": filesystem path to read files relative from - --stepAnalysis[=false]: display memory and timing of different steps of the program - -t, --theme="": theme to use (located in /themes/THEMENAME/) - --uglyURLs[=false]: if true, use /filename.html instead of /filename/ - -v, --verbose[=false]: verbose output - --verboseLog[=false]: verbose logging - -w, --watch[=false]: watch filesystem for changes and recreate as needed + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. ### Defining your own usage You can provide your own usage function or template for Cobra to use. - -The default usage function is: - -```go -return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - return err -} -``` - Like help, the function and template are overridable through public methods: ```go -command.SetUsageFunc(f func(*Command) error) - -command.SetUsageTemplate(s string) +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) ``` -## PreRun or PostRun Hooks +## PreRun and PostRun Hooks -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order: +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - `PersistentPreRun` - `PreRun` @@ -677,105 +581,73 @@ An example of two commands which use all of these features is below. 
When the s package main import ( - "fmt" + "fmt" - "github.com/spf13/cobra" + "github.com/spf13/cobra" ) func main() { - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } - rootCmd.AddCommand(subCmd) + rootCmd.AddCommand(subCmd) - rootCmd.SetArgs([]string{""}) - _ = rootCmd.Execute() - fmt.Print("\n") - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - _ = rootCmd.Execute() + rootCmd.SetArgs([]string{""}) + rootCmd.Execute() + fmt.Println() + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + rootCmd.Execute() } ``` +Output: +``` +Inside rootCmd PersistentPreRun with args: [] +Inside rootCmd PreRun with args: [] +Inside rootCmd Run with args: [] +Inside rootCmd PostRun with args: [] +Inside rootCmd PersistentPostRun with args: [] -## Alternative Error Handling - -Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top, -providing a way to handle the errors in one location. 
The current list of functions that return an error is: - -* PersistentPreRunE -* PreRunE -* RunE -* PostRunE -* PersistentPostRunE - -If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage` -and `SilenceErrors` to `false` on the command. A child command respects these flags if they are set on the parent -command. - -**Example Usage using RunE:** - -```go -package main - -import ( - "errors" - "log" - - "github.com/spf13/cobra" -) - -func main() { - var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - RunE: func(cmd *cobra.Command, args []string) error { - // Do Stuff Here - return errors.New("some random error") - }, - } - - if err := rootCmd.Execute(); err != nil { - log.Fatal(err) - } -} +Inside rootCmd PersistentPreRun with args: [arg1 arg2] +Inside subCmd PreRun with args: [arg1 arg2] +Inside subCmd Run with args: [arg1 arg2] +Inside subCmd PostRun with args: [arg1 arg2] +Inside subCmd PersistentPostRun with args: [arg1 arg2] ``` ## Suggestions when "unknown command" happens @@ -818,81 +690,28 @@ Did you mean this? Run 'kubectl help' for usage. ``` -## Generating Markdown-formatted documentation for your command +## Generating documentation for your command -Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md). +Cobra can generate documentation based on subcommands, flags, etc. in the following formats: -## Generating man pages for your command +- [Markdown](doc/md_docs.md) +- [ReStructured Text](doc/rest_docs.md) +- [Man Page](doc/man_docs.md) -Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md). - -## Generating bash completions for your command +## Generating bash completions Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). -## Debugging - -Cobra provides a ‘DebugFlags’ method on a command which, when called, will print -out everything Cobra knows about the flags for each command. - -### Example - -```go -command.DebugFlags() -``` - -## Release Notes -* **0.9.0** June 17, 2014 - * flags can appears anywhere in the args (provided they are unambiguous) - * --help prints usage screen for app or command - * Prefix matching for commands - * Cleaner looking help and usage output - * Extensive test suite -* **0.8.0** Nov 5, 2013 - * Reworked interface to remove commander completely - * Command now primary structure - * No initialization needed - * Usage & Help templates & functions definable at any level - * Updated Readme -* **0.7.0** Sept 24, 2013 - * Needs more eyes - * Test suite - * Support for automatic error messages - * Support for help command - * Support for printing to any io.Writer instead of os.Stderr - * Support for persistent flags which cascade down tree - * Ready for integration into Hugo -* **0.1.0** Sept 3, 2013 - * Implement first draft - -## Extensions - -Libraries for extending Cobra: - -* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. 
It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`. - -## ToDo -* Launch proper documentation site - -## Contributing +# Contributing 1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request +2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) +3. Create your feature branch (`git checkout -b my-new-feature`) +4. Make changes and add them (`git add .`) +5. Commit your changes (`git commit -m 'Add some feature'`) +6. Push to the branch (`git push origin my-new-feature`) +7. Create new pull request -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13), -[eparis](https://github.com/eparis), -[bep](https://github.com/bep), and many more! - -## License +# License Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go new file mode 100644 index 00000000000..a5d8a9273ea --- /dev/null +++ b/vendor/github.com/spf13/cobra/args.go @@ -0,0 +1,89 @@ +package cobra + +import ( + "fmt" +) + +type PositionalArgs func(cmd *Command, args []string) error + +// Legacy arg validation has the following behaviour: +// - root commands with no subcommands can take arbitrary arguments +// - root commands with subcommands will do subcommand validity checking +// - subcommands will always accept arbitrary arguments +func legacyArgs(cmd *Command, args []string) error { + // no subcommand, always take args + if !cmd.HasSubCommands() { + return nil + } + + // root command with subcommands, do subcommand checking. + if !cmd.HasParent() && len(args) > 0 { + return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + return nil +} + +// NoArgs returns an error if any args are included. +func NoArgs(cmd *Command, args []string) error { + if len(args) > 0 { + return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) + } + return nil +} + +// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. +func OnlyValidArgs(cmd *Command, args []string) error { + if len(cmd.ValidArgs) > 0 { + for _, v := range args { + if !stringInSlice(v, cmd.ValidArgs) { + return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + } + } + return nil +} + +// ArbitraryArgs never returns an error. +func ArbitraryArgs(cmd *Command, args []string) error { + return nil +} + +// MinimumNArgs returns an error if there is not at least N args. +func MinimumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < n { + return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) + } + return nil + } +} + +// MaximumNArgs returns an error if there are more than N args. +func MaximumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) > n { + return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// ExactArgs returns an error if there are not exactly n args. 
+func ExactArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) != n { + return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// RangeArgs returns an error if the number of args is not within the expected range. +func RangeArgs(min int, max int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < min || len(args) > max { + return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) + } + return nil + } +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 236dee67f21..c19fe7a068b 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -1,6 +1,7 @@ package cobra import ( + "bytes" "fmt" "io" "os" @@ -10,19 +11,17 @@ import ( "github.com/spf13/pflag" ) +// Annotations for Bash completion. const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" BashCompCustom = "cobra_annotation_bash_completion_custom" BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" ) -func preamble(out io.Writer, name string) error { - _, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name) - if err != nil { - return err - } - _, err = fmt.Fprint(out, ` +func writePreamble(buf *bytes.Buffer, name string) { + buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + buf.WriteString(` __debug() { if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then @@ -87,13 +86,13 @@ __handle_reply() local index flag flag="${cur%%=*}" __index_of_word "${flag}" "${flags_with_completion[@]}" + COMPREPLY=() if [[ ${index} -ge 0 ]]; then - COMPREPLY=() PREFIX="" cur="${cur#*=}" ${flags_completion[${index}]} if [ -n "${ZSH_VERSION}" ]; then - # zfs completion needs --flag= prefix + # zsh completion needs --flag= prefix eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" fi fi @@ -133,7 +132,10 @@ __handle_reply() declare -F __custom_func >/dev/null && __custom_func fi - __ltrim_colon_completions "$cur" + # available in bash-completion >= 2, not always present on macOS + if declare -F __ltrim_colon_completions >/dev/null; then + __ltrim_colon_completions "$cur" + fi } # The arguments should be in the form "ext1|ext2|extn" @@ -224,7 +226,7 @@ __handle_command() fi c=$((c+1)) __debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F $next_command >/dev/null && $next_command + declare -F "$next_command" >/dev/null && $next_command } __handle_word() @@ -247,16 +249,12 @@ __handle_word() } `) - return err } -func postscript(w io.Writer, name string) error { +func writePostscript(buf *bytes.Buffer, name string) { name = strings.Replace(name, ":", "__", -1) - _, err := fmt.Fprintf(w, "__start_%s()\n", name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, `{ + buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) + buf.WriteString(fmt.Sprintf(`{ local cur prev words cword declare -A flaghash 2>/dev/null || : if declare -F _init_completion >/dev/null 2>&1; then @@ -280,318 +278,227 @@ func postscript(w io.Writer, name string) error { __handle_word } -`, name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then +`, name)) + 
buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then complete -o default -F __start_%s %s else complete -o default -o nospace -F __start_%s %s fi -`, name, name, name, name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n") - return err +`, name, name, name, name)) + buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") } -func writeCommands(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil { - return err - } +func writeCommands(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" commands=()\n") for _, c := range cmd.Commands() { if !c.IsAvailableCommand() || c == cmd.helpCommand { continue } - if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil { - return err - } + buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) } - _, err := fmt.Fprintf(w, "\n") - return err + buf.WriteString("\n") } -func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error { +func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string) { for key, value := range annotations { switch key { case BashCompFilenameExt: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - if err != nil { - return err - } + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + var ext string if len(value) > 0 { - ext := "__handle_filename_extension_flag " + strings.Join(value, "|") - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + ext = "__handle_filename_extension_flag " + strings.Join(value, "|") } else { - ext := "_filedir" - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } - if err != nil { - return err + ext = "_filedir" } + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) case BashCompCustom: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - if err != nil { - return err - } + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) if len(value) > 0 { handlers := strings.Join(value, "; ") - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers) + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) } else { - _, err = fmt.Fprintf(w, " flags_completion+=(:)\n") - } - if err != nil { - return err + buf.WriteString(" flags_completion+=(:)\n") } case BashCompSubdirsInDir: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + var ext string if len(value) == 1 { - ext := "__handle_subdirs_in_dir_flag " + value[0] - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + ext = "__handle_subdirs_in_dir_flag " + value[0] } else { - ext := "_filedir -d" - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } - if err != nil { - return err + ext = "_filedir -d" } + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) } } - return nil } -func writeShortFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) +func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag) { name := flag.Shorthand format := " " - if !b { + if len(flag.NoOptDefVal) == 0 { format += "two_word_" } format += "flags+=(\"-%s\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return writeFlagHandler("-"+name, flag.Annotations, w) + buf.WriteString(fmt.Sprintf(format, name)) + writeFlagHandler(buf, "-"+name, flag.Annotations) } -func writeFlag(flag *pflag.Flag, w io.Writer) error { - b := 
(len(flag.NoOptDefVal) > 0) +func writeFlag(buf *bytes.Buffer, flag *pflag.Flag) { name := flag.Name format := " flags+=(\"--%s" - if !b { + if len(flag.NoOptDefVal) == 0 { format += "=" } format += "\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return writeFlagHandler("--"+name, flag.Annotations, w) + buf.WriteString(fmt.Sprintf(format, name)) + writeFlagHandler(buf, "--"+name, flag.Annotations) } -func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) +func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { name := flag.Name format := " local_nonpersistent_flags+=(\"--%s" - if !b { + if len(flag.NoOptDefVal) == 0 { format += "=" } format += "\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return nil + buf.WriteString(fmt.Sprintf(format, name)) } -func writeFlags(cmd *Command, w io.Writer) error { - _, err := fmt.Fprintf(w, ` flags=() +func writeFlags(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(` flags=() two_word_flags=() local_nonpersistent_flags=() flags_with_completion=() flags_completion=() `) - if err != nil { - return err - } localNonPersistentFlags := cmd.LocalNonPersistentFlags() - var visitErr error cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if err := writeFlag(flag, w); err != nil { - visitErr = err + if nonCompletableFlag(flag) { return } + writeFlag(buf, flag) if len(flag.Shorthand) > 0 { - if err := writeShortFlag(flag, w); err != nil { - visitErr = err - return - } + writeShortFlag(buf, flag) } if localNonPersistentFlags.Lookup(flag.Name) != nil { - if err := writeLocalNonPersistentFlag(flag, w); err != nil { - visitErr = err - return - } + writeLocalNonPersistentFlag(buf, flag) } }) - if visitErr != nil { - return visitErr - } cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if err := writeFlag(flag, w); err != nil { - visitErr = err + if nonCompletableFlag(flag) { return } + writeFlag(buf, flag) if len(flag.Shorthand) > 0 { - if err := writeShortFlag(flag, w); err != nil { - visitErr = err - return - } + writeShortFlag(buf, flag) } }) - if visitErr != nil { - return visitErr - } - _, err = fmt.Fprintf(w, "\n") - return err + buf.WriteString("\n") } -func writeRequiredFlag(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil { - return err - } +func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" must_have_one_flag=()\n") flags := cmd.NonInheritedFlags() - var visitErr error flags.VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } for key := range flag.Annotations { switch key { case BashCompOneRequiredFlag: format := " must_have_one_flag+=(\"--%s" - b := (flag.Value.Type() == "bool") - if !b { + if flag.Value.Type() != "bool" { format += "=" } format += "\")\n" - if _, err := fmt.Fprintf(w, format, flag.Name); err != nil { - visitErr = err - return - } + buf.WriteString(fmt.Sprintf(format, flag.Name)) if len(flag.Shorthand) > 0 { - if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil { - visitErr = err - return - } + buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) } } } }) - return visitErr } -func writeRequiredNouns(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil { - return err - } +func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" 
must_have_one_noun=()\n") sort.Sort(sort.StringSlice(cmd.ValidArgs)) for _, value := range cmd.ValidArgs { - if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil { - return err - } + buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } - return nil } -func writeArgAliases(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil { - return err - } +func writeArgAliases(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" noun_aliases=()\n") sort.Sort(sort.StringSlice(cmd.ArgAliases)) for _, value := range cmd.ArgAliases { - if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil { - return err - } + buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) } - return nil } -func gen(cmd *Command, w io.Writer) error { +func gen(buf *bytes.Buffer, cmd *Command) { for _, c := range cmd.Commands() { if !c.IsAvailableCommand() || c == cmd.helpCommand { continue } - if err := gen(c, w); err != nil { - return err - } + gen(buf, c) } commandName := cmd.CommandPath() commandName = strings.Replace(commandName, " ", "_", -1) commandName = strings.Replace(commandName, ":", "__", -1) - if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil { - return err - } - if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil { - return err - } - if err := writeCommands(cmd, w); err != nil { - return err - } - if err := writeFlags(cmd, w); err != nil { - return err - } - if err := writeRequiredFlag(cmd, w); err != nil { - return err - } - if err := writeRequiredNouns(cmd, w); err != nil { - return err - } - if err := writeArgAliases(cmd, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "}\n\n"); err != nil { - return err - } - return nil + buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) + writeCommands(buf, cmd) + writeFlags(buf, cmd) + writeRequiredFlag(buf, cmd) + writeRequiredNouns(buf, cmd) + writeArgAliases(buf, cmd) + buf.WriteString("}\n\n") } -func (cmd *Command) GenBashCompletion(w io.Writer) error { - if err := preamble(w, cmd.Name()); err != nil { - return err +// GenBashCompletion generates bash completion file and writes to the passed writer. +func (c *Command) GenBashCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + writePreamble(buf, c.Name()) + if len(c.BashCompletionFunction) > 0 { + buf.WriteString(c.BashCompletionFunction + "\n") } - if len(cmd.BashCompletionFunction) > 0 { - if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil { - return err - } - } - if err := gen(cmd, w); err != nil { - return err - } - return postscript(w, cmd.Name()) + gen(buf, c) + writePostscript(buf, c.Name()) + + _, err := buf.WriteTo(w) + return err } -func (cmd *Command) GenBashCompletionFile(filename string) error { +func nonCompletableFlag(flag *pflag.Flag) bool { + return flag.Hidden || len(flag.Deprecated) > 0 +} + +// GenBashCompletionFile generates bash completion file. +func (c *Command) GenBashCompletionFile(filename string) error { outFile, err := os.Create(filename) if err != nil { return err } defer outFile.Close() - return cmd.GenBashCompletion(outFile) + return c.GenBashCompletion(outFile) } // MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. 
-func (cmd *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(cmd.Flags(), name) +func (c *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(c.Flags(), name) } // MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. -func (cmd *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(cmd.PersistentFlags(), name) +func (c *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(c.PersistentFlags(), name) } // MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. @@ -601,20 +508,20 @@ func MarkFlagRequired(flags *pflag.FlagSet, name string) error { // MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. // Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(cmd.Flags(), name, extensions...) +func (c *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.Flags(), name, extensions...) } // MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. // Generated bash autocompletion will call the bash function f for the flag. -func (cmd *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(cmd.Flags(), name, f) +func (c *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(c.Flags(), name, f) } // MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. // Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...) +func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.PersistentFlags(), name, extensions...) } // MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index 6e3b71f13d5..52bd39ddb1d 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -18,7 +18,7 @@ func main() { } ``` -That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. +`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. 
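As a minimal, illustrative sketch (not part of the hunk above, and assuming a hypothetical `rootCmd`), driving the `GenBashCompletionFile` helper documented here looks roughly like this:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	// A hypothetical root command; any *cobra.Command works the same way.
	rootCmd := &cobra.Command{
		Use:   "app",
		Short: "example application",
	}

	// Write the generated bash completion script to a file.
	if err := rootCmd.GenBashCompletionFile("out.sh"); err != nil {
		log.Fatal(err)
	}
}
```

Sourcing the generated `out.sh` (or copying it to `/etc/bash_completion.d/` as described above) enables the completions in a new shell session.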
## Creating your own custom functions @@ -106,7 +106,7 @@ node pod replicationcontroller service If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: -```go` +```go argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } cmd := &cobra.Command{ diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index 93a2c0f3a77..e4b910c5d7a 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -27,48 +27,59 @@ import ( ) var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "trimTrailingWhitespaces": trimRightSpace, + "appendIfNotPresent": appendIfNotPresent, + "rpad": rpad, + "gt": Gt, + "eq": Eq, } var initializers []func() -// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. -// Set this to true to enable it +// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing +// to automatically enable in CLI tools. +// Set this to true to enable it. var EnablePrefixMatching = false -//EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -//To disable sorting, set it to false. +// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. +// To disable sorting, set it to false. var EnableCommandSorting = true -//AddTemplateFunc adds a template function that's available to Usage and Help -//template generation. +// MousetrapHelpText enables an information splash screen on Windows +// if the CLI is started from explorer.exe. +// To disable the mousetrap, just set this variable to blank string (""). +// Works only on Microsoft Windows. +var MousetrapHelpText string = `This is a command line tool. + +You need to open cmd.exe and run it from there. +` + +// AddTemplateFunc adds a template function that's available to Usage and Help +// template generation. func AddTemplateFunc(name string, tmplFunc interface{}) { templateFuncs[name] = tmplFunc } -//AddTemplateFuncs adds multiple template functions availalble to Usage and -//Help template generation. +// AddTemplateFuncs adds multiple template functions that are available to Usage and +// Help template generation. func AddTemplateFuncs(tmplFuncs template.FuncMap) { for k, v := range tmplFuncs { templateFuncs[k] = v } } -//OnInitialize takes a series of func() arguments and appends them to a slice of func(). +// OnInitialize takes a series of func() arguments and appends them to a slice of func(). func OnInitialize(y ...func()) { - for _, x := range y { - initializers = append(initializers, x) - } + initializers = append(initializers, y...) } -//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -//ints and then compared. +// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +// Maps and Slices, Gt will compare their lengths. 
Ints are compared directly while strings are first parsed as +// ints and then compared. func Gt(a interface{}, b interface{}) bool { var left, right int64 av := reflect.ValueOf(a) @@ -96,7 +107,9 @@ func Gt(a interface{}, b interface{}) bool { return left > right } -//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. func Eq(a interface{}, b interface{}) bool { av := reflect.ValueOf(a) bv := reflect.ValueOf(b) @@ -116,7 +129,9 @@ func trimRightSpace(s string) string { return strings.TrimRightFunc(s, unicode.IsSpace) } -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s +// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. func appendIfNotPresent(s, stringToAppend string) string { if strings.Contains(s, stringToAppend) { return s @@ -124,7 +139,7 @@ func appendIfNotPresent(s, stringToAppend string) string { return s + " " + stringToAppend } -//rpad adds padding to the right of a string +// rpad adds padding to the right of a string. func rpad(s string, padding int) string { template := fmt.Sprintf("%%-%ds", padding) return fmt.Sprintf(template, s) @@ -138,7 +153,7 @@ func tmpl(w io.Writer, text string, data interface{}) error { return t.Execute(w, data) } -// ld compares two strings and returns the levenshtein distance between them +// ld compares two strings and returns the levenshtein distance between them. func ld(s, t string, ignoreCase bool) int { if ignoreCase { s = strings.ToLower(s) @@ -173,3 +188,12 @@ func ld(s, t string, ignoreCase bool) int { } return d[len(s)][len(t)] } + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 083e4ea7f47..eb311a7373a 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. package cobra import ( @@ -28,107 +28,158 @@ import ( ) // Command is just that, a command for your application. -// eg. 'go run' ... 'run' is the command. Cobra requires +// E.g. 'go run ...' - 'run' is the command. Cobra requires // you to define the usage and description as part of your command // definition to ensure usability. type Command struct { - // Name is the command name, usually the executable's name. - name string - // The one-line usage message. + // Use is the one-line usage message. 
Use string - // An array of aliases that can be used instead of the first word in Use. + + // Aliases is an array of aliases that can be used instead of the first word in Use. Aliases []string - // An array of command names for which this command will be suggested - similar to aliases but only suggests. + + // SuggestFor is an array of command names for which this command will be suggested - + // similar to aliases but only suggests. SuggestFor []string - // The short description shown in the 'help' output. + + // Short is the short description shown in the 'help' output. Short string - // The long message shown in the 'help ' output. + + // Long is the long message shown in the 'help ' output. Long string - // Examples of how to use the command + + // Example is examples of how to use the command. Example string - // List of all valid non-flag arguments that are accepted in bash completions + + // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions ValidArgs []string - // List of aliases for ValidArgs. These are not suggested to the user in the bash - // completion, but accepted if entered manually. + + // Expected arguments + Args PositionalArgs + + // ArgAliases is List of aliases for ValidArgs. + // These are not suggested to the user in the bash completion, + // but accepted if entered manually. ArgAliases []string - // Custom functions used by the bash autocompletion generator + + // BashCompletionFunction is custom functions used by the bash autocompletion generator. BashCompletionFunction string - // Is this command deprecated and should print this string when used? + + // Deprecated defines, if this command is deprecated and should print this string when used. Deprecated string - // Is this command hidden and should NOT show up in the list of available commands? + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. Hidden bool - // Full set of flags - flags *flag.FlagSet - // Set of flags childrens of this command will inherit - pflags *flag.FlagSet - // Flags that are declared specifically by this command (not inherited). - lflags *flag.FlagSet - // SilenceErrors is an option to quiet errors down stream - SilenceErrors bool - // Silence Usage is an option to silence usage when an error occurs. - SilenceUsage bool + + // Annotations are key/value pairs that can be used by applications to identify or + // group commands. + Annotations map[string]string + // The *Run functions are executed in the following order: // * PersistentPreRun() // * PreRun() // * Run() // * PostRun() // * PersistentPostRun() - // All functions get the same args, the arguments after the command name - // PersistentPreRun: children of this command will inherit and execute + // All functions get the same args, the arguments after the command name. + // + // PersistentPreRun: children of this command will inherit and execute. PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error + // PersistentPreRunE: PersistentPreRun but returns an error. PersistentPreRunE func(cmd *Command, args []string) error // PreRun: children of this command will not inherit. PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error + // PreRunE: PreRun but returns an error. PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this + // Run: Typically the actual work function. 
Most commands will only implement this. Run func(cmd *Command, args []string) - // RunE: Run but returns an error + // RunE: Run but returns an error. RunE func(cmd *Command, args []string) error // PostRun: run after the Run command. PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error + // PostRunE: PostRun but returns an error. PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun + // PersistentPostRun: children of this command will inherit and execute after PostRun. PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error + // PersistentPostRunE: PersistentPostRun but returns an error. PersistentPostRunE func(cmd *Command, args []string) error - // DisableAutoGenTag remove + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. DisableAutoGenTag bool - // Commands is the list of commands supported by this program. + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. + SuggestionsMinimumDistance int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // commands is the list of commands supported by this program. commands []*Command - // Parent Command for this command + // parent is a parent command for this command. parent *Command - // max lengths of commands' string lengths for use in padding + // Max lengths of commands' string lengths for use in padding. commandsMaxUseLen int commandsMaxCommandPathLen int commandsMaxNameLen int - // is commands slice are sorted or not + // commandsAreSorted defines, if command slice are sorted or not. commandsAreSorted bool + // args is actual args parsed from flags. + args []string + // flagErrorBuf contains all error messages from pflag. flagErrorBuf *bytes.Buffer - - args []string // actual args parsed from flags - output *io.Writer // out writer if set in SetOutput(w) - usageFunc func(*Command) error // Usage can be defined by application - usageTemplate string // Can be defined by Application - helpTemplate string // Can be defined by Application - helpFunc func(*Command, []string) // Help can be defined by application - helpCommand *Command // The help command - // The global normalization function that we can use on every pFlag set and children commands + // flags is full set of flags. + flags *flag.FlagSet + // pflags contains persistent flags. + pflags *flag.FlagSet + // lflags contains local flags. + lflags *flag.FlagSet + // iflags contains inherited flags. + iflags *flag.FlagSet + // parentsPflags is all persistent flags of cmd's parents. 
+ parentsPflags *flag.FlagSet + // globNormFunc is the global normalization function + // that we can use on every pflag set and children commands globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - // Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages - DisableSuggestions bool - // If displaying suggestions, allows to set the minimum levenshtein distance to display, must be > 0 - SuggestionsMinimumDistance int - - // Disable the flag parsing. If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool + // output is an output writer defined by user. + output io.Writer + // usageFunc is usage func defined by user. + usageFunc func(*Command) error + // usageTemplate is usage template defined by user. + usageTemplate string + // flagErrorFunc is func defined by user and it's called when the parsing of + // flags returns an error. + flagErrorFunc func(*Command, error) error + // helpTemplate is help template defined by user. + helpTemplate string + // helpFunc is help func defined by user. + helpFunc func(*Command, []string) + // helpCommand is command with usage 'help'. If it's not defined by user, + // cobra uses default help command. + helpCommand *Command } -// os.Args[1:] by default, if desired, can be overridden +// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden // particularly useful when testing. func (c *Command) SetArgs(a []string) { c.args = a @@ -137,29 +188,36 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (c *Command) SetOutput(output io.Writer) { - c.output = &output + c.output = output } -// Usage can be defined by application +// SetUsageFunc sets usage function. Usage can be defined by application. func (c *Command) SetUsageFunc(f func(*Command) error) { c.usageFunc = f } -// Can be defined by Application +// SetUsageTemplate sets usage template. Can be defined by Application. func (c *Command) SetUsageTemplate(s string) { c.usageTemplate = s } -// Can be defined by Application +// SetFlagErrorFunc sets a function to generate an error when flag parsing +// fails. +func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { + c.flagErrorFunc = f +} + +// SetHelpFunc sets help function. Can be defined by Application. func (c *Command) SetHelpFunc(f func(*Command, []string)) { c.helpFunc = f } +// SetHelpCommand sets help command. func (c *Command) SetHelpCommand(cmd *Command) { c.helpCommand = cmd } -// Can be defined by Application +// SetHelpTemplate sets help template to be used. Application can use it to set custom template. func (c *Command) SetHelpTemplate(s string) { c.helpTemplate = s } @@ -176,17 +234,19 @@ func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string } } +// OutOrStdout returns output to stdout. 
func (c *Command) OutOrStdout() io.Writer { return c.getOut(os.Stdout) } +// OutOrStderr returns output to stderr func (c *Command) OutOrStderr() io.Writer { return c.getOut(os.Stderr) } func (c *Command) getOut(def io.Writer) io.Writer { if c.output != nil { - return *c.output + return c.output } if c.HasParent() { return c.parent.getOut(def) @@ -195,14 +255,13 @@ func (c *Command) getOut(def io.Writer) io.Writer { } // UsageFunc returns either the function set by SetUsageFunc for this command -// or a parent, or it returns a default usage function +// or a parent, or it returns a default usage function. func (c *Command) UsageFunc() (f func(*Command) error) { if c.usageFunc != nil { return c.usageFunc } - if c.HasParent() { - return c.parent.UsageFunc() + return c.Parent().UsageFunc() } return func(c *Command) error { c.mergePersistentFlags() @@ -214,24 +273,23 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } } -// Output the usage for the command -// Used when a user provides invalid input -// Can be defined by user by overriding UsageFunc +// Usage puts out the usage for the command. +// Used when a user provides invalid input. +// Can be defined by user by overriding UsageFunc. func (c *Command) Usage() error { return c.UsageFunc()(c) } // HelpFunc returns either the function set by SetHelpFunc for this command -// or a parent, or it returns a function with default help behavior +// or a parent, or it returns a function with default help behavior. func (c *Command) HelpFunc() func(*Command, []string) { - cmd := c - for cmd != nil { - if cmd.helpFunc != nil { - return cmd.helpFunc - } - cmd = cmd.parent + if c.helpFunc != nil { + return c.helpFunc } - return func(*Command, []string) { + if c.HasParent() { + return c.Parent().HelpFunc() + } + return func(c *Command, a []string) { c.mergePersistentFlags() err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) if err != nil { @@ -240,14 +298,15 @@ func (c *Command) HelpFunc() func(*Command, []string) { } } -// Output the help for the command -// Used when a user calls help [command] -// Can be defined by user by overriding HelpFunc +// Help puts out the help for the command. +// Used when a user calls help [command]. +// Can be defined by user by overriding HelpFunc. func (c *Command) Help() error { c.HelpFunc()(c, []string{}) return nil } +// UsageString return usage string. func (c *Command) UsageString() string { tmpOutput := c.output bb := new(bytes.Buffer) @@ -257,8 +316,25 @@ func (c *Command) UsageString() string { return bb.String() } +// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this +// command or a parent, or it returns a function which returns the original +// error. +func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { + if c.flagErrorFunc != nil { + return c.flagErrorFunc + } + + if c.HasParent() { + return c.parent.FlagErrorFunc() + } + return func(c *Command, err error) error { + return err + } +} + var minUsagePadding = 25 +// UsagePadding return padding for the usage. func (c *Command) UsagePadding() int { if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { return minUsagePadding @@ -268,7 +344,7 @@ func (c *Command) UsagePadding() int { var minCommandPathPadding = 11 -// +// CommandPathPadding return padding for the command path. 
func (c *Command) CommandPathPadding() int { if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { return minCommandPathPadding @@ -278,6 +354,7 @@ func (c *Command) CommandPathPadding() int { var minNamePadding = 11 +// NamePadding returns padding for the name. func (c *Command) NamePadding() int { if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { return minNamePadding @@ -285,6 +362,7 @@ func (c *Command) NamePadding() int { return c.parent.commandsMaxNameLen } +// UsageTemplate returns usage template for the command. func (c *Command) UsageTemplate() string { if c.usageTemplate != "" { return c.usageTemplate @@ -294,32 +372,32 @@ func (c *Command) UsageTemplate() string { return c.parent.UsageTemplate() } return `Usage:{{if .Runnable}} - {{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} Aliases: - {{.NameAndAliases}} -{{end}}{{if .HasExample}} + {{.NameAndAliases}}{{end}}{{if .HasExample}} Examples: -{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}} +{{.Example}}{{end}}{{if .HasAvailableSubCommands}} -Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} +Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} Flags: -{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}} +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} Global Flags: -{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} -Additional help topics:{{range .Commands}}{{if .IsHelpCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }} +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} ` } +// HelpTemplate return help template for the command. func (c *Command) HelpTemplate() string { if c.helpTemplate != "" { return c.helpTemplate @@ -328,72 +406,60 @@ func (c *Command) HelpTemplate() string { if c.HasParent() { return c.parent.HelpTemplate() } - return `{{with or .Long .Short }}{{. | trim}} + return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` } -// Really only used when casting a command to a commander -func (c *Command) resetChildrensParents() { - for _, x := range c.commands { - x.parent = c - } -} - -// Test if the named flag is a boolean flag. -func isBooleanFlag(name string, f *flag.FlagSet) bool { - flag := f.Lookup(name) +func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { + flag := fs.Lookup(name) if flag == nil { return false } - return flag.Value.Type() == "bool" + return flag.NoOptDefVal != "" } -// Test if the named flag is a boolean flag. 
-func isBooleanShortFlag(name string, f *flag.FlagSet) bool { - result := false - f.VisitAll(func(f *flag.Flag) { - if f.Shorthand == name && f.Value.Type() == "bool" { - result = true - } - }) - return result +func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { + if len(name) == 0 { + return false + } + + flag := fs.ShorthandLookup(name[:1]) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" } func stripFlags(args []string, c *Command) []string { - if len(args) < 1 { + if len(args) == 0 { return args } c.mergePersistentFlags() commands := []string{} + flags := c.Flags() - inQuote := false - inFlag := false - for _, y := range args { - if !inQuote { - switch { - case strings.HasPrefix(y, "\""): - inQuote = true - case strings.Contains(y, "=\""): - inQuote = true - case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !isBooleanFlag(y[2:], c.Flags()) - case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): - inFlag = true - case inFlag: - inFlag = false - case y == "": - // strip empty commands, as the go tests expect this to be ok.... - case !strings.HasPrefix(y, "-"): - commands = append(commands, y) - inFlag = false +Loop: + for len(args) > 0 { + s := args[0] + args = args[1:] + switch { + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + // If '--flag arg' then + // delete arg from args. + fallthrough // (do the same as below) + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // If '-f arg' then + // delete 'arg' from args or break the loop if len(args) <= 1. + if len(args) <= 1 { + break Loop + } else { + args = args[1:] + continue } - } - - if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { - inQuote = false + case s != "" && !strings.HasPrefix(s, "-"): + commands = append(commands, s) } } @@ -414,13 +480,14 @@ func argsMinusFirstX(args []string, x string) []string { return args } -// find the target command given the args and command tree +func isFlagArg(arg string) bool { + return ((len(arg) >= 3 && arg[1] == '-') || + (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) +} + +// Find the target command given the args and command tree // Meant to be run on the highest node. Only searches down. 
func (c *Command) Find(args []string) (*Command, []string, error) { - if c == nil { - return nil, nil, fmt.Errorf("Called find() on a nil Command") - } - var innerfind func(*Command, []string) (*Command, []string) innerfind = func(c *Command, innerArgs []string) (*Command, []string) { @@ -429,59 +496,99 @@ func (c *Command) Find(args []string) (*Command, []string, error) { return c, innerArgs } nextSubCmd := argsWOflags[0] - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == nextSubCmd || cmd.HasAlias(nextSubCmd) { // exact name or alias match - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) - } - if EnablePrefixMatching { - if strings.HasPrefix(cmd.Name(), nextSubCmd) { // prefix match - matches = append(matches, cmd) - } - for _, x := range cmd.Aliases { - if strings.HasPrefix(x, nextSubCmd) { - matches = append(matches, cmd) - } - } - } - } - // only accept a single prefix match - multiple matches would be ambiguous - if len(matches) == 1 { - return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) + cmd := c.findNext(nextSubCmd) + if cmd != nil { + return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) } - return c, innerArgs } commandFound, a := innerfind(c, args) - argsWOflags := stripFlags(a, commandFound) - - // no subcommand, always take args - if !commandFound.HasSubCommands() { - return commandFound, a, nil + if commandFound.Args == nil { + return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) } - - // root command with subcommands, do subcommand checking - if commandFound == c && len(argsWOflags) > 0 { - suggestionsString := "" - if !c.DisableSuggestions { - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - if suggestions := c.SuggestionsFor(argsWOflags[0]); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - } - return commandFound, a, fmt.Errorf("unknown command %q for %q%s", argsWOflags[0], commandFound.CommandPath(), suggestionsString) - } - return commandFound, a, nil } +func (c *Command) findSuggestions(arg string) string { + if c.DisableSuggestions { + return "" + } + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + suggestionsString := "" + if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + return suggestionsString +} + +func (c *Command) findNext(next string) *Command { + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == next || cmd.HasAlias(next) { + return cmd + } + if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { + matches = append(matches, cmd) + } + } + + if len(matches) == 1 { + return matches[0] + } + return nil +} + +// Traverse the command tree to find the command, and parse args for +// each parent. 
+func (c *Command) Traverse(args []string) (*Command, []string, error) { + flags := []string{} + inFlag := false + + for i, arg := range args { + switch { + // A long flag with a space separated value + case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) + flags = append(flags, arg) + continue + // A short flag with a space separated value + case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): + inFlag = true + flags = append(flags, arg) + continue + // The value for a flag + case inFlag: + inFlag = false + flags = append(flags, arg) + continue + // A flag without a value, or with an `=` separated value + case isFlagArg(arg): + flags = append(flags, arg) + continue + } + + cmd := c.findNext(arg) + if cmd == nil { + return c, args, nil + } + + if err := c.ParseFlags(flags); err != nil { + return nil, args, err + } + return cmd.Traverse(args[i+1:]) + } + return c, args, nil +} + +// SuggestionsFor provides suggestions for the typedName. func (c *Command) SuggestionsFor(typedName string) []string { suggestions := []string{} for _, cmd := range c.commands { @@ -502,38 +609,24 @@ func (c *Command) SuggestionsFor(typedName string) []string { return suggestions } +// VisitParents visits all parents of the command and invokes fn on each parent. func (c *Command) VisitParents(fn func(*Command)) { - var traverse func(*Command) *Command - - traverse = func(x *Command) *Command { - if x != c { - fn(x) - } - if x.HasParent() { - return traverse(x.parent) - } - return x + if c.HasParent() { + fn(c.Parent()) + c.Parent().VisitParents(fn) } - traverse(c) } +// Root finds root command. func (c *Command) Root() *Command { - var findRoot func(*Command) *Command - - findRoot = func(x *Command) *Command { - if x.HasParent() { - return findRoot(x.parent) - } - return x + if c.HasParent() { + return c.Parent().Root() } - - return findRoot(c) + return c } -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. (Description from -// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). +// ArgsLenAtDash will return the length of c.Flags().Args at the moment +// when a -- was found during args parsing. func (c *Command) ArgsLenAtDash() int { return c.Flags().ArgsLenAtDash() } @@ -549,18 +642,19 @@ func (c *Command) execute(a []string) (err error) { // initialize help flag as the last point possible to allow for user // overriding - c.initHelpFlag() + c.InitDefaultHelpFlag() err = c.ParseFlags(a) if err != nil { - return err + return c.FlagErrorFunc()(c, err) } - // If help is called, regardless of other flags, return we want help + + // If help is called, regardless of other flags, return we want help. // Also say we need help if the command isn't runnable. helpVal, err := c.Flags().GetBool("help") if err != nil { // should be impossible to get here as we always declare a help - // flag in initHelpFlag() + // flag in InitDefaultHelpFlag() c.Println("\"help\" flag declared as non-bool. 
Please correct your code") return err } @@ -576,6 +670,10 @@ func (c *Command) execute(a []string) (err error) { argWoFlags = a } + if err := c.ValidateArgs(argWoFlags); err != nil { + return err + } + for p := c; p != nil; p = p.Parent() { if p.PersistentPreRunE != nil { if err := p.PersistentPreRunE(c, argWoFlags); err != nil { @@ -595,6 +693,9 @@ func (c *Command) execute(a []string) (err error) { c.PreRun(c, argWoFlags) } + if err := c.validateRequiredFlags(); err != nil { + return err + } if c.RunE != nil { if err := c.RunE(c, argWoFlags); err != nil { return err @@ -630,18 +731,7 @@ func (c *Command) preRun() { } } -func (c *Command) errorMsgFromParse() string { - s := c.flagErrorBuf.String() - - x := strings.Split(s, "\n") - - if len(x) > 0 { - return x[0] - } - return "" -} - -// Call execute to use the args (os.Args[1:] by default) +// Execute uses the args (os.Args[1:] by default) // and run through the command tree finding appropriate matches // for commands and then corresponding flags. func (c *Command) Execute() error { @@ -649,8 +739,8 @@ func (c *Command) Execute() error { return err } +// ExecuteC executes the command. func (c *Command) ExecuteC() (cmd *Command, err error) { - // Regardless of what command execute is called on, run on Root only if c.HasParent() { return c.Root().ExecuteC() @@ -663,7 +753,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize help as the last point possible to allow for user // overriding - c.initHelpCmd() + c.InitDefaultHelpCmd() var args []string @@ -674,7 +764,12 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = c.args } - cmd, flags, err := c.Find(args) + var flags []string + if c.TraverseChildren { + cmd, flags, err = c.Traverse(args) + } else { + cmd, flags, err = c.Find(args) + } if err != nil { // If found parse to a subcommand and then failed, talk about the subcommand if cmd != nil { @@ -686,6 +781,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { } return c, err } + err = cmd.execute(flags) if err != nil { // Always show help if requested, even if SilenceErrors is in @@ -706,52 +802,92 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { if !cmd.SilenceUsage && !c.SilenceUsage { c.Println(cmd.UsageString()) } - return cmd, err } - return cmd, nil + return cmd, err } -func (c *Command) initHelpFlag() { - if c.Flags().Lookup("help") == nil { - c.Flags().BoolP("help", "h", false, "help for "+c.Name()) +func (c *Command) ValidateArgs(args []string) error { + if c.Args == nil { + return nil } + return c.Args(c, args) } -func (c *Command) initHelpCmd() { - if c.helpCommand == nil { - if !c.HasSubCommands() { +func (c *Command) validateRequiredFlags() error { + flags := c.Flags() + missingFlagNames := []string{} + flags.VisitAll(func(pflag *flag.Flag) { + requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] + if !found { return } + if (requiredAnnotation[0] == "true") && !pflag.Changed { + missingFlagNames = append(missingFlagNames, pflag.Name) + } + }) + if len(missingFlagNames) > 0 { + return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) + } + return nil +} + +// InitDefaultHelpFlag adds default help flag to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help flag, it will do nothing. 
+func (c *Command) InitDefaultHelpFlag() { + c.mergePersistentFlags() + if c.Flags().Lookup("help") == nil { + usage := "help for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + c.Flags().BoolP("help", "h", false, usage) + } +} + +// InitDefaultHelpCmd adds default help command to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help command or c has no subcommands, it will do nothing. +func (c *Command) InitDefaultHelpCmd() { + if !c.HasSubCommands() { + return + } + + if c.helpCommand == nil { c.helpCommand = &Command{ Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - PersistentPreRun: func(cmd *Command, args []string) {}, - PersistentPostRun: func(cmd *Command, args []string) {}, +Simply type ` + c.Name() + ` help [path to command] for full details.`, Run: func(c *Command, args []string) { cmd, _, e := c.Root().Find(args) if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q.", args) + c.Printf("Unknown help topic %#q\n", args) c.Root().Usage() } else { + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.Help() } }, } } + c.RemoveCommand(c.helpCommand) c.AddCommand(c.helpCommand) } -// Used for testing +// ResetCommands delete parent, subcommand and help command from c. func (c *Command) ResetCommands() { + c.parent = nil c.commands = nil c.helpCommand = nil + c.parentsPflags = nil } -// Sorts commands by their names +// Sorts commands by their names. type commandSorterByName []*Command func (c commandSorterByName) Len() int { return len(c) } @@ -831,45 +967,48 @@ main: } } -// Print is a convenience method to Print to the defined output, fallback to Stderr if not set +// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. func (c *Command) Print(i ...interface{}) { fmt.Fprint(c.OutOrStderr(), i...) } -// Println is a convenience method to Println to the defined output, fallback to Stderr if not set +// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. func (c *Command) Println(i ...interface{}) { - str := fmt.Sprintln(i...) - c.Print(str) + c.Print(fmt.Sprintln(i...)) } -// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set +// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. func (c *Command) Printf(format string, i ...interface{}) { - str := fmt.Sprintf(format, i...) - c.Print(str) + c.Print(fmt.Sprintf(format, i...)) } // CommandPath returns the full path to this command. func (c *Command) CommandPath() string { - str := c.Name() - x := c - for x.HasParent() { - str = x.parent.Name() + " " + str - x = x.parent - } - return str -} - -//The full usage for a given command (including parents) -func (c *Command) UseLine() string { - str := "" if c.HasParent() { - str = c.parent.CommandPath() + " " + return c.Parent().CommandPath() + " " + c.Name() } - return str + c.Use + return c.Name() } -// For use in determining which flags have been assigned to which commands -// and which persist +// UseLine puts out the full usage for a given command (including parents). 
+func (c *Command) UseLine() string { + var useline string + if c.HasParent() { + useline = c.parent.CommandPath() + " " + c.Use + } else { + useline = c.Use + } + if c.DisableFlagsInUseLine { + return useline + } + if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { + useline += " [flags]" + } + return useline +} + +// DebugFlags used to determine which flags have been assigned to which commands +// and which persist. func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -880,12 +1019,8 @@ func (c *Command) DebugFlags() { } if x.HasFlags() { x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() { - if x.persistentFlag(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } + if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") } else { c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") } @@ -915,9 +1050,6 @@ func (c *Command) DebugFlags() { // Name returns the command's name: the first word in the use line. func (c *Command) Name() string { - if c.name != "" { - return c.name - } name := c.Use i := strings.Index(name, " ") if i >= 0 { @@ -936,26 +1068,42 @@ func (c *Command) HasAlias(s string) bool { return false } +// hasNameOrAliasPrefix returns true if the Name or any of aliases start +// with prefix +func (c *Command) hasNameOrAliasPrefix(prefix string) bool { + if strings.HasPrefix(c.Name(), prefix) { + return true + } + for _, alias := range c.Aliases { + if strings.HasPrefix(alias, prefix) { + return true + } + } + return false +} + +// NameAndAliases returns a list of the command name and all aliases func (c *Command) NameAndAliases() string { return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") } +// HasExample determines if the command has example. func (c *Command) HasExample() bool { return len(c.Example) > 0 } -// Runnable determines if the command is itself runnable +// Runnable determines if the command is itself runnable. func (c *Command) Runnable() bool { return c.Run != nil || c.RunE != nil } -// HasSubCommands determines if the command has children commands +// HasSubCommands determines if the command has children commands. func (c *Command) HasSubCommands() bool { return len(c.commands) > 0 } // IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands) +// (this includes all non deprecated/hidden commands). func (c *Command) IsAvailableCommand() bool { if len(c.Deprecated) != 0 || c.Hidden { return false @@ -972,11 +1120,12 @@ func (c *Command) IsAvailableCommand() bool { return false } -// IsHelpCommand determines if a command is a 'help' command; a help command is -// determined by the fact that it is NOT runnable/hidden/deprecated, and has no -// sub commands that are runnable/hidden/deprecated -func (c *Command) IsHelpCommand() bool { - +// IsAdditionalHelpTopicCommand determines if a command is an additional +// help topic command; additional help topic command is determined by the +// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that +// are runnable/hidden/deprecated. +// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. 
+func (c *Command) IsAdditionalHelpTopicCommand() bool { // if a command is runnable, deprecated, or hidden it is not a 'help' command if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { return false @@ -984,7 +1133,7 @@ func (c *Command) IsHelpCommand() bool { // if any non-help sub commands are found, the command is not a 'help' command for _, sub := range c.commands { - if !sub.IsHelpCommand() { + if !sub.IsAdditionalHelpTopicCommand() { return false } } @@ -993,14 +1142,13 @@ func (c *Command) IsHelpCommand() bool { return true } -// HasHelpSubCommands determines if a command has any avilable 'help' sub commands +// HasHelpSubCommands determines if a command has any available 'help' sub commands // that need to be shown in the usage/help default template under 'additional help -// topics' +// topics'. func (c *Command) HasHelpSubCommands() bool { - // return true on the first found available 'help' sub command for _, sub := range c.commands { - if sub.IsHelpCommand() { + if sub.IsAdditionalHelpTopicCommand() { return true } } @@ -1010,9 +1158,8 @@ func (c *Command) HasHelpSubCommands() bool { } // HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands' +// need to be shown in the usage/help default template under 'available commands'. func (c *Command) HasAvailableSubCommands() bool { - // return true on the first found available (non deprecated/help/hidden) // sub command for _, sub := range c.commands { @@ -1021,22 +1168,23 @@ func (c *Command) HasAvailableSubCommands() bool { } } - // the command either has no sub comamnds, or no available (non deprecated/help/hidden) + // the command either has no sub commands, or no available (non deprecated/help/hidden) // sub commands return false } -// Determine if the command is a child command +// HasParent determines if the command is a child command. func (c *Command) HasParent() bool { return c.parent != nil } -// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists +// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists. func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { return c.globNormFunc } -// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) +// Flags returns the complete FlagSet that applies +// to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) @@ -1045,10 +1193,11 @@ func (c *Command) Flags() *flag.FlagSet { } c.flags.SetOutput(c.flagErrorBuf) } + return c.flags } -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands +// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() @@ -1061,59 +1210,63 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { return out } -// Get the local FlagSet specifically set in the current command +// LocalFlags returns the local FlagSet specifically set in the current command. 
func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() - local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.lflags.VisitAll(func(f *flag.Flag) { - local.AddFlag(f) - }) - if !c.HasParent() { - flag.CommandLine.VisitAll(func(f *flag.Flag) { - if local.Lookup(f.Name) == nil { - local.AddFlag(f) - } - }) + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) } - return local + c.lflags.SortFlags = c.Flags().SortFlags + if c.globNormFunc != nil { + c.lflags.SetNormalizeFunc(c.globNormFunc) + } + + addToLocal := func(f *flag.Flag) { + if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { + c.lflags.AddFlag(f) + } + } + c.Flags().VisitAll(addToLocal) + c.PersistentFlags().VisitAll(addToLocal) + return c.lflags } -// All Flags which were inherited from parents commands +// InheritedFlags returns all flags which were inherited from parents commands. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() - inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.iflags == nil { + c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.iflags.SetOutput(c.flagErrorBuf) + } + local := c.LocalFlags() - - var rmerge func(x *Command) - - rmerge = func(x *Command) { - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - inherited.AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } + if c.globNormFunc != nil { + c.iflags.SetNormalizeFunc(c.globNormFunc) } - if c.HasParent() { - rmerge(c.parent) - } - - return inherited + c.parentsPflags.VisitAll(func(f *flag.Flag) { + if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + c.iflags.AddFlag(f) + } + }) + return c.iflags } -// All Flags which were not inherited from parent commands +// NonInheritedFlags returns all flags which were not inherited from parent commands. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } -// Get the Persistent FlagSet specifically set in the current command +// PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) @@ -1125,7 +1278,7 @@ func (c *Command) PersistentFlags() *flag.FlagSet { return c.pflags } -// For use in testing +// ResetFlags deletes all flags from command. func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() @@ -1133,52 +1286,56 @@ func (c *Command) ResetFlags() { c.flags.SetOutput(c.flagErrorBuf) c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) + + c.lflags = nil + c.iflags = nil + c.parentsPflags = nil } -// Does the command contain any flags (local plus persistent from the entire structure) +// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). func (c *Command) HasFlags() bool { return c.Flags().HasFlags() } -// Does the command contain persistent flags +// HasPersistentFlags checks if the command contains persistent flags. 
func (c *Command) HasPersistentFlags() bool { return c.PersistentFlags().HasFlags() } -// Does the command has flags specifically declared locally +// HasLocalFlags checks if the command has flags specifically declared locally. func (c *Command) HasLocalFlags() bool { return c.LocalFlags().HasFlags() } -// Does the command have flags inherited from its parent command +// HasInheritedFlags checks if the command has flags inherited from its parent command. func (c *Command) HasInheritedFlags() bool { return c.InheritedFlags().HasFlags() } -// Does the command contain any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated +// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire +// structure) which are not hidden or deprecated. func (c *Command) HasAvailableFlags() bool { return c.Flags().HasAvailableFlags() } -// Does the command contain persistent flags which are not hidden or deprecated +// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. func (c *Command) HasAvailablePersistentFlags() bool { return c.PersistentFlags().HasAvailableFlags() } -// Does the command has flags specifically declared locally which are not hidden -// or deprecated +// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden +// or deprecated. func (c *Command) HasAvailableLocalFlags() bool { return c.LocalFlags().HasAvailableFlags() } -// Does the command have flags inherited from its parent command which are -// not hidden or deprecated +// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are +// not hidden or deprecated. func (c *Command) HasAvailableInheritedFlags() bool { return c.InheritedFlags().HasAvailableFlags() } -// Flag climbs up the command tree looking for matching flag +// Flag climbs up the command tree looking for matching flag. func (c *Command) Flag(name string) (flag *flag.Flag) { flag = c.Flags().Lookup(name) @@ -1189,68 +1346,69 @@ func (c *Command) Flag(name string) (flag *flag.Flag) { return } -// recursively find matching persistent flag +// Recursively find matching persistent flag. func (c *Command) persistentFlag(name string) (flag *flag.Flag) { if c.HasPersistentFlags() { flag = c.PersistentFlags().Lookup(name) } - if flag == nil && c.HasParent() { - flag = c.parent.persistentFlag(name) + if flag == nil { + c.updateParentsPflags() + flag = c.parentsPflags.Lookup(name) } return } -// ParseFlags parses persistent flag tree & local flags -func (c *Command) ParseFlags(args []string) (err error) { +// ParseFlags parses persistent flag tree and local flags. +func (c *Command) ParseFlags(args []string) error { if c.DisableFlagParsing { return nil } + + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + beforeErrorBufLen := c.flagErrorBuf.Len() c.mergePersistentFlags() - err = c.Flags().Parse(args) - return + err := c.Flags().Parse(args) + // Print warnings if they occurred (e.g. deprecated flag messages). + if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { + c.Print(c.flagErrorBuf.String()) + } + + return err } -// Parent returns a commands parent command +// Parent returns a commands parent command. func (c *Command) Parent() *Command { return c.parent } +// mergePersistentFlags merges c.PersistentFlags() to c.Flags() +// and adds missing persistent flags of all parents. 
func (c *Command) mergePersistentFlags() { - var rmerge func(x *Command) - - // Save the set of local flags - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - addtolocal := func(f *flag.Flag) { - c.lflags.AddFlag(f) - } - c.Flags().VisitAll(addtolocal) - c.PersistentFlags().VisitAll(addtolocal) - } - rmerge = func(x *Command) { - if !x.HasParent() { - flag.CommandLine.VisitAll(func(f *flag.Flag) { - if x.PersistentFlags().Lookup(f.Name) == nil { - x.PersistentFlags().AddFlag(f) - } - }) - } - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if c.Flags().Lookup(f.Name) == nil { - c.Flags().AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } - } - - rmerge(c) + c.updateParentsPflags() + c.Flags().AddFlagSet(c.PersistentFlags()) + c.Flags().AddFlagSet(c.parentsPflags) +} + +// updateParentsPflags updates c.parentsPflags by adding +// new persistent flags of all parents. +// If c.parentsPflags == nil, it makes new. +func (c *Command) updateParentsPflags() { + if c.parentsPflags == nil { + c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags.SetOutput(c.flagErrorBuf) + c.parentsPflags.SortFlags = false + } + + if c.globNormFunc != nil { + c.parentsPflags.SetNormalizeFunc(c.globNormFunc) + } + + c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) + + c.VisitParents(func(parent *Command) { + c.parentsPflags.AddFlagSet(parent.PersistentFlags()) + }) } diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 4b0eaa1b6bc..edec728e4f5 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -11,14 +11,8 @@ import ( var preExecHookFn = preExecHook -// enables an information splash screen on Windows if the CLI is started from explorer.exe. -var MousetrapHelpText string = `This is a command line tool - -You need to open cmd.exe and run it from there. -` - func preExecHook(c *Command) { - if mousetrap.StartedByExplorer() { + if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { c.Print(MousetrapHelpText) time.Sleep(5 * time.Second) os.Exit(1) diff --git a/vendor/github.com/spf13/cobra/doc/BUILD b/vendor/github.com/spf13/cobra/doc/BUILD index 1820193cc00..52fbc08c1ef 100644 --- a/vendor/github.com/spf13/cobra/doc/BUILD +++ b/vendor/github.com/spf13/cobra/doc/BUILD @@ -5,7 +5,9 @@ go_library( srcs = [ "man_docs.go", "md_docs.go", + "rest_docs.go", "util.go", + "yaml_docs.go", ], importpath = "github.com/spf13/cobra/doc", visibility = ["//visibility:public"], @@ -13,6 +15,7 @@ go_library( "//vendor/github.com/cpuguy83/go-md2man/md2man:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", ], ) diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go index b202029d1e0..ce92332dd17 100644 --- a/vendor/github.com/spf13/cobra/doc/man_docs.go +++ b/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -23,21 +23,21 @@ import ( "strings" "time" - mangen "github.com/cpuguy83/go-md2man/md2man" + "github.com/cpuguy83/go-md2man/md2man" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // GenManTree will generate a man page for this command and all descendants // in the directory given. The header may be nil. 
This function may not work -// correctly if your command names have - in them. If you have `cmd` with two -// subcmds, `sub` and `sub-third`. And `sub` has a subcommand called `third` +// correctly if your command names have `-` in them. If you have `cmd` with two +// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` // it is undefined which help output will be in the file `cmd-sub-third.1`. func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error { return GenManTreeFromOpts(cmd, GenManTreeOptions{ Header: header, Path: dir, - CommandSeparator: "_", + CommandSeparator: "-", }) } @@ -49,7 +49,7 @@ func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { header = &GenManHeader{} } for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsHelpCommand() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { continue } if err := GenManTreeFromOpts(c, opts); err != nil { @@ -66,7 +66,7 @@ func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { separator = opts.CommandSeparator } basename := strings.Replace(cmd.CommandPath(), " ", separator, -1) - filename := filepath.Join(opts.Path, basename + "." + section) + filename := filepath.Join(opts.Path, basename+"."+section) f, err := os.Create(filename) if err != nil { return err @@ -77,6 +77,8 @@ func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { return GenMan(cmd, &headerCopy, f) } +// GenManTreeOptions is the options for generating the man pages. +// Used only in GenManTreeFromOpts. type GenManTreeOptions struct { Header *GenManHeader Path string @@ -105,7 +107,7 @@ func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error { fillHeader(header, cmd.CommandPath()) b := genMan(cmd, header) - _, err := w.Write(mangen.Render(b)) + _, err := w.Write(md2man.Render(b)) return err } @@ -126,25 +128,25 @@ func fillHeader(header *GenManHeader, name string) { } } -func manPreamble(out io.Writer, header *GenManHeader, cmd *cobra.Command, dashedName string) { +func manPreamble(buf *bytes.Buffer, header *GenManHeader, cmd *cobra.Command, dashedName string) { description := cmd.Long if len(description) == 0 { description = cmd.Short } - fmt.Fprintf(out, `%% %s(%s)%s + buf.WriteString(fmt.Sprintf(`%% %s(%s)%s %% %s %% %s # NAME -`, header.Title, header.Section, header.date, header.Source, header.Manual) - fmt.Fprintf(out, "%s \\- %s\n\n", dashedName, cmd.Short) - fmt.Fprintf(out, "# SYNOPSIS\n") - fmt.Fprintf(out, "**%s**\n\n", cmd.UseLine()) - fmt.Fprintf(out, "# DESCRIPTION\n") - fmt.Fprintf(out, "%s\n\n", description) +`, header.Title, header.Section, header.date, header.Source, header.Manual)) + buf.WriteString(fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short)) + buf.WriteString("# SYNOPSIS\n") + buf.WriteString(fmt.Sprintf("**%s**\n\n", cmd.UseLine())) + buf.WriteString("# DESCRIPTION\n") + buf.WriteString(description + "\n\n") } -func manPrintFlags(out io.Writer, flags *pflag.FlagSet) { +func manPrintFlags(buf *bytes.Buffer, flags *pflag.FlagSet) { flags.VisitAll(func(flag *pflag.Flag) { if len(flag.Deprecated) > 0 || flag.Hidden { return @@ -156,38 +158,41 @@ func manPrintFlags(out io.Writer, flags *pflag.FlagSet) { format = fmt.Sprintf("**--%s**", flag.Name) } if len(flag.NoOptDefVal) > 0 { - format = format + "[" + format += "[" } if flag.Value.Type() == "string" { // put quotes on the value - format = format + "=%q" + format += "=%q" } else { - format = format + "=%s" + format += "=%s" } if 
len(flag.NoOptDefVal) > 0 { - format = format + "]" + format += "]" } - format = format + "\n\t%s\n\n" - fmt.Fprintf(out, format, flag.DefValue, flag.Usage) + format += "\n\t%s\n\n" + buf.WriteString(fmt.Sprintf(format, flag.DefValue, flag.Usage)) }) } -func manPrintOptions(out io.Writer, command *cobra.Command) { +func manPrintOptions(buf *bytes.Buffer, command *cobra.Command) { flags := command.NonInheritedFlags() if flags.HasFlags() { - fmt.Fprintf(out, "# OPTIONS\n") - manPrintFlags(out, flags) - fmt.Fprintf(out, "\n") + buf.WriteString("# OPTIONS\n") + manPrintFlags(buf, flags) + buf.WriteString("\n") } flags = command.InheritedFlags() if flags.HasFlags() { - fmt.Fprintf(out, "# OPTIONS INHERITED FROM PARENT COMMANDS\n") - manPrintFlags(out, flags) - fmt.Fprintf(out, "\n") + buf.WriteString("# OPTIONS INHERITED FROM PARENT COMMANDS\n") + manPrintFlags(buf, flags) + buf.WriteString("\n") } } func genMan(cmd *cobra.Command, header *GenManHeader) []byte { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + // something like `rootcmd-subcmd1-subcmd2` dashCommandName := strings.Replace(cmd.CommandPath(), " ", "-", -1) @@ -196,11 +201,11 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { manPreamble(buf, header, cmd, dashCommandName) manPrintOptions(buf, cmd) if len(cmd.Example) > 0 { - fmt.Fprintf(buf, "# EXAMPLE\n") - fmt.Fprintf(buf, "```\n%s\n```\n", cmd.Example) + buf.WriteString("# EXAMPLE\n") + buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) } if hasSeeAlso(cmd) { - fmt.Fprintf(buf, "# SEE ALSO\n") + buf.WriteString("# SEE ALSO\n") seealsos := make([]string, 0) if cmd.HasParent() { parentPath := cmd.Parent().CommandPath() @@ -216,16 +221,16 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { children := cmd.Commands() sort.Sort(byName(children)) for _, c := range children { - if !c.IsAvailableCommand() || c.IsHelpCommand() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { continue } seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section) seealsos = append(seealsos, seealso) } - fmt.Fprintf(buf, "%s\n", strings.Join(seealsos, ", ")) + buf.WriteString(strings.Join(seealsos, ", ") + "\n") } if !cmd.DisableAutoGenTag { - fmt.Fprintf(buf, "# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")) + buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) } return buf.Bytes() } diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.md b/vendor/github.com/spf13/cobra/doc/man_docs.md index 5fe957a3558..3709160f34f 100644 --- a/vendor/github.com/spf13/cobra/doc/man_docs.md +++ b/vendor/github.com/spf13/cobra/doc/man_docs.md @@ -6,6 +6,8 @@ Generating man pages from a cobra command is incredibly easy. 
An example is as f package main import ( + "log" + "github.com/spf13/cobra" "github.com/spf13/cobra/doc" ) @@ -15,12 +17,15 @@ func main() { Use: "test", Short: "my test program", } - header := &cobra.GenManHeader{ + header := &doc.GenManHeader{ Title: "MINE", Section: "3", } - doc.GenManTree(cmd, header, "/tmp") + err := doc.GenManTree(cmd, header, "/tmp") + if err != nil { + log.Fatal(err) + } } ``` -That will get you a man page `/tmp/test.1` +That will get you a man page `/tmp/test.3` diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go index fa136318049..68cf5bf6487 100644 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -14,6 +14,7 @@ package doc import ( + "bytes" "fmt" "io" "os" @@ -25,38 +26,36 @@ import ( "github.com/spf13/cobra" ) -func printOptions(w io.Writer, cmd *cobra.Command, name string) error { +func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error { flags := cmd.NonInheritedFlags() - flags.SetOutput(w) + flags.SetOutput(buf) if flags.HasFlags() { - if _, err := fmt.Fprintf(w, "### Options\n\n```\n"); err != nil { - return err - } + buf.WriteString("### Options\n\n```\n") flags.PrintDefaults() - if _, err := fmt.Fprintf(w, "```\n\n"); err != nil { - return err - } + buf.WriteString("```\n\n") } parentFlags := cmd.InheritedFlags() - parentFlags.SetOutput(w) + parentFlags.SetOutput(buf) if parentFlags.HasFlags() { - if _, err := fmt.Fprintf(w, "### Options inherited from parent commands\n\n```\n"); err != nil { - return err - } + buf.WriteString("### Options inherited from parent commands\n\n```\n") parentFlags.PrintDefaults() - if _, err := fmt.Fprintf(w, "```\n\n"); err != nil { - return err - } + buf.WriteString("```\n\n") } return nil } +// GenMarkdown creates markdown output. func GenMarkdown(cmd *cobra.Command, w io.Writer) error { return GenMarkdownCustom(cmd, w, func(s string) string { return s }) } +// GenMarkdownCustom creates custom markdown output. 
func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + buf := new(bytes.Buffer) name := cmd.CommandPath() short := cmd.Short @@ -65,49 +64,31 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) long = short } - if _, err := fmt.Fprintf(w, "## %s\n\n", name); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "%s\n\n", short); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "### Synopsis\n\n"); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\n%s\n\n", long); err != nil { - return err - } + buf.WriteString("## " + name + "\n\n") + buf.WriteString(short + "\n\n") + buf.WriteString("### Synopsis\n\n") + buf.WriteString("\n" + long + "\n\n") if cmd.Runnable() { - if _, err := fmt.Fprintf(w, "```\n%s\n```\n\n", cmd.UseLine()); err != nil { - return err - } + buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) } if len(cmd.Example) > 0 { - if _, err := fmt.Fprintf(w, "### Examples\n\n"); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "```\n%s\n```\n\n", cmd.Example); err != nil { - return err - } + buf.WriteString("### Examples\n\n") + buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) } - if err := printOptions(w, cmd, name); err != nil { + if err := printOptions(buf, cmd, name); err != nil { return err } if hasSeeAlso(cmd) { - if _, err := fmt.Fprintf(w, "### SEE ALSO\n"); err != nil { - return err - } + buf.WriteString("### SEE ALSO\n") if cmd.HasParent() { parent := cmd.Parent() pname := parent.CommandPath() link := pname + ".md" link = strings.Replace(link, " ", "_", -1) - if _, err := fmt.Fprintf(w, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short); err != nil { - return err - } + buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) cmd.VisitParents(func(c *cobra.Command) { if c.DisableAutoGenTag { cmd.DisableAutoGenTag = c.DisableAutoGenTag @@ -119,37 +100,40 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) sort.Sort(byName(children)) for _, child := range children { - if !child.IsAvailableCommand() || child.IsHelpCommand() { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { continue } cname := name + " " + child.Name() link := cname + ".md" link = strings.Replace(link, " ", "_", -1) - if _, err := fmt.Fprintf(w, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "\n"); err != nil { - return err + buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) } + buf.WriteString("\n") } if !cmd.DisableAutoGenTag { - if _, err := fmt.Fprintf(w, "###### Auto generated by spf13/cobra on %s\n", time.Now().Format("2-Jan-2006")); err != nil { - return err - } + buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n") } - return nil + _, err := buf.WriteTo(w) + return err } +// GenMarkdownTree will generate a markdown page for this command and all +// descendants in the directory given. The header may be nil. +// This function may not work correctly if your command names have `-` in them. +// If you have `cmd` with two subcmds, `sub` and `sub-third`, +// and `sub` has a subcommand called `third`, it is undefined which +// help output will be in the file `cmd-sub-third.1`. 
func GenMarkdownTree(cmd *cobra.Command, dir string) error { identity := func(s string) string { return s } emptyStr := func(s string) string { return "" } return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) } +// GenMarkdownTreeCustom is the the same as GenMarkdownTree, but +// with custom filePrepender and linkHandler. func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsHelpCommand() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { continue } if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil { diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md index 0c3b96e2718..56ce9fe819e 100644 --- a/vendor/github.com/spf13/cobra/doc/md_docs.md +++ b/vendor/github.com/spf13/cobra/doc/md_docs.md @@ -6,6 +6,8 @@ Generating man pages from a cobra command is incredibly easy. An example is as f package main import ( + "log" + "github.com/spf13/cobra" "github.com/spf13/cobra/doc" ) @@ -15,7 +17,10 @@ func main() { Use: "test", Short: "my test program", } - doc.GenMarkdownTree(cmd, "/tmp") + err := doc.GenMarkdownTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } } ``` @@ -29,18 +34,22 @@ This program can actually generate docs for the kubectl command in the kubernete package main import ( + "log" "io/ioutil" "os" - kubectlcmd "k8s.io/kubernetes/pkg/kubectl/cmd" + "k8s.io/kubernetes/pkg/kubectl/cmd" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "github.com/spf13/cobra/doc" ) func main() { - cmd := kubectlcmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - doc.GenMarkdownTree(cmd, "./") + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenMarkdownTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } } ``` @@ -52,7 +61,10 @@ You may wish to have more control over the output, or only generate for a single ```go out := new(bytes.Buffer) - doc.GenMarkdown(cmd, out) + err := doc.GenMarkdown(cmd, out) + if err != nil { + log.Fatal(err) + } ``` This will write the markdown doc for ONLY "cmd" into the out, buffer. @@ -101,4 +113,3 @@ linkHandler := func(name string) string { return "/commands/" + strings.ToLower(base) + "/" } ``` - diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go new file mode 100644 index 00000000000..4913e3ee2ea --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go @@ -0,0 +1,185 @@ +//Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/spf13/cobra" +) + +func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error { + flags := cmd.NonInheritedFlags() + flags.SetOutput(buf) + if flags.HasFlags() { + buf.WriteString("Options\n") + buf.WriteString("~~~~~~~\n\n::\n\n") + flags.PrintDefaults() + buf.WriteString("\n") + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(buf) + if parentFlags.HasFlags() { + buf.WriteString("Options inherited from parent commands\n") + buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n") + parentFlags.PrintDefaults() + buf.WriteString("\n") + } + return nil +} + +// linkHandler for default ReST hyperlink markup +func defaultLinkHandler(name, ref string) string { + return fmt.Sprintf("`%s <%s.rst>`_", name, ref) +} + +// GenReST creates reStructured Text output. +func GenReST(cmd *cobra.Command, w io.Writer) error { + return GenReSTCustom(cmd, w, defaultLinkHandler) +} + +// GenReSTCustom creates custom reStructured Text output. +func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + buf := new(bytes.Buffer) + name := cmd.CommandPath() + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + ref := strings.Replace(name, " ", "_", -1) + + buf.WriteString(".. _" + ref + ":\n\n") + buf.WriteString(name + "\n") + buf.WriteString(strings.Repeat("-", len(name)) + "\n\n") + buf.WriteString(short + "\n\n") + buf.WriteString("Synopsis\n") + buf.WriteString("~~~~~~~~\n\n") + buf.WriteString("\n" + long + "\n\n") + + if cmd.Runnable() { + buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) + } + + if len(cmd.Example) > 0 { + buf.WriteString("Examples\n") + buf.WriteString("~~~~~~~~\n\n") + buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) + } + + if err := printOptionsReST(buf, cmd, name); err != nil { + return err + } + if hasSeeAlso(cmd) { + buf.WriteString("SEE ALSO\n") + buf.WriteString("~~~~~~~~\n\n") + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + ref = strings.Replace(pname, " ", "_", -1) + buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + cname := name + " " + child.Name() + ref = strings.Replace(cname, " ", "_", -1) + buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) + } + buf.WriteString("\n") + } + if !cmd.DisableAutoGenTag { + buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n") + } + _, err := buf.WriteTo(w) + return err +} + +// GenReSTTree will generate a ReST page for this command and all +// descendants in the directory given. +// This function may not work correctly if your command names have `-` in them. +// If you have `cmd` with two subcmds, `sub` and `sub-third`, +// and `sub` has a subcommand called `third`, it is undefined which +// help output will be in the file `cmd-sub-third.1`. 
+func GenReSTTree(cmd *cobra.Command, dir string) error { + emptyStr := func(s string) string { return "" } + return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler) +} + +// GenReSTTreeCustom is the the same as GenReSTTree, but +// with custom filePrepender and linkHandler. +func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".rst" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenReSTCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} + +// adapted from: https://github.com/kr/text/blob/main/indent.go +func indentString(s, p string) string { + var res []byte + b := []byte(s) + prefix := []byte(p) + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) + } + res = append(res, c) + bol = c == '\n' + } + return string(res) +} diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.md b/vendor/github.com/spf13/cobra/doc/rest_docs.md new file mode 100644 index 00000000000..6098430eff6 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.md @@ -0,0 +1,114 @@ +# Generating ReStructured Text Docs For Your Own cobra.Command + +Generating ReST pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + err := doc.GenReSTTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a ReST document `/tmp/test.rst` + +## Generate ReST docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "log" + "io/ioutil" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenReSTTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate ReST docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenReST` instead of `GenReSTTree` + +```go + out := new(bytes.Buffer) + err := doc.GenReST(cmd, out) + if err != nil { + log.Fatal(err) + } +``` + +This will write the ReST doc for ONLY "cmd" into the out, buffer. + +## Customize the output + +Both `GenReST` and `GenReSTTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { + //... 
+} +``` + +```go +func GenReSTCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string, string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered ReST file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. This is useful while converting rst to html or while generating documentation with tools like Sphinx where `:ref:` is used: + +```go +// Sphinx cross-referencing format +linkHandler := func(name, ref string) string { + return fmt.Sprintf(":ref:`%s <%s>`", name, ref) +} +``` diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go index a1c6b89ba6c..8d3dbecec82 100644 --- a/vendor/github.com/spf13/cobra/doc/util.go +++ b/vendor/github.com/spf13/cobra/doc/util.go @@ -13,7 +13,11 @@ package doc -import "github.com/spf13/cobra" +import ( + "strings" + + "github.com/spf13/cobra" +) // Test to see if we have a reason to print See Also information in docs // Basically this is a test for a parent commend or a subcommand which is @@ -23,7 +27,7 @@ func hasSeeAlso(cmd *cobra.Command) bool { return true } for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsHelpCommand() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { continue } return true @@ -31,6 +35,15 @@ func hasSeeAlso(cmd *cobra.Command) bool { return false } +// Temporary workaround for yaml lib generating incorrect yaml with long strings +// that do not contain \n. +func forceMultiLine(s string) string { + if len(s) > 60 && !strings.Contains(s, "\n") { + s = s + "\n" + } + return s +} + type byName []*cobra.Command func (s byName) Len() int { return len(s) } diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go new file mode 100644 index 00000000000..ea00af07e81 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go @@ -0,0 +1,169 @@ +// Copyright 2016 French Ben. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package doc + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "gopkg.in/yaml.v2" +) + +type cmdOption struct { + Name string + Shorthand string `yaml:",omitempty"` + DefaultValue string `yaml:"default_value,omitempty"` + Usage string `yaml:",omitempty"` +} + +type cmdDoc struct { + Name string + Synopsis string `yaml:",omitempty"` + Description string `yaml:",omitempty"` + Options []cmdOption `yaml:",omitempty"` + InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` + Example string `yaml:",omitempty"` + SeeAlso []string `yaml:"see_also,omitempty"` +} + +// GenYamlTree creates yaml structured ref files for this command and all descendants +// in the directory given. This function may not work +// correctly if your command names have `-` in them. If you have `cmd` with two +// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` +// it is undefined which help output will be in the file `cmd-sub-third.1`. +func GenYamlTree(cmd *cobra.Command, dir string) error { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + return GenYamlTreeCustom(cmd, dir, emptyStr, identity) +} + +// GenYamlTreeCustom creates yaml structured ref files. +func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenYamlCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} + +// GenYaml creates yaml output. +func GenYaml(cmd *cobra.Command, w io.Writer) error { + return GenYamlCustom(cmd, w, func(s string) string { return s }) +} + +// GenYamlCustom creates custom yaml output. 
+func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + yamlDoc := cmdDoc{} + yamlDoc.Name = cmd.CommandPath() + + yamlDoc.Synopsis = forceMultiLine(cmd.Short) + yamlDoc.Description = forceMultiLine(cmd.Long) + + if len(cmd.Example) > 0 { + yamlDoc.Example = cmd.Example + } + + flags := cmd.NonInheritedFlags() + if flags.HasFlags() { + yamlDoc.Options = genFlagResult(flags) + } + flags = cmd.InheritedFlags() + if flags.HasFlags() { + yamlDoc.InheritedOptions = genFlagResult(flags) + } + + if hasSeeAlso(cmd) { + result := []string{} + if cmd.HasParent() { + parent := cmd.Parent() + result = append(result, parent.CommandPath()+" - "+parent.Short) + } + children := cmd.Commands() + sort.Sort(byName(children)) + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + result = append(result, child.Name()+" - "+child.Short) + } + yamlDoc.SeeAlso = result + } + + final, err := yaml.Marshal(&yamlDoc) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + if _, err := w.Write(final); err != nil { + return err + } + return nil +} + +func genFlagResult(flags *pflag.FlagSet) []cmdOption { + var result []cmdOption + + flags.VisitAll(func(flag *pflag.Flag) { + // Todo, when we mark a shorthand is deprecated, but specify an empty message. + // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. + // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. + if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { + opt := cmdOption{ + flag.Name, + flag.Shorthand, + flag.DefValue, + forceMultiLine(flag.Usage), + } + result = append(result, opt) + } else { + opt := cmdOption{ + Name: flag.Name, + DefaultValue: forceMultiLine(flag.DefValue), + Usage: forceMultiLine(flag.Usage), + } + result = append(result, opt) + } + }) + + return result +} diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md new file mode 100644 index 00000000000..1a9b7c6a3c9 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.md @@ -0,0 +1,112 @@ +# Generating Yaml Docs For Your Own cobra.Command + +Generating yaml files from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + err := doc.GenYamlTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a Yaml document `/tmp/test.yaml` + +## Generate yaml docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "io/ioutil" + "log" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenYamlTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate yaml docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. 
If this is the case you may prefer to use `GenYaml` instead of `GenYamlTree`:
+
+```go
+	out := new(bytes.Buffer)
+	doc.GenYaml(cmd, out)
+```
+
+This will write the yaml doc for ONLY "cmd" into the out buffer.
+
+## Customize the output
+
+Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output:
+
+```go
+func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
+	//...
+}
+```
+
+```go
+func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error {
+	//...
+}
+```
+
+The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/):
+
+```go
+const fmTemplate = `---
+date: %s
+title: "%s"
+slug: %s
+url: %s
+---
+`
+
+filePrepender := func(filename string) string {
+	now := time.Now().Format(time.RFC3339)
+	name := filepath.Base(filename)
+	base := strings.TrimSuffix(name, path.Ext(name))
+	url := "/commands/" + strings.ToLower(base) + "/"
+	return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
+}
+```
+
+The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
+
+```go
+linkHandler := func(name string) string {
+	base := strings.TrimSuffix(name, path.Ext(name))
+	return "/commands/" + strings.ToLower(base) + "/"
+}
+```
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 00000000000..889c22e273c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,126 @@
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// GenZshCompletionFile generates zsh completion file.
+func (c *Command) GenZshCompletionFile(filename string) error {
+	outFile, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	return c.GenZshCompletion(outFile)
+}
+
+// GenZshCompletion generates a zsh completion file and writes to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + + writeHeader(buf, c) + maxDepth := maxDepth(c) + writeLevelMapping(buf, maxDepth) + writeLevelCases(buf, maxDepth, c) + + _, err := buf.WriteTo(w) + return err +} + +func writeHeader(w io.Writer, cmd *Command) { + fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name()) +} + +func maxDepth(c *Command) int { + if len(c.Commands()) == 0 { + return 0 + } + maxDepthSub := 0 + for _, s := range c.Commands() { + subDepth := maxDepth(s) + if subDepth > maxDepthSub { + maxDepthSub = subDepth + } + } + return 1 + maxDepthSub +} + +func writeLevelMapping(w io.Writer, numLevels int) { + fmt.Fprintln(w, `_arguments \`) + for i := 1; i <= numLevels; i++ { + fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i) + fmt.Fprintln(w) + } + fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files") + fmt.Fprintln(w) +} + +func writeLevelCases(w io.Writer, maxDepth int, root *Command) { + fmt.Fprintln(w, "case $state in") + defer fmt.Fprintln(w, "esac") + + for i := 1; i <= maxDepth; i++ { + fmt.Fprintf(w, " level%d)\n", i) + writeLevel(w, root, i) + fmt.Fprintln(w, " ;;") + } + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + fmt.Fprintln(w, " ;;") +} + +func writeLevel(w io.Writer, root *Command, i int) { + fmt.Fprintf(w, " case $words[%d] in\n", i) + defer fmt.Fprintln(w, " esac") + + commands := filterByLevel(root, i) + byParent := groupByParent(commands) + + for p, c := range byParent { + names := names(c) + fmt.Fprintf(w, " %s)\n", p) + fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " ")) + fmt.Fprintln(w, " ;;") + } + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + fmt.Fprintln(w, " ;;") + +} + +func filterByLevel(c *Command, l int) []*Command { + cs := make([]*Command, 0) + if l == 0 { + cs = append(cs, c) + return cs + } + for _, s := range c.Commands() { + cs = append(cs, filterByLevel(s, l-1)...) 
+ } + return cs +} + +func groupByParent(commands []*Command) map[string][]*Command { + m := make(map[string][]*Command) + for _, c := range commands { + parent := c.Parent() + if parent == nil { + continue + } + m[parent.Name()] = append(m[parent.Name()], c) + } + return m +} + +func names(commands []*Command) []string { + ns := make([]string, len(commands)) + for i, c := range commands { + ns[i] = c.Name() + } + return ns +} From 6c54ec59ee7e6e1fa9f1dc311762ab98dc1b3d0a Mon Sep 17 00:00:00 2001 From: Di Xu Date: Mon, 20 Nov 2017 13:21:27 +0800 Subject: [PATCH 032/264] bump pflag --- Godeps/Godeps.json | 2 +- staging/src/k8s.io/api/Godeps/Godeps.json | 2 +- .../Godeps/Godeps.json | 2 +- .../k8s.io/apimachinery/Godeps/Godeps.json | 2 +- .../src/k8s.io/apiserver/Godeps/Godeps.json | 2 +- .../src/k8s.io/client-go/Godeps/Godeps.json | 2 +- .../k8s.io/code-generator/Godeps/Godeps.json | 2 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 2 +- staging/src/k8s.io/metrics/Godeps/Godeps.json | 2 +- .../sample-apiserver/Godeps/Godeps.json | 2 +- .../sample-controller/Godeps/Godeps.json | 2 +- vendor/github.com/spf13/pflag/.travis.yml | 21 +- vendor/github.com/spf13/pflag/BUILD | 1 + vendor/github.com/spf13/pflag/README.md | 25 +- vendor/github.com/spf13/pflag/count.go | 16 +- vendor/github.com/spf13/pflag/flag.go | 286 ++++++++++++------ vendor/github.com/spf13/pflag/int16.go | 88 ++++++ 17 files changed, 332 insertions(+), 127 deletions(-) create mode 100644 vendor/github.com/spf13/pflag/int16.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 592545de505..b66024cfb79 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2482,7 +2482,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/spf13/viper", diff --git a/staging/src/k8s.io/api/Godeps/Godeps.json b/staging/src/k8s.io/api/Godeps/Godeps.json index ffa35e2d980..488056bc433 100644 --- a/staging/src/k8s.io/api/Godeps/Godeps.json +++ b/staging/src/k8s.io/api/Godeps/Godeps.json @@ -68,7 +68,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "golang.org/x/net/http2", diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index f792000d584..226a9e7fda5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -332,7 +332,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/stretchr/testify/assert", diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index a264f944b4e..ae000e588e5 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -152,7 +152,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/stretchr/testify/assert", diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 83c6b74dbaa..7cf45b73d5f 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json 
+++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -552,7 +552,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/stretchr/testify/assert", diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index d9c1c4f0410..610ef120b88 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -256,7 +256,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/stretchr/testify/assert", diff --git a/staging/src/k8s.io/code-generator/Godeps/Godeps.json b/staging/src/k8s.io/code-generator/Godeps/Godeps.json index 239636cbe3d..fdf6be0eb3e 100644 --- a/staging/src/k8s.io/code-generator/Godeps/Godeps.json +++ b/staging/src/k8s.io/code-generator/Godeps/Godeps.json @@ -156,7 +156,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "golang.org/x/net/idna", diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index 32d1debb00a..94035ad7338 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -312,7 +312,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/stretchr/testify/assert", diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index e964a638073..5d7e2f271e9 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -128,7 +128,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "golang.org/x/net/http2", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index a8c8d2a5cad..be93c86b50a 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -300,7 +300,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/ugorji/go/codec", diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 5c0249ded73..3d061e4340e 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -152,7 +152,7 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "golang.org/x/crypto/ssh/terminal", diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml index 707bdc39a7f..f8a63b308ba 100644 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -3,18 +3,19 @@ sudo: false language: go go: - - 1.6.3 - - 1.7.3 - - tip + - 
1.7.3 + - 1.8.1 + - tip matrix: - allow_failures: - - go: tip + allow_failures: + - go: tip + install: - - go get github.com/golang/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... + - go get github.com/golang/lint/golint + - export PATH=$GOPATH/bin:$PATH + - go install ./... script: - - verify/all.sh -v - - go test ./... + - verify/all.sh -v + - go test ./... diff --git a/vendor/github.com/spf13/pflag/BUILD b/vendor/github.com/spf13/pflag/BUILD index e48a6b28889..cb68c486ee2 100644 --- a/vendor/github.com/spf13/pflag/BUILD +++ b/vendor/github.com/spf13/pflag/BUILD @@ -12,6 +12,7 @@ go_library( "float64.go", "golangflag.go", "int.go", + "int16.go", "int32.go", "int64.go", "int8.go", diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index eefb46dec83..b052414d129 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -246,6 +246,25 @@ It is possible to mark a flag as hidden, meaning it will still function as norma flags.MarkHidden("secretFlag") ``` +## Disable sorting of flags +`pflag` allows you to disable sorting of flags for help and usage message. + +**Example**: +```go +flags.BoolP("verbose", "v", false, "verbose output") +flags.String("coolflag", "yeaah", "it's really cool flag") +flags.Int("usefulflag", 777, "sometimes it's very useful") +flags.SortFlags = false +flags.PrintDefaults() +``` +**Output**: +``` + -v, --verbose verbose output + --coolflag string it's really cool flag (default "yeaah") + --usefulflag int sometimes it's very useful (default 777) +``` + + ## Supporting Go flags when using pflag In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary to support flags defined by third-party dependencies (e.g. `golang/glog`). @@ -270,8 +289,8 @@ func main() { You can see the full reference documentation of the pflag package [at godoc.org][3], or through go's standard documentation system by running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/ogier/pflag][2] after +[http://localhost:6060/pkg/github.com/spf13/pflag][2] after installation. -[2]: http://localhost:6060/pkg/github.com/ogier/pflag -[3]: http://godoc.org/github.com/ogier/pflag +[2]: http://localhost:6060/pkg/github.com/spf13/pflag +[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index d22be41f29f..aa126e44d1c 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -11,13 +11,13 @@ func newCountValue(val int, p *int) *countValue { } func (i *countValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - // -1 means that no specific value was passed, so increment - if v == -1 { + // "+1" means that no specific value was passed, so increment + if s == "+1" { *i = countValue(*i + 1) - } else { - *i = countValue(v) + return nil } + v, err := strconv.ParseInt(s, 0, 0) + *i = countValue(v) return err } @@ -54,7 +54,7 @@ func (f *FlagSet) CountVar(p *int, name string, usage string) { // CountVarP is like CountVar only take a shorthand for the flag name. 
func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "-1" + flag.NoOptDefVal = "+1" } // CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set @@ -83,7 +83,9 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int { return p } -// Count like Count only the flag is placed on the CommandLine isntead of a given flag set +// Count defines a count flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +// A count flag will add 1 to its value evey time it is found on the command line func Count(name string, usage string) *int { return CommandLine.CountP(name, "", usage) } diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 746af6327e4..28538c0750b 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -16,9 +16,9 @@ pflag is a drop-in replacement of Go's native flag package. If you import pflag under the name "flag" then all code should continue to function with no changes. - import flag "github.com/ogier/pflag" + import flag "github.com/spf13/pflag" - There is one exception to this: if you directly instantiate the Flag struct +There is one exception to this: if you directly instantiate the Flag struct there is one more field "Shorthand" that you will need to set. Most code never instantiates this struct directly, and instead uses functions such as String(), BoolVar(), and Var(), and is therefore @@ -134,14 +134,21 @@ type FlagSet struct { // a custom error handler. Usage func() + // SortFlags is used to indicate, if user wants to have sorted flags in + // help/usage messages. + SortFlags bool + name string parsed bool actual map[NormalizedName]*Flag + orderedActual []*Flag + sortedActual []*Flag formal map[NormalizedName]*Flag + orderedFormal []*Flag + sortedFormal []*Flag shorthands map[byte]*Flag args []string // arguments after flags argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - exitOnError bool // does the program exit if there's an error? errorHandling ErrorHandling output io.Writer // nil means stderr; use out() accessor interspersed bool // allow interspersed option/non-option args @@ -156,7 +163,7 @@ type Flag struct { Value Value // value as set DefValue string // default value (as text); for usage message Changed bool // If the user set the value (or if left to default) - NoOptDefVal string //default value (as text); if the flag is on the command line without any options + NoOptDefVal string // default value (as text); if the flag is on the command line without any options Deprecated string // If this flag is deprecated, this string is the new or now thing to use Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use @@ -194,11 +201,19 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag { // "--getUrl" which may also be translated to "geturl" and everything will work. 
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { f.normalizeNameFunc = n - for k, v := range f.formal { - delete(f.formal, k) - nname := f.normalizeFlagName(string(k)) - f.formal[nname] = v - v.Name = string(nname) + f.sortedFormal = f.sortedFormal[:0] + for fname, flag := range f.formal { + nname := f.normalizeFlagName(flag.Name) + if fname == nname { + continue + } + flag.Name = string(nname) + delete(f.formal, fname) + f.formal[nname] = flag + if _, set := f.actual[fname]; set { + delete(f.actual, fname) + f.actual[nname] = flag + } } } @@ -229,10 +244,25 @@ func (f *FlagSet) SetOutput(output io.Writer) { f.output = output } -// VisitAll visits the flags in lexicographical order, calling fn for each. +// VisitAll visits the flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. // It visits all flags, even those not set. func (f *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(f.formal) { + if len(f.formal) == 0 { + return + } + + var flags []*Flag + if f.SortFlags { + if len(f.formal) != len(f.sortedFormal) { + f.sortedFormal = sortFlags(f.formal) + } + flags = f.sortedFormal + } else { + flags = f.orderedFormal + } + + for _, flag := range flags { fn(flag) } } @@ -253,22 +283,39 @@ func (f *FlagSet) HasAvailableFlags() bool { return false } -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. +// VisitAll visits the command-line flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits all flags, even those not set. func VisitAll(fn func(*Flag)) { CommandLine.VisitAll(fn) } -// Visit visits the flags in lexicographical order, calling fn for each. +// Visit visits the flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. // It visits only those flags that have been set. func (f *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(f.actual) { + if len(f.actual) == 0 { + return + } + + var flags []*Flag + if f.SortFlags { + if len(f.actual) != len(f.sortedActual) { + f.sortedActual = sortFlags(f.actual) + } + flags = f.sortedActual + } else { + flags = f.orderedActual + } + + for _, flag := range flags { fn(flag) } } -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. +// Visit visits the command-line flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits only those flags that have been set. func Visit(fn func(*Flag)) { CommandLine.Visit(fn) } @@ -278,6 +325,22 @@ func (f *FlagSet) Lookup(name string) *Flag { return f.lookup(f.normalizeFlagName(name)) } +// ShorthandLookup returns the Flag structure of the short handed flag, +// returning nil if none exists. +// It panics, if len(name) > 1. +func (f *FlagSet) ShorthandLookup(name string) *Flag { + if name == "" { + return nil + } + if len(name) > 1 { + msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) + fmt.Fprintf(f.out(), msg) + panic(msg) + } + c := name[0] + return f.shorthands[c] +} + // lookup returns the Flag structure of the named flag, returning nil if none exists. 
func (f *FlagSet) lookup(name NormalizedName) *Flag { return f.formal[name] @@ -319,7 +382,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { if flag == nil { return fmt.Errorf("flag %q does not exist", name) } - if len(usageMessage) == 0 { + if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) } flag.Deprecated = usageMessage @@ -334,7 +397,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro if flag == nil { return fmt.Errorf("flag %q does not exist", name) } - if len(usageMessage) == 0 { + if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) } flag.ShorthandDeprecated = usageMessage @@ -358,6 +421,12 @@ func Lookup(name string) *Flag { return CommandLine.Lookup(name) } +// ShorthandLookup returns the Flag structure of the short handed flag, +// returning nil if none exists. +func ShorthandLookup(name string) *Flag { + return CommandLine.ShorthandLookup(name) +} + // Set sets the value of the named flag. func (f *FlagSet) Set(name, value string) error { normalName := f.normalizeFlagName(name) @@ -365,17 +434,30 @@ func (f *FlagSet) Set(name, value string) error { if !ok { return fmt.Errorf("no such flag -%v", name) } + err := flag.Value.Set(value) if err != nil { - return err + var flagName string + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) + } else { + flagName = fmt.Sprintf("--%s", flag.Name) + } + return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) + + if !flag.Changed { + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + f.orderedActual = append(f.orderedActual, flag) + + flag.Changed = true } - f.actual[normalName] = flag - flag.Changed = true - if len(flag.Deprecated) > 0 { - fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + + if flag.Deprecated != "" { + fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) } return nil } @@ -482,6 +564,14 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = "int" case "uint64": name = "uint" + case "stringSlice": + name = "strings" + case "intSlice": + name = "ints" + case "uintSlice": + name = "uints" + case "boolSlice": + name = "bools" } return @@ -557,28 +647,28 @@ func wrap(i, w int, s string) string { // for all flags in the FlagSet. 
Wrapped to `cols` columns (0 for no // wrapping) func (f *FlagSet) FlagUsagesWrapped(cols int) string { - x := new(bytes.Buffer) + buf := new(bytes.Buffer) lines := make([]string, 0, len(f.formal)) maxlen := 0 f.VisitAll(func(flag *Flag) { - if len(flag.Deprecated) > 0 || flag.Hidden { + if flag.Deprecated != "" || flag.Hidden { return } line := "" - if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) } else { line = fmt.Sprintf(" --%s", flag.Name) } varname, usage := UnquoteUsage(flag) - if len(varname) > 0 { + if varname != "" { line += " " + varname } - if len(flag.NoOptDefVal) > 0 { + if flag.NoOptDefVal != "" { switch flag.Value.Type() { case "string": line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) @@ -586,6 +676,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } + case "count": + if flag.NoOptDefVal != "+1" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } default: line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -601,7 +695,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { line += usage if !flag.defaultIsZeroValue() { if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default \"%s\")", flag.DefValue) + line += fmt.Sprintf(" (default %q)", flag.DefValue) } else { line += fmt.Sprintf(" (default %s)", flag.DefValue) } @@ -614,10 +708,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { sidx := strings.Index(line, "\x00") spacing := strings.Repeat(" ", maxlen-sidx) // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(x, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) + fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) } - return x.String() + return buf.String() } // FlagUsages returns a string containing the usage information for all flags in @@ -714,11 +808,10 @@ func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { // AddFlag will add the flag to the FlagSet func (f *FlagSet) AddFlag(flag *Flag) { - // Call normalizeFlagName function only once normalizedFlagName := f.normalizeFlagName(flag.Name) - _, alreadythere := f.formal[normalizedFlagName] - if alreadythere { + _, alreadyThere := f.formal[normalizedFlagName] + if alreadyThere { msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) fmt.Fprintln(f.out(), msg) panic(msg) // Happens only if flags are declared with identical names @@ -729,28 +822,31 @@ func (f *FlagSet) AddFlag(flag *Flag) { flag.Name = string(normalizedFlagName) f.formal[normalizedFlagName] = flag + f.orderedFormal = append(f.orderedFormal, flag) - if len(flag.Shorthand) == 0 { + if flag.Shorthand == "" { return } if len(flag.Shorthand) > 1 { - fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) - panic("shorthand is more than one character") + msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) + fmt.Fprintf(f.out(), msg) + panic(msg) } if f.shorthands == nil { f.shorthands = make(map[byte]*Flag) } c := flag.Shorthand[0] - old, alreadythere := f.shorthands[c] - if alreadythere { - fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name) - panic("shorthand redefinition") + used, alreadyThere := f.shorthands[c] + if alreadyThere { + msg := fmt.Sprintf("unable to 
redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) + fmt.Fprintf(f.out(), msg) + panic(msg) } f.shorthands[c] = flag } // AddFlagSet adds one FlagSet to another. If a flag is already present in f -// the flag from newSet will be ignored +// the flag from newSet will be ignored. func (f *FlagSet) AddFlagSet(newSet *FlagSet) { if newSet == nil { return @@ -781,8 +877,10 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) - fmt.Fprintln(f.out(), err) - f.usage() + if f.errorHandling != ContinueOnError { + fmt.Fprintln(f.out(), err) + f.usage() + } return err } @@ -798,34 +896,6 @@ func (f *FlagSet) usage() { } } -func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { - if err := flag.Value.Set(value); err != nil { - return f.failf("invalid argument %q for %s: %v", value, origArg, err) - } - // mark as visited for Visit() - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[f.normalizeFlagName(flag.Name)] = flag - flag.Changed = true - if len(flag.Deprecated) > 0 { - fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) { - fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) - } - return nil -} - -func containsShorthand(arg, shorthand string) bool { - // filter out flags -- - if strings.HasPrefix(arg, "-") { - return false - } - arg = strings.SplitN(arg, "=", 2)[0] - return strings.Contains(arg, shorthand) -} - func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { a = args name := s[2:] @@ -833,10 +903,11 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin err = f.failf("bad flag syntax: %s", s) return } + split := strings.SplitN(name, "=", 2) name = split[0] - flag, alreadythere := f.formal[f.normalizeFlagName(name)] - if !alreadythere { + flag, exists := f.formal[f.normalizeFlagName(name)] + if !exists { if name == "help" { // special case for nice help message. f.usage() return a, ErrHelp @@ -844,11 +915,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin err = f.failf("unknown flag: --%s", name) return } + var value string if len(split) == 2 { // '--flag=arg' value = split[1] - } else if len(flag.NoOptDefVal) > 0 { + } else if flag.NoOptDefVal != "" { // '--flag' (arg was optional) value = flag.NoOptDefVal } else if len(a) > 0 { @@ -860,7 +932,11 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin err = f.failf("flag needs an argument: %s", s) return } - err = fn(flag, value, s) + + err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } return } @@ -868,38 +944,52 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse if strings.HasPrefix(shorthands, "test.") { return } + outArgs = args outShorts = shorthands[1:] c := shorthands[0] - flag, alreadythere := f.shorthands[c] - if !alreadythere { + flag, exists := f.shorthands[c] + if !exists { if c == 'h' { // special case for nice help message. 
f.usage() err = ErrHelp return } - //TODO continue on error err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) return } + var value string if len(shorthands) > 2 && shorthands[1] == '=' { + // '-f=arg' value = shorthands[2:] outShorts = "" - } else if len(flag.NoOptDefVal) > 0 { + } else if flag.NoOptDefVal != "" { + // '-f' (arg was optional) value = flag.NoOptDefVal } else if len(shorthands) > 1 { + // '-farg' value = shorthands[1:] outShorts = "" } else if len(args) > 0 { + // '-f arg' value = args[0] outArgs = args[1:] } else { + // '-f' (arg was required) err = f.failf("flag needs an argument: %q in -%s", c, shorthands) return } - err = fn(flag, value, shorthands) + + if flag.ShorthandDeprecated != "" { + fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + } + + err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } return } @@ -907,6 +997,7 @@ func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []stri a = args shorthands := s[1:] + // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). for len(shorthands) > 0 { shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) if err != nil { @@ -954,18 +1045,24 @@ func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { // The return value will be ErrHelp if -help was set but not defined. func (f *FlagSet) Parse(arguments []string) error { f.parsed = true - f.args = make([]string, 0, len(arguments)) - assign := func(flag *Flag, value, origArg string) error { - return f.setFlag(flag, value, origArg) + if len(arguments) < 0 { + return nil } - err := f.parseArgs(arguments, assign) + f.args = make([]string, 0, len(arguments)) + + set := func(flag *Flag, value string) error { + return f.Set(flag.Name, value) + } + + err := f.parseArgs(arguments, set) if err != nil { switch f.errorHandling { case ContinueOnError: return err case ExitOnError: + fmt.Println(err) os.Exit(2) case PanicOnError: panic(err) @@ -974,7 +1071,7 @@ func (f *FlagSet) Parse(arguments []string) error { return nil } -type parseFunc func(flag *Flag, value, origArg string) error +type parseFunc func(flag *Flag, value string) error // ParseAll parses flag definitions from the argument list, which should not // include the command name. The arguments for fn are flag and value. Must be @@ -985,11 +1082,7 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) f.parsed = true f.args = make([]string, 0, len(arguments)) - assign := func(flag *Flag, value, origArg string) error { - return fn(flag, value) - } - - err := f.parseArgs(arguments, assign) + err := f.parseArgs(arguments, fn) if err != nil { switch f.errorHandling { case ContinueOnError: @@ -1036,14 +1129,15 @@ func Parsed() bool { // CommandLine is the default set of command-line flags, parsed from os.Args. var CommandLine = NewFlagSet(os.Args[0], ExitOnError) -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. +// NewFlagSet returns a new, empty flag set with the specified name, +// error handling property and SortFlags set to true. 
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { f := &FlagSet{ name: name, errorHandling: errorHandling, argsLenAtDash: -1, interspersed: true, + SortFlags: true, } return f } diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go new file mode 100644 index 00000000000..f1a01d05e69 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int16 Value +type int16Value int16 + +func newInt16Value(val int16, p *int16) *int16Value { + *p = val + return (*int16Value)(p) +} + +func (i *int16Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 16) + *i = int16Value(v) + return err +} + +func (i *int16Value) Type() string { + return "int16" +} + +func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 16) + if err != nil { + return 0, err + } + return int16(v), nil +} + +// GetInt16 returns the int16 value of a flag with the given name +func (f *FlagSet) GetInt16(name string) (int16, error) { + val, err := f.getFlagType(name, "int16", int16Conv) + if err != nil { + return 0, err + } + return val.(int16), nil +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func Int16Var(p *int16, name string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, "", value, usage) + return p +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, shorthand, value, usage) + return p +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func Int16(name string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, "", value, usage) +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. 
+func Int16P(name, shorthand string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, shorthand, value, usage) +} From 1c715d51c42cafb042b4bb8b03e82a52c5070409 Mon Sep 17 00:00:00 2001 From: Di Xu Date: Wed, 11 Oct 2017 14:26:02 +0800 Subject: [PATCH 033/264] some code change fix wrong required flags disable the addition of [flags] to the usage, use customized useline fix function rename --- cmd/genkubedocs/postprocessing.go | 2 +- cmd/kubeadm/app/cmd/join.go | 2 +- cmd/kubeadm/app/cmd/token.go | 6 +++-- cmd/kubeadm/app/cmd/upgrade/apply.go | 3 ++- pkg/kubectl/cmd/annotate.go | 3 ++- pkg/kubectl/cmd/apply.go | 3 ++- pkg/kubectl/cmd/apply_edit_last_applied.go | 3 ++- pkg/kubectl/cmd/apply_set_last_applied.go | 3 ++- pkg/kubectl/cmd/apply_view_last_applied.go | 3 ++- pkg/kubectl/cmd/attach.go | 3 ++- pkg/kubectl/cmd/auth/cani.go | 3 ++- pkg/kubectl/cmd/auth/reconcile.go | 3 ++- pkg/kubectl/cmd/autoscale.go | 3 ++- pkg/kubectl/cmd/certificates.go | 9 ++++--- pkg/kubectl/cmd/completion.go | 3 ++- pkg/kubectl/cmd/config/config.go | 3 ++- pkg/kubectl/cmd/config/create_authinfo.go | 3 ++- pkg/kubectl/cmd/config/create_cluster.go | 3 ++- pkg/kubectl/cmd/config/create_context.go | 3 ++- pkg/kubectl/cmd/config/delete_cluster.go | 3 ++- pkg/kubectl/cmd/config/delete_context.go | 3 ++- pkg/kubectl/cmd/config/get_contexts.go | 3 ++- pkg/kubectl/cmd/config/rename_context.go | 3 ++- pkg/kubectl/cmd/config/set.go | 3 ++- pkg/kubectl/cmd/config/unset.go | 3 ++- pkg/kubectl/cmd/config/use_context.go | 3 ++- pkg/kubectl/cmd/convert.go | 3 ++- pkg/kubectl/cmd/cp.go | 3 ++- pkg/kubectl/cmd/create.go | 3 ++- pkg/kubectl/cmd/create_clusterrole.go | 3 ++- pkg/kubectl/cmd/create_clusterrolebinding.go | 3 ++- pkg/kubectl/cmd/create_configmap.go | 11 ++++---- pkg/kubectl/cmd/create_deployment.go | 11 ++++---- pkg/kubectl/cmd/create_namespace.go | 11 ++++---- pkg/kubectl/cmd/create_pdb.go | 11 ++++---- pkg/kubectl/cmd/create_priorityclass.go | 11 ++++---- pkg/kubectl/cmd/create_quota.go | 11 ++++---- pkg/kubectl/cmd/create_role.go | 3 ++- pkg/kubectl/cmd/create_rolebinding.go | 3 ++- pkg/kubectl/cmd/create_secret.go | 9 ++++--- pkg/kubectl/cmd/create_service.go | 12 ++++++--- pkg/kubectl/cmd/create_serviceaccount.go | 11 ++++---- pkg/kubectl/cmd/delete.go | 3 ++- pkg/kubectl/cmd/delete_test.go | 1 + pkg/kubectl/cmd/describe.go | 3 ++- pkg/kubectl/cmd/describe_test.go | 28 ++++++++++++++++++++ pkg/kubectl/cmd/diff.go | 3 ++- pkg/kubectl/cmd/drain.go | 9 ++++--- pkg/kubectl/cmd/edit.go | 3 ++- pkg/kubectl/cmd/exec.go | 3 ++- pkg/kubectl/cmd/explain.go | 3 ++- pkg/kubectl/cmd/expose.go | 3 ++- pkg/kubectl/cmd/help.go | 3 ++- pkg/kubectl/cmd/label.go | 3 ++- pkg/kubectl/cmd/logs.go | 3 ++- pkg/kubectl/cmd/patch.go | 3 ++- pkg/kubectl/cmd/plugin.go | 3 ++- pkg/kubectl/cmd/portforward.go | 3 ++- pkg/kubectl/cmd/proxy.go | 3 ++- pkg/kubectl/cmd/replace.go | 3 ++- pkg/kubectl/cmd/resource/get.go | 3 ++- pkg/kubectl/cmd/rollingupdate.go | 5 ++-- pkg/kubectl/cmd/rollout/rollout.go | 3 ++- pkg/kubectl/cmd/rollout/rollout_history.go | 3 ++- pkg/kubectl/cmd/rollout/rollout_pause.go | 3 ++- pkg/kubectl/cmd/rollout/rollout_resume.go | 3 ++- pkg/kubectl/cmd/rollout/rollout_status.go | 3 ++- pkg/kubectl/cmd/rollout/rollout_undo.go | 3 ++- pkg/kubectl/cmd/run.go | 3 ++- pkg/kubectl/cmd/scale.go | 3 ++- pkg/kubectl/cmd/set/set.go | 3 ++- pkg/kubectl/cmd/set/set_env.go | 3 ++- pkg/kubectl/cmd/set/set_image.go | 3 ++- pkg/kubectl/cmd/set/set_resources.go | 3 ++- pkg/kubectl/cmd/set/set_selector.go | 3 ++- 
pkg/kubectl/cmd/set/set_serviceaccount.go | 11 ++++---- pkg/kubectl/cmd/set/set_subject.go | 3 ++- pkg/kubectl/cmd/taint.go | 3 ++- pkg/kubectl/cmd/top_node.go | 3 ++- pkg/kubectl/cmd/top_pod.go | 3 ++- 80 files changed, 235 insertions(+), 122 deletions(-) diff --git a/cmd/genkubedocs/postprocessing.go b/cmd/genkubedocs/postprocessing.go index 57b6d95e058..c51e1443bc3 100644 --- a/cmd/genkubedocs/postprocessing.go +++ b/cmd/genkubedocs/postprocessing.go @@ -27,7 +27,7 @@ import ( // MarkdownPostProcessing goes though the generated files func MarkdownPostProcessing(cmd *cobra.Command, dir string, processor func(string) string) error { for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsHelpCommand() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { continue } if err := MarkdownPostProcessing(c, dir, processor); err != nil { diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index 2bc32fe4d72..14d7acce072 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -111,7 +111,7 @@ func NewCmdJoin(out io.Writer) *cobra.Command { var ignorePreflightErrors []string cmd := &cobra.Command{ - Use: "join [flags]", + Use: "join", Short: "Run this on any machine you wish to join an existing cluster", Long: joinLongDescription, Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index 87b8d077750..3c3799857a7 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -101,7 +101,8 @@ func NewCmdToken(out io.Writer, errW io.Writer) *cobra.Command { var description string var printJoinCommand bool createCmd := &cobra.Command{ - Use: "create [token]", + Use: "create [token]", + DisableFlagsInUseLine: true, Short: "Create bootstrap tokens on the server.", Long: dedent.Dedent(` This command will create a bootstrap token for you. @@ -155,7 +156,8 @@ func NewCmdToken(out io.Writer, errW io.Writer) *cobra.Command { tokenCmd.AddCommand(listCmd) deleteCmd := &cobra.Command{ - Use: "delete [token-value]", + Use: "delete [token-value]", + DisableFlagsInUseLine: true, Short: "Delete bootstrap tokens on the server.", Long: dedent.Dedent(` This command will delete a given bootstrap token for you. diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 08629ef2b78..12101083c1b 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -69,7 +69,8 @@ func NewCmdApply(parentFlags *cmdUpgradeFlags) *cobra.Command { } cmd := &cobra.Command{ - Use: "apply [version]", + Use: "apply [version]", + DisableFlagsInUseLine: true, Short: "Upgrade your Kubernetes cluster to the specified version.", Run: func(cmd *cobra.Command, args []string) { var err error diff --git a/pkg/kubectl/cmd/annotate.go b/pkg/kubectl/cmd/annotate.go index 09541ea0e24..0637528d7e7 100644 --- a/pkg/kubectl/cmd/annotate.go +++ b/pkg/kubectl/cmd/annotate.go @@ -113,7 +113,8 @@ func NewCmdAnnotate(f cmdutil.Factory, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]", + Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... 
KEY_N=VAL_N [--resource-version=version]", + DisableFlagsInUseLine: true, Short: i18n.T("Update the annotations on a resource"), Long: annotateLong + "\n\n" + cmdutil.ValidResourceTypeList(f), Example: annotateExample, diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index e132743231c..9bb6d194f81 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -105,7 +105,8 @@ func NewCmdApply(baseName string, f cmdutil.Factory, out, errOut io.Writer) *cob options.cmdBaseName = baseName cmd := &cobra.Command{ - Use: "apply -f FILENAME", + Use: "apply -f FILENAME", + DisableFlagsInUseLine: true, Short: i18n.T("Apply a configuration to a resource by filename or stdin"), Long: applyLong, Example: applyExample, diff --git a/pkg/kubectl/cmd/apply_edit_last_applied.go b/pkg/kubectl/cmd/apply_edit_last_applied.go index ca4868fa5b8..f12bb981e87 100644 --- a/pkg/kubectl/cmd/apply_edit_last_applied.go +++ b/pkg/kubectl/cmd/apply_edit_last_applied.go @@ -76,7 +76,8 @@ func NewCmdApplyEditLastApplied(f cmdutil.Factory, out, errOut io.Writer) *cobra } cmd := &cobra.Command{ - Use: "edit-last-applied (RESOURCE/NAME | -f FILENAME)", + Use: "edit-last-applied (RESOURCE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, Short: "Edit latest last-applied-configuration annotations of a resource/object", Long: applyEditLastAppliedLong, Example: applyEditLastAppliedExample, diff --git a/pkg/kubectl/cmd/apply_set_last_applied.go b/pkg/kubectl/cmd/apply_set_last_applied.go index 640009b2788..cc455f6a4c3 100644 --- a/pkg/kubectl/cmd/apply_set_last_applied.go +++ b/pkg/kubectl/cmd/apply_set_last_applied.go @@ -84,7 +84,8 @@ var ( func NewCmdApplySetLastApplied(f cmdutil.Factory, out, err io.Writer) *cobra.Command { options := &SetLastAppliedOptions{Out: out, ErrOut: err} cmd := &cobra.Command{ - Use: "set-last-applied -f FILENAME", + Use: "set-last-applied -f FILENAME", + DisableFlagsInUseLine: true, Short: i18n.T("Set the last-applied-configuration annotation on a live object to match the contents of a file."), Long: applySetLastAppliedLong, Example: applySetLastAppliedExample, diff --git a/pkg/kubectl/cmd/apply_view_last_applied.go b/pkg/kubectl/cmd/apply_view_last_applied.go index 7282ae6265f..b0ebb1af820 100644 --- a/pkg/kubectl/cmd/apply_view_last_applied.go +++ b/pkg/kubectl/cmd/apply_view_last_applied.go @@ -60,7 +60,8 @@ var ( func NewCmdApplyViewLastApplied(f cmdutil.Factory, out, err io.Writer) *cobra.Command { options := &ViewLastAppliedOptions{Out: out, ErrOut: err} cmd := &cobra.Command{ - Use: "view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)", + Use: "view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, Short: i18n.T("View latest last-applied-configuration annotations of a resource/object"), Long: applyViewLastAppliedLong, Example: applyViewLastAppliedExample, diff --git a/pkg/kubectl/cmd/attach.go b/pkg/kubectl/cmd/attach.go index 7bc49ca06da..1b188d10853 100644 --- a/pkg/kubectl/cmd/attach.go +++ b/pkg/kubectl/cmd/attach.go @@ -71,7 +71,8 @@ func NewCmdAttach(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) Attach: &DefaultRemoteAttach{}, } cmd := &cobra.Command{ - Use: "attach (POD | TYPE/NAME) -c CONTAINER", + Use: "attach (POD | TYPE/NAME) -c CONTAINER", + DisableFlagsInUseLine: true, Short: i18n.T("Attach to a running container"), Long: "Attach to a process that is already running inside an existing container.", Example: attachExample, diff --git a/pkg/kubectl/cmd/auth/cani.go 
b/pkg/kubectl/cmd/auth/cani.go index 97e461a46fc..7484cab1895 100644 --- a/pkg/kubectl/cmd/auth/cani.go +++ b/pkg/kubectl/cmd/auth/cani.go @@ -88,7 +88,8 @@ func NewCmdCanI(f cmdutil.Factory, out, err io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "can-i VERB [TYPE | TYPE/NAME | NONRESOURCEURL]", + Use: "can-i VERB [TYPE | TYPE/NAME | NONRESOURCEURL]", + DisableFlagsInUseLine: true, Short: "Check whether an action is allowed", Long: canILong, Example: canIExample, diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index 0c65c954c38..377d241a00b 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -64,7 +64,8 @@ func NewCmdReconcile(f cmdutil.Factory, out, err io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "reconcile -f FILENAME", + Use: "reconcile -f FILENAME", + DisableFlagsInUseLine: true, Short: "Reconciles rules for RBAC Role, RoleBinding, ClusterRole, and ClusterRole binding objects", Long: reconcileLong, Example: reconcileExample, diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index 30406934fce..d87146cd073 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -52,7 +52,8 @@ func NewCmdAutoscale(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU] [flags]", + Use: "autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU]", + DisableFlagsInUseLine: true, Short: i18n.T("Auto-scale a Deployment, ReplicaSet, or ReplicationController"), Long: autoscaleLong, Example: autoscaleExample, diff --git a/pkg/kubectl/cmd/certificates.go b/pkg/kubectl/cmd/certificates.go index bc2974322c2..ee51e4b266c 100644 --- a/pkg/kubectl/cmd/certificates.go +++ b/pkg/kubectl/cmd/certificates.go @@ -32,7 +32,8 @@ import ( func NewCmdCertificate(f cmdutil.Factory, out io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "certificate SUBCOMMAND", + Use: "certificate SUBCOMMAND", + DisableFlagsInUseLine: true, Short: i18n.T("Modify certificate resources."), Long: "Modify certificate resources.", Run: func(cmd *cobra.Command, args []string) { @@ -68,7 +69,8 @@ func (options *CertificateOptions) Validate() error { func NewCmdCertificateApprove(f cmdutil.Factory, out io.Writer) *cobra.Command { options := CertificateOptions{} cmd := &cobra.Command{ - Use: "approve (-f FILENAME | NAME)", + Use: "approve (-f FILENAME | NAME)", + DisableFlagsInUseLine: true, Short: i18n.T("Approve a certificate signing request"), Long: templates.LongDesc(` Approve a certificate signing request. @@ -118,7 +120,8 @@ func (options *CertificateOptions) RunCertificateApprove(f cmdutil.Factory, out func NewCmdCertificateDeny(f cmdutil.Factory, out io.Writer) *cobra.Command { options := CertificateOptions{} cmd := &cobra.Command{ - Use: "deny (-f FILENAME | NAME)", + Use: "deny (-f FILENAME | NAME)", + DisableFlagsInUseLine: true, Short: i18n.T("Deny a certificate signing request"), Long: templates.LongDesc(` Deny a certificate signing request. 
diff --git a/pkg/kubectl/cmd/completion.go b/pkg/kubectl/cmd/completion.go index d14ca66329b..d6d9a3506ca 100644 --- a/pkg/kubectl/cmd/completion.go +++ b/pkg/kubectl/cmd/completion.go @@ -97,7 +97,8 @@ func NewCmdCompletion(out io.Writer, boilerPlate string) *cobra.Command { } cmd := &cobra.Command{ - Use: "completion SHELL", + Use: "completion SHELL", + DisableFlagsInUseLine: true, Short: i18n.T("Output shell completion code for the specified shell (bash or zsh)"), Long: completion_long, Example: completion_example, diff --git a/pkg/kubectl/cmd/config/config.go b/pkg/kubectl/cmd/config/config.go index b63d1cf15f3..7984eb5e15f 100644 --- a/pkg/kubectl/cmd/config/config.go +++ b/pkg/kubectl/cmd/config/config.go @@ -37,7 +37,8 @@ func NewCmdConfig(f cmdutil.Factory, pathOptions *clientcmd.PathOptions, out, er } cmd := &cobra.Command{ - Use: "config SUBCOMMAND", + Use: "config SUBCOMMAND", + DisableFlagsInUseLine: true, Short: i18n.T("Modify kubeconfig files"), Long: templates.LongDesc(` Modify kubeconfig files using subcommands like "kubectl config set current-context my-context" diff --git a/pkg/kubectl/cmd/config/create_authinfo.go b/pkg/kubectl/cmd/config/create_authinfo.go index 7430a7c0bc4..368d47c0fb3 100644 --- a/pkg/kubectl/cmd/config/create_authinfo.go +++ b/pkg/kubectl/cmd/config/create_authinfo.go @@ -100,7 +100,8 @@ func NewCmdConfigSetAuthInfo(out io.Writer, configAccess clientcmd.ConfigAccess) func newCmdConfigSetAuthInfo(out io.Writer, options *createAuthInfoOptions) *cobra.Command { cmd := &cobra.Command{ - Use: fmt.Sprintf("set-credentials NAME [--%v=path/to/certfile] [--%v=path/to/keyfile] [--%v=bearer_token] [--%v=basic_user] [--%v=basic_password] [--%v=provider_name] [--%v=key=value]", clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword, flagAuthProvider, flagAuthProviderArg), + Use: fmt.Sprintf("set-credentials NAME [--%v=path/to/certfile] [--%v=path/to/keyfile] [--%v=bearer_token] [--%v=basic_user] [--%v=basic_password] [--%v=provider_name] [--%v=key=value]", clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword, flagAuthProvider, flagAuthProviderArg), + DisableFlagsInUseLine: true, Short: i18n.T("Sets a user entry in kubeconfig"), Long: create_authinfo_long, Example: create_authinfo_example, diff --git a/pkg/kubectl/cmd/config/create_cluster.go b/pkg/kubectl/cmd/config/create_cluster.go index 2b4a47a1d84..f1f2d091bca 100644 --- a/pkg/kubectl/cmd/config/create_cluster.go +++ b/pkg/kubectl/cmd/config/create_cluster.go @@ -63,7 +63,8 @@ func NewCmdConfigSetCluster(out io.Writer, configAccess clientcmd.ConfigAccess) options := &createClusterOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: fmt.Sprintf("set-cluster NAME [--%v=server] [--%v=path/to/certificate/authority] [--%v=true]", clientcmd.FlagAPIServer, clientcmd.FlagCAFile, clientcmd.FlagInsecure), + Use: fmt.Sprintf("set-cluster NAME [--%v=server] [--%v=path/to/certificate/authority] [--%v=true]", clientcmd.FlagAPIServer, clientcmd.FlagCAFile, clientcmd.FlagInsecure), + DisableFlagsInUseLine: true, Short: i18n.T("Sets a cluster entry in kubeconfig"), Long: create_cluster_long, Example: create_cluster_example, diff --git a/pkg/kubectl/cmd/config/create_context.go b/pkg/kubectl/cmd/config/create_context.go index e3351488c06..d5231729165 100644 --- a/pkg/kubectl/cmd/config/create_context.go +++ b/pkg/kubectl/cmd/config/create_context.go @@ -54,7 +54,8 @@ func 
NewCmdConfigSetContext(out io.Writer, configAccess clientcmd.ConfigAccess) options := &createContextOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: fmt.Sprintf("set-context NAME [--%v=cluster_nickname] [--%v=user_nickname] [--%v=namespace]", clientcmd.FlagClusterName, clientcmd.FlagAuthInfoName, clientcmd.FlagNamespace), + Use: fmt.Sprintf("set-context NAME [--%v=cluster_nickname] [--%v=user_nickname] [--%v=namespace]", clientcmd.FlagClusterName, clientcmd.FlagAuthInfoName, clientcmd.FlagNamespace), + DisableFlagsInUseLine: true, Short: i18n.T("Sets a context entry in kubeconfig"), Long: create_context_long, Example: create_context_example, diff --git a/pkg/kubectl/cmd/config/delete_cluster.go b/pkg/kubectl/cmd/config/delete_cluster.go index 136524a8fc3..a9e00ce8348 100644 --- a/pkg/kubectl/cmd/config/delete_cluster.go +++ b/pkg/kubectl/cmd/config/delete_cluster.go @@ -35,7 +35,8 @@ var ( func NewCmdConfigDeleteCluster(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { cmd := &cobra.Command{ - Use: "delete-cluster NAME", + Use: "delete-cluster NAME", + DisableFlagsInUseLine: true, Short: i18n.T("Delete the specified cluster from the kubeconfig"), Long: "Delete the specified cluster from the kubeconfig", Example: delete_cluster_example, diff --git a/pkg/kubectl/cmd/config/delete_context.go b/pkg/kubectl/cmd/config/delete_context.go index 6d834841c76..d6dad310221 100644 --- a/pkg/kubectl/cmd/config/delete_context.go +++ b/pkg/kubectl/cmd/config/delete_context.go @@ -35,7 +35,8 @@ var ( func NewCmdConfigDeleteContext(out, errOut io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { cmd := &cobra.Command{ - Use: "delete-context NAME", + Use: "delete-context NAME", + DisableFlagsInUseLine: true, Short: i18n.T("Delete the specified context from the kubeconfig"), Long: "Delete the specified context from the kubeconfig", Example: delete_context_example, diff --git a/pkg/kubectl/cmd/config/get_contexts.go b/pkg/kubectl/cmd/config/get_contexts.go index b018a52dca5..b392bdb3961 100644 --- a/pkg/kubectl/cmd/config/get_contexts.go +++ b/pkg/kubectl/cmd/config/get_contexts.go @@ -61,7 +61,8 @@ func NewCmdConfigGetContexts(out io.Writer, configAccess clientcmd.ConfigAccess) options := &GetContextsOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: "get-contexts [(-o|--output=)name)]", + Use: "get-contexts [(-o|--output=)name)]", + DisableFlagsInUseLine: true, Short: i18n.T("Describe one or many contexts"), Long: getContextsLong, Example: getContextsExample, diff --git a/pkg/kubectl/cmd/config/rename_context.go b/pkg/kubectl/cmd/config/rename_context.go index 3216a7c83ea..b5fc2e728de 100644 --- a/pkg/kubectl/cmd/config/rename_context.go +++ b/pkg/kubectl/cmd/config/rename_context.go @@ -61,7 +61,8 @@ func NewCmdConfigRenameContext(out io.Writer, configAccess clientcmd.ConfigAcces options := &RenameContextOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: renameContextUse, + Use: renameContextUse, + DisableFlagsInUseLine: true, Short: renameContextShort, Long: renameContextLong, Example: renameContextExample, diff --git a/pkg/kubectl/cmd/config/set.go b/pkg/kubectl/cmd/config/set.go index 03f97580a08..4374b74b201 100644 --- a/pkg/kubectl/cmd/config/set.go +++ b/pkg/kubectl/cmd/config/set.go @@ -56,7 +56,8 @@ func NewCmdConfigSet(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra. 
options := &setOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: "set PROPERTY_NAME PROPERTY_VALUE", + Use: "set PROPERTY_NAME PROPERTY_VALUE", + DisableFlagsInUseLine: true, Short: i18n.T("Sets an individual value in a kubeconfig file"), Long: set_long, Run: func(cmd *cobra.Command, args []string) { diff --git a/pkg/kubectl/cmd/config/unset.go b/pkg/kubectl/cmd/config/unset.go index 3d62038301b..201c6f9d313 100644 --- a/pkg/kubectl/cmd/config/unset.go +++ b/pkg/kubectl/cmd/config/unset.go @@ -44,7 +44,8 @@ func NewCmdConfigUnset(out io.Writer, configAccess clientcmd.ConfigAccess) *cobr options := &unsetOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: "unset PROPERTY_NAME", + Use: "unset PROPERTY_NAME", + DisableFlagsInUseLine: true, Short: i18n.T("Unsets an individual value in a kubeconfig file"), Long: unset_long, Run: func(cmd *cobra.Command, args []string) { diff --git a/pkg/kubectl/cmd/config/use_context.go b/pkg/kubectl/cmd/config/use_context.go index 38f77eea2c3..f19a92cce0d 100644 --- a/pkg/kubectl/cmd/config/use_context.go +++ b/pkg/kubectl/cmd/config/use_context.go @@ -45,7 +45,8 @@ func NewCmdConfigUseContext(out io.Writer, configAccess clientcmd.ConfigAccess) options := &useContextOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: "use-context CONTEXT_NAME", + Use: "use-context CONTEXT_NAME", + DisableFlagsInUseLine: true, Short: i18n.T("Sets the current-context in a kubeconfig file"), Aliases: []string{"use"}, Long: `Sets the current-context in a kubeconfig file`, diff --git a/pkg/kubectl/cmd/convert.go b/pkg/kubectl/cmd/convert.go index 285b726011d..aa5b31cdb1c 100644 --- a/pkg/kubectl/cmd/convert.go +++ b/pkg/kubectl/cmd/convert.go @@ -65,7 +65,8 @@ func NewCmdConvert(f cmdutil.Factory, out io.Writer) *cobra.Command { options := &ConvertOptions{} cmd := &cobra.Command{ - Use: "convert -f FILENAME", + Use: "convert -f FILENAME", + DisableFlagsInUseLine: true, Short: i18n.T("Convert config files between different API versions"), Long: convert_long, Example: convert_example, diff --git a/pkg/kubectl/cmd/cp.go b/pkg/kubectl/cmd/cp.go index a5d67de3f96..acbb2eff38a 100644 --- a/pkg/kubectl/cmd/cp.go +++ b/pkg/kubectl/cmd/cp.go @@ -64,7 +64,8 @@ var ( // NewCmdCp creates a new Copy command. 
func NewCmdCp(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "cp <file-spec-src> <file-spec-dest>", + Use: "cp <file-spec-src> <file-spec-dest>", + DisableFlagsInUseLine: true, Short: i18n.T("Copy files and directories to and from containers."), Long: "Copy files and directories to and from containers.", Example: cpExample, diff --git a/pkg/kubectl/cmd/create.go b/pkg/kubectl/cmd/create.go index 5c00d69acd3..b23dee7b7c8 100644 --- a/pkg/kubectl/cmd/create.go +++ b/pkg/kubectl/cmd/create.go @@ -65,7 +65,8 @@ func NewCmdCreate(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { var options CreateOptions cmd := &cobra.Command{ - Use: "create -f FILENAME", + Use: "create -f FILENAME", + DisableFlagsInUseLine: true, Short: i18n.T("Create a resource from a file or from stdin."), Long: createLong, Example: createExample, diff --git a/pkg/kubectl/cmd/create_clusterrole.go b/pkg/kubectl/cmd/create_clusterrole.go index 5ed01ce3c33..8f8db8ccb70 100644 --- a/pkg/kubectl/cmd/create_clusterrole.go +++ b/pkg/kubectl/cmd/create_clusterrole.go @@ -66,7 +66,8 @@ func NewCmdCreateClusterRole(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command }, } cmd := &cobra.Command{ - Use: "clusterrole NAME --verb=verb --resource=resource.group [--resource-name=resourcename] [--dry-run]", + Use: "clusterrole NAME --verb=verb --resource=resource.group [--resource-name=resourcename] [--dry-run]", + DisableFlagsInUseLine: true, Short: clusterRoleLong, Long: clusterRoleLong, Example: clusterRoleExample, diff --git a/pkg/kubectl/cmd/create_clusterrolebinding.go b/pkg/kubectl/cmd/create_clusterrolebinding.go index d79a18bea9b..c7ce180c343 100644 --- a/pkg/kubectl/cmd/create_clusterrolebinding.go +++ b/pkg/kubectl/cmd/create_clusterrolebinding.go @@ -39,7 +39,8 @@ var ( // ClusterRoleBinding is a command to ease creating ClusterRoleBindings. func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "clusterrolebinding NAME --clusterrole=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + Use: "clusterrolebinding NAME --clusterrole=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a ClusterRoleBinding for a particular ClusterRole"), Long: clusterRoleBindingLong, Example: clusterRoleBindingExample, diff --git a/pkg/kubectl/cmd/create_configmap.go b/pkg/kubectl/cmd/create_configmap.go index b9f17adede8..cb80b83e9f9 100644 --- a/pkg/kubectl/cmd/create_configmap.go +++ b/pkg/kubectl/cmd/create_configmap.go @@ -60,11 +60,12 @@ var ( // ConfigMap is a command to ease creating ConfigMaps. 
func NewCmdCreateConfigMap(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", - Aliases: []string{"cm"}, - Short: i18n.T("Create a configmap from a local file, directory or literal value"), - Long: configMapLong, - Example: configMapExample, + Use: "configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", + DisableFlagsInUseLine: true, + Aliases: []string{"cm"}, + Short: i18n.T("Create a configmap from a local file, directory or literal value"), + Long: configMapLong, + Example: configMapExample, Run: func(cmd *cobra.Command, args []string) { err := CreateConfigMap(f, cmdOut, cmd, args) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/create_deployment.go b/pkg/kubectl/cmd/create_deployment.go index 08e1c9dec0d..cde03d1b76c 100644 --- a/pkg/kubectl/cmd/create_deployment.go +++ b/pkg/kubectl/cmd/create_deployment.go @@ -41,11 +41,12 @@ var ( // Note that this command overlaps significantly with the `kubectl run` command. func NewCmdCreateDeployment(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "deployment NAME --image=image [--dry-run]", - Aliases: []string{"deploy"}, - Short: i18n.T("Create a deployment with the specified name."), - Long: deploymentLong, - Example: deploymentExample, + Use: "deployment NAME --image=image [--dry-run]", + DisableFlagsInUseLine: true, + Aliases: []string{"deploy"}, + Short: i18n.T("Create a deployment with the specified name."), + Long: deploymentLong, + Example: deploymentExample, Run: func(cmd *cobra.Command, args []string) { err := createDeployment(f, cmdOut, cmdErr, cmd, args) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/create_namespace.go b/pkg/kubectl/cmd/create_namespace.go index 955cf1f84af..7a0f2ec327f 100644 --- a/pkg/kubectl/cmd/create_namespace.go +++ b/pkg/kubectl/cmd/create_namespace.go @@ -39,11 +39,12 @@ var ( // NewCmdCreateNamespace is a macro command to create a new namespace func NewCmdCreateNamespace(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "namespace NAME [--dry-run]", - Aliases: []string{"ns"}, - Short: i18n.T("Create a namespace with the specified name"), - Long: namespaceLong, - Example: namespaceExample, + Use: "namespace NAME [--dry-run]", + DisableFlagsInUseLine: true, + Aliases: []string{"ns"}, + Short: i18n.T("Create a namespace with the specified name"), + Long: namespaceLong, + Example: namespaceExample, Run: func(cmd *cobra.Command, args []string) { err := CreateNamespace(f, cmdOut, cmd, args) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/create_pdb.go b/pkg/kubectl/cmd/create_pdb.go index 310a55e275b..39938f64152 100644 --- a/pkg/kubectl/cmd/create_pdb.go +++ b/pkg/kubectl/cmd/create_pdb.go @@ -44,11 +44,12 @@ var ( // NewCmdCreatePodDisruptionBudget is a macro command to create a new pod disruption budget. 
func NewCmdCreatePodDisruptionBudget(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "poddisruptionbudget NAME --selector=SELECTOR --min-available=N [--dry-run]", - Aliases: []string{"pdb"}, - Short: i18n.T("Create a pod disruption budget with the specified name."), - Long: pdbLong, - Example: pdbExample, + Use: "poddisruptionbudget NAME --selector=SELECTOR --min-available=N [--dry-run]", + DisableFlagsInUseLine: true, + Aliases: []string{"pdb"}, + Short: i18n.T("Create a pod disruption budget with the specified name."), + Long: pdbLong, + Example: pdbExample, Run: func(cmd *cobra.Command, args []string) { err := CreatePodDisruptionBudget(f, cmdOut, cmd, args) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/create_priorityclass.go b/pkg/kubectl/cmd/create_priorityclass.go index 7f3561ae614..99c17866732 100644 --- a/pkg/kubectl/cmd/create_priorityclass.go +++ b/pkg/kubectl/cmd/create_priorityclass.go @@ -42,11 +42,12 @@ var ( // NewCmdCreatePriorityClass is a macro command to create a new priorityClass. func NewCmdCreatePriorityClass(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "priorityclass NAME --value=VALUE --global-default=BOOL [--dry-run]", - Aliases: []string{"pc"}, - Short: i18n.T("Create a priorityclass with the specified name."), - Long: pcLong, - Example: pcExample, + Use: "priorityclass NAME --value=VALUE --global-default=BOOL [--dry-run]", + DisableFlagsInUseLine: true, + Aliases: []string{"pc"}, + Short: i18n.T("Create a priorityclass with the specified name."), + Long: pcLong, + Example: pcExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(CreatePriorityClass(f, cmdOut, cmd, args)) }, diff --git a/pkg/kubectl/cmd/create_quota.go b/pkg/kubectl/cmd/create_quota.go index cbd5dc741d3..48eed9993a1 100644 --- a/pkg/kubectl/cmd/create_quota.go +++ b/pkg/kubectl/cmd/create_quota.go @@ -42,11 +42,12 @@ var ( // NewCmdCreateQuota is a macro command to create a new quota func NewCmdCreateQuota(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "quota NAME [--hard=key1=value1,key2=value2] [--scopes=Scope1,Scope2] [--dry-run=bool]", - Aliases: []string{"resourcequota"}, - Short: i18n.T("Create a quota with the specified name."), - Long: quotaLong, - Example: quotaExample, + Use: "quota NAME [--hard=key1=value1,key2=value2] [--scopes=Scope1,Scope2] [--dry-run=bool]", + DisableFlagsInUseLine: true, + Aliases: []string{"resourcequota"}, + Short: i18n.T("Create a quota with the specified name."), + Long: quotaLong, + Example: quotaExample, Run: func(cmd *cobra.Command, args []string) { err := CreateQuota(f, cmdOut, cmd, args) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/create_role.go b/pkg/kubectl/cmd/create_role.go index 0a881e779c7..1710a4de32e 100644 --- a/pkg/kubectl/cmd/create_role.go +++ b/pkg/kubectl/cmd/create_role.go @@ -121,7 +121,8 @@ func NewCmdCreateRole(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { Out: cmdOut, } cmd := &cobra.Command{ - Use: "role NAME --verb=verb --resource=resource.group/subresource [--resource-name=resourcename] [--dry-run]", + Use: "role NAME --verb=verb --resource=resource.group/subresource [--resource-name=resourcename] [--dry-run]", + DisableFlagsInUseLine: true, Short: roleLong, Long: roleLong, Example: roleExample, diff --git a/pkg/kubectl/cmd/create_rolebinding.go b/pkg/kubectl/cmd/create_rolebinding.go index 35cb8dc02c8..49eca5706f5 100644 --- a/pkg/kubectl/cmd/create_rolebinding.go +++ 
b/pkg/kubectl/cmd/create_rolebinding.go @@ -39,7 +39,8 @@ var ( // RoleBinding is a command to ease creating RoleBindings. func NewCmdCreateRoleBinding(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + Use: "rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a RoleBinding for a particular Role or ClusterRole"), Long: roleBindingLong, Example: roleBindingExample, diff --git a/pkg/kubectl/cmd/create_secret.go b/pkg/kubectl/cmd/create_secret.go index f49048bf9a3..5a5b99015b6 100644 --- a/pkg/kubectl/cmd/create_secret.go +++ b/pkg/kubectl/cmd/create_secret.go @@ -76,7 +76,8 @@ var ( // NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", + Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a secret from a local file, directory or literal value"), Long: secretLong, Example: secretExample, @@ -149,7+150,8 @@ var ( // NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]", + Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a secret for use with a Docker registry"), Long: secretForDockerRegistryLong, Example: secretForDockerRegistryExample, @@ -223,7 +225,8 @@ var ( // NewCmdCreateSecretTLS is a macro command for creating secrets to work with Docker registries func NewCmdCreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]", + Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a TLS secret"), Long: secretForTLSLong, Example: secretForTLSExample, diff --git a/pkg/kubectl/cmd/create_service.go b/pkg/kubectl/cmd/create_service.go index 52496a03b5e..a35d210de27 100644 --- a/pkg/kubectl/cmd/create_service.go +++ b/pkg/kubectl/cmd/create_service.go @@ -64,7 +64,8 @@ func addPortFlags(cmd *cobra.Command) { // NewCmdCreateServiceClusterIP is a command to create a ClusterIP service func NewCmdCreateServiceClusterIP(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "clusterip NAME [--tcp=<port>:<targetPort>] [--dry-run]", + Use: "clusterip NAME [--tcp=<port>:<targetPort>] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a ClusterIP service."), Long: serviceClusterIPLong, Example: serviceClusterIPExample, @@ -124,7 +125,8 @@ var ( // NewCmdCreateServiceNodePort is a macro command for creating a NodePort 
service func NewCmdCreateServiceNodePort(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "nodeport NAME [--tcp=port:targetPort] [--dry-run]", + Use: "nodeport NAME [--tcp=port:targetPort] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a NodePort service."), Long: serviceNodePortLong, Example: serviceNodePortExample, @@ -181,7 +183,8 @@ var ( // NewCmdCreateServiceLoadBalancer is a macro command for creating a LoadBalancer service func NewCmdCreateServiceLoadBalancer(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "loadbalancer NAME [--tcp=port:targetPort] [--dry-run]", + Use: "loadbalancer NAME [--tcp=port:targetPort] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create a LoadBalancer service."), Long: serviceLoadBalancerLong, Example: serviceLoadBalancerExample, @@ -240,7 +243,8 @@ var ( // NewCmdCreateServiceExternalName is a macro command for creating an ExternalName service func NewCmdCreateServiceExternalName(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "externalname NAME --external-name external.name [--dry-run]", + Use: "externalname NAME --external-name external.name [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Create an ExternalName service."), Long: serviceExternalNameLong, Example: serviceExternalNameExample, diff --git a/pkg/kubectl/cmd/create_serviceaccount.go b/pkg/kubectl/cmd/create_serviceaccount.go index 99dfde9dd1a..c4302c64cc7 100644 --- a/pkg/kubectl/cmd/create_serviceaccount.go +++ b/pkg/kubectl/cmd/create_serviceaccount.go @@ -39,11 +39,12 @@ var ( // NewCmdCreateServiceAccount is a macro command to create a new service account func NewCmdCreateServiceAccount(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "serviceaccount NAME [--dry-run]", - Aliases: []string{"sa"}, - Short: i18n.T("Create a service account with the specified name"), - Long: serviceAccountLong, - Example: serviceAccountExample, + Use: "serviceaccount NAME [--dry-run]", + DisableFlagsInUseLine: true, + Aliases: []string{"sa"}, + Short: i18n.T("Create a service account with the specified name"), + Long: serviceAccountLong, + Example: serviceAccountExample, Run: func(cmd *cobra.Command, args []string) { err := CreateServiceAccount(f, cmdOut, cmd, args) cmdutil.CheckErr(err) diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 80a9bd49920..60ac7d5b43c 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -127,7 +127,8 @@ func NewCmdDelete(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", + Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", + DisableFlagsInUseLine: true, Short: i18n.T("Delete resources by filenames, stdin, resources and names, or by resources and label selector"), Long: delete_long, Example: delete_example, diff --git a/pkg/kubectl/cmd/delete_test.go b/pkg/kubectl/cmd/delete_test.go index 5b78ba09761..220dd288909 100644 --- a/pkg/kubectl/cmd/delete_test.go +++ b/pkg/kubectl/cmd/delete_test.go @@ -44,6 +44,7 @@ var unstructuredSerializer = dynamic.ContentConfig().NegotiatedSerializer var fakecmd = &cobra.Command{ Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", + DisableFlagsInUseLine: true, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(cmdutil.ValidateOutputArgs(cmd)) }, diff --git 
a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index f77304ec599..f1b75ccd0e6 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -79,7 +79,8 @@ func NewCmdDescribe(f cmdutil.Factory, out, cmdErr io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)", + Use: "describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)", + DisableFlagsInUseLine: true, Short: i18n.T("Show details of a specific resource or group of resources"), Long: describeLong + "\n\n" + cmdutil.ValidResourceTypeList(f), Example: describeExample, diff --git a/pkg/kubectl/cmd/describe_test.go b/pkg/kubectl/cmd/describe_test.go index 2c3ec49c5c8..5c068609d35 100644 --- a/pkg/kubectl/cmd/describe_test.go +++ b/pkg/kubectl/cmd/describe_test.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "net/http" + "strings" "testing" "k8s.io/client-go/rest/fake" @@ -168,3 +169,30 @@ func TestDescribeObjectSkipEvents(t *testing.T) { t.Errorf("ShowEvents = false expected, got ShowEvents = %v", d.Settings.ShowEvents) } } + +func TestDescribeHelpMessage(t *testing.T) { + f, _, _, _ := cmdtesting.NewAPIFactory() + + buf := bytes.NewBuffer([]byte{}) + buferr := bytes.NewBuffer([]byte{}) + cmd := NewCmdDescribe(f, buf, buferr) + cmd.SetArgs([]string{"-h"}) + cmd.SetOutput(buf) + _, err := cmd.ExecuteC() + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + got := buf.String() + + expected := `describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)` + if !strings.Contains(got, expected) { + t.Errorf("Expected to contain: \n %v\nGot:\n %v\n", expected, got) + } + + unexpected := `describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME) [flags]` + if strings.Contains(got, unexpected) { + t.Errorf("Expected not to contain: \n %v\nGot:\n %v\n", unexpected, got) + } +} diff --git a/pkg/kubectl/cmd/diff.go b/pkg/kubectl/cmd/diff.go index e536069952d..422ec70dc36 100644 --- a/pkg/kubectl/cmd/diff.go +++ b/pkg/kubectl/cmd/diff.go @@ -109,7 +109,8 @@ func NewCmdDiff(f cmdutil.Factory, stdout, stderr io.Writer) *cobra.Command { Stderr: stderr, } cmd := &cobra.Command{ - Use: "diff -f FILENAME", + Use: "diff -f FILENAME", + DisableFlagsInUseLine: true, Short: i18n.T("Diff different versions of configurations"), Long: diffLong, Example: diffExample, diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index ba5ae1f1a72..3226d54a646 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -105,7 +105,8 @@ func NewCmdCordon(f cmdutil.Factory, out io.Writer) *cobra.Command { options := &DrainOptions{Factory: f, Out: out} cmd := &cobra.Command{ - Use: "cordon NODE", + Use: "cordon NODE", + DisableFlagsInUseLine: true, Short: i18n.T("Mark node as unschedulable"), Long: cordon_long, Example: cordon_example, @@ -132,7 +133,8 @@ func NewCmdUncordon(f cmdutil.Factory, out io.Writer) *cobra.Command { options := &DrainOptions{Factory: f, Out: out} cmd := &cobra.Command{ - Use: "uncordon NODE", + Use: "uncordon NODE", + DisableFlagsInUseLine: true, Short: i18n.T("Mark node as schedulable"), Long: uncordon_long, Example: uncordon_example, @@ -184,7 +186,8 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { options := &DrainOptions{Factory: f, Out: out, ErrOut: errOut, backOff: clockwork.NewRealClock()} cmd := &cobra.Command{ - Use: "drain NODE", + Use: "drain NODE", + DisableFlagsInUseLine: true, Short: i18n.T("Drain node in 
preparation for maintenance"), Long: drain_long, Example: drain_example, diff --git a/pkg/kubectl/cmd/edit.go b/pkg/kubectl/cmd/edit.go index 11ef23c6bd5..1883c11895c 100644 --- a/pkg/kubectl/cmd/edit.go +++ b/pkg/kubectl/cmd/edit.go @@ -87,7 +87,8 @@ func NewCmdEdit(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "edit (RESOURCE/NAME | -f FILENAME)", + Use: "edit (RESOURCE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, Short: i18n.T("Edit a resource on the server"), Long: editLong, Example: fmt.Sprintf(editExample), diff --git a/pkg/kubectl/cmd/exec.go b/pkg/kubectl/cmd/exec.go index cca3d61a928..e13675029bb 100644 --- a/pkg/kubectl/cmd/exec.go +++ b/pkg/kubectl/cmd/exec.go @@ -73,7 +73,8 @@ func NewCmdExec(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *c Executor: &DefaultRemoteExecutor{}, } cmd := &cobra.Command{ - Use: "exec POD [-c CONTAINER] -- COMMAND [args...]", + Use: "exec POD [-c CONTAINER] -- COMMAND [args...]", + DisableFlagsInUseLine: true, Short: i18n.T("Execute a command in a container"), Long: "Execute a command in a container.", Example: exec_example, diff --git a/pkg/kubectl/cmd/explain.go b/pkg/kubectl/cmd/explain.go index d1ecf3d65d4..47472c3c466 100644 --- a/pkg/kubectl/cmd/explain.go +++ b/pkg/kubectl/cmd/explain.go @@ -53,7 +53,8 @@ var ( // NewCmdExplain returns a cobra command for swagger docs func NewCmdExplain(f cmdutil.Factory, out, cmdErr io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "explain RESOURCE", + Use: "explain RESOURCE", + DisableFlagsInUseLine: true, Short: i18n.T("Documentation of resources"), Long: explainLong + "\n\n" + cmdutil.ValidResourceTypeList(f), Example: explainExamples, diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index ed84c1d76b7..51ccd43225d 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -83,7 +83,8 @@ func NewCmdExposeService(f cmdutil.Factory, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]", + Use: "expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]", + DisableFlagsInUseLine: true, Short: i18n.T("Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service"), Long: exposeLong, Example: exposeExample, diff --git a/pkg/kubectl/cmd/help.go b/pkg/kubectl/cmd/help.go index 569abb9fd88..783a87c64db 100644 --- a/pkg/kubectl/cmd/help.go +++ b/pkg/kubectl/cmd/help.go @@ -31,7 +31,8 @@ var helpLong = templates.LongDesc(i18n.T(` func NewCmdHelp() *cobra.Command { cmd := &cobra.Command{ - Use: "help [command] | STRING_TO_SEARCH", + Use: "help [command] | STRING_TO_SEARCH", + DisableFlagsInUseLine: true, Short: i18n.T("Help about any command"), Long: helpLong, diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 786fbe6afcb..32574853234 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -111,7 +111,8 @@ func NewCmdLabel(f cmdutil.Factory, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "label [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]", + Use: "label [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... 
KEY_N=VAL_N [--resource-version=version]", + DisableFlagsInUseLine: true, Short: i18n.T("Update the labels on a resource"), Long: fmt.Sprintf(labelLong, validation.LabelValueMaxLength), Example: labelExample, diff --git a/pkg/kubectl/cmd/logs.go b/pkg/kubectl/cmd/logs.go index cccceb154bc..8091cf8251a 100644 --- a/pkg/kubectl/cmd/logs.go +++ b/pkg/kubectl/cmd/logs.go @@ -89,7 +89,8 @@ type LogsOptions struct { func NewCmdLogs(f cmdutil.Factory, out io.Writer) *cobra.Command { o := &LogsOptions{} cmd := &cobra.Command{ - Use: "logs [-f] [-p] (POD | TYPE/NAME) [-c CONTAINER]", + Use: "logs [-f] [-p] (POD | TYPE/NAME) [-c CONTAINER]", + DisableFlagsInUseLine: true, Short: i18n.T("Print the logs for a container in a pod"), Long: "Print the logs for a container in a pod or specified resource. If the pod has only one container, the container name is optional.", Example: logsExample, diff --git a/pkg/kubectl/cmd/patch.go b/pkg/kubectl/cmd/patch.go index 325c2584722..daf67ef56bb 100644 --- a/pkg/kubectl/cmd/patch.go +++ b/pkg/kubectl/cmd/patch.go @@ -95,7 +95,8 @@ func NewCmdPatch(f cmdutil.Factory, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "patch (-f FILENAME | TYPE NAME) -p PATCH", + Use: "patch (-f FILENAME | TYPE NAME) -p PATCH", + DisableFlagsInUseLine: true, Short: i18n.T("Update field(s) of a resource using strategic merge patch"), Long: patchLong, Example: patchExample, diff --git a/pkg/kubectl/cmd/plugin.go b/pkg/kubectl/cmd/plugin.go index b69e7a89c68..d34e78733a4 100644 --- a/pkg/kubectl/cmd/plugin.go +++ b/pkg/kubectl/cmd/plugin.go @@ -50,7 +50,8 @@ func NewCmdPlugin(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cobra.Co } cmd := &cobra.Command{ - Use: "plugin NAME", + Use: "plugin NAME", + DisableFlagsInUseLine: true, Short: i18n.T("Runs a command-line plugin"), Long: plugin_long, Run: func(cmd *cobra.Command, args []string) { diff --git a/pkg/kubectl/cmd/portforward.go b/pkg/kubectl/cmd/portforward.go index cedb93bb232..cf39bf637ab 100644 --- a/pkg/kubectl/cmd/portforward.go +++ b/pkg/kubectl/cmd/portforward.go @@ -70,7 +70,8 @@ func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Comma }, } cmd := &cobra.Command{ - Use: "port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]", + Use: "port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]", + DisableFlagsInUseLine: true, Short: i18n.T("Forward one or more local ports to a pod"), Long: "Forward one or more local ports to a pod.", Example: portforwardExample, diff --git a/pkg/kubectl/cmd/proxy.go b/pkg/kubectl/cmd/proxy.go index 7bc82ec7f45..027dfbf1ee8 100644 --- a/pkg/kubectl/cmd/proxy.go +++ b/pkg/kubectl/cmd/proxy.go @@ -71,7 +71,8 @@ var ( func NewCmdProxy(f cmdutil.Factory, out io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]", + Use: "proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]", + DisableFlagsInUseLine: true, Short: i18n.T("Run a proxy to the Kubernetes API server"), Long: proxyLong, Example: proxyExample, diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index 16b80ae3247..fe75e2b211b 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -64,7 +64,8 @@ func NewCmdReplace(f cmdutil.Factory, out io.Writer) *cobra.Command { options := &resource.FilenameOptions{} cmd := &cobra.Command{ - Use: "replace -f FILENAME", + Use: "replace -f FILENAME", + 
DisableFlagsInUseLine: true, Short: i18n.T("Replace a resource by filename or stdin"), Long: replaceLong, Example: replaceExample, diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index 4013cb42874..386c1eb97ac 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -139,7 +139,8 @@ func NewCmdGet(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.Comman } cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE [NAME | -l label] | TYPE/NAME ...) [flags]", + Use: "get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE [NAME | -l label] | TYPE/NAME ...) [flags]", + DisableFlagsInUseLine: true, Short: i18n.T("Display one or many resources"), Long: getLong + "\n\n" + cmdutil.ValidResourceTypeList(f), Example: getExample, diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index 117cacbfb10..e27037e6662 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -79,7 +79,8 @@ func NewCmdRollingUpdate(f cmdutil.Factory, out io.Writer) *cobra.Command { options := &resource.FilenameOptions{} cmd := &cobra.Command{ - Use: "rolling-update OLD_CONTROLLER_NAME ([NEW_CONTROLLER_NAME] --image=NEW_CONTAINER_IMAGE | -f NEW_CONTROLLER_SPEC)", + Use: "rolling-update OLD_CONTROLLER_NAME ([NEW_CONTROLLER_NAME] --image=NEW_CONTAINER_IMAGE | -f NEW_CONTROLLER_SPEC)", + DisableFlagsInUseLine: true, Short: i18n.T("Perform a rolling update of the given ReplicationController"), Long: rollingUpdateLong, Example: rollingUpdateExample, @@ -93,9 +94,7 @@ func NewCmdRollingUpdate(f cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().Duration("timeout", timeout, `Max time to wait for a replication controller to update before giving up. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".`) usage := "Filename or URL to file to use to create the new replication controller." kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) - cmd.MarkFlagRequired("filename") cmd.Flags().String("image", "", i18n.T("Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with --filename/-f")) - cmd.MarkFlagRequired("image") cmd.Flags().String("deployment-label-key", "deployment", i18n.T("The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise")) cmd.Flags().String("container", "", i18n.T("Container name which will have its image upgraded. Only relevant when --image is specified, ignored otherwise. Required when using --image on a multi-container pod")) cmd.Flags().String("image-pull-policy", "", i18n.T("Explicit policy for when to pull container images. 
Required when --image is same as existing image, ignored otherwise.")) diff --git a/pkg/kubectl/cmd/rollout/rollout.go b/pkg/kubectl/cmd/rollout/rollout.go index a65ed4d8df5..941b585c11d 100644 --- a/pkg/kubectl/cmd/rollout/rollout.go +++ b/pkg/kubectl/cmd/rollout/rollout.go @@ -49,7 +49,8 @@ var ( func NewCmdRollout(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "rollout SUBCOMMAND", + Use: "rollout SUBCOMMAND", + DisableFlagsInUseLine: true, Short: i18n.T("Manage the rollout of a resource"), Long: rollout_long, Example: rollout_example, diff --git a/pkg/kubectl/cmd/rollout/rollout_history.go b/pkg/kubectl/cmd/rollout/rollout_history.go index 91b5711f1d2..fb6ccbe90e7 100644 --- a/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/pkg/kubectl/cmd/rollout/rollout_history.go @@ -48,7 +48,8 @@ func NewCmdRolloutHistory(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "history (TYPE NAME | TYPE/NAME) [flags]", + Use: "history (TYPE NAME | TYPE/NAME) [flags]", + DisableFlagsInUseLine: true, Short: i18n.T("View rollout history"), Long: history_long, Example: history_example, diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index 5a9f3bd4b7f..17b8d3e80ce 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -71,7 +71,8 @@ func NewCmdRolloutPause(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "pause RESOURCE", + Use: "pause RESOURCE", + DisableFlagsInUseLine: true, Short: i18n.T("Mark the provided resource as paused"), Long: pause_long, Example: pause_example, diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index 427fac9acfa..3e0d07e9c77 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -69,7 +69,8 @@ func NewCmdRolloutResume(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "resume RESOURCE", + Use: "resume RESOURCE", + DisableFlagsInUseLine: true, Short: i18n.T("Resume a paused resource"), Long: resume_long, Example: resume_example, diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index 6655c7c92f5..4abf6b4ee9e 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -54,7 +54,8 @@ func NewCmdRolloutStatus(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "status (TYPE NAME | TYPE/NAME) [flags]", + Use: "status (TYPE NAME | TYPE/NAME) [flags]", + DisableFlagsInUseLine: true, Short: i18n.T("Show the status of the rollout"), Long: status_long, Example: status_example, diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index fb5c7dc09cc..3eb40ea42a4 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -69,7 +69,8 @@ func NewCmdRolloutUndo(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "undo (TYPE NAME | TYPE/NAME) [flags]", + Use: "undo (TYPE NAME | TYPE/NAME) [flags]", + DisableFlagsInUseLine: true, Short: i18n.T("Undo a previous rollout"), Long: undo_long, Example: 
undo_example, diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 087e8d40b85..516624e8666 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -96,7 +96,8 @@ type RunObject struct { func NewCmdRun(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]", + Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]", + DisableFlagsInUseLine: true, Short: i18n.T("Run a particular image on the cluster"), Long: runLong, Example: runExample, diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index be722abc962..90233f499f6 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -64,7 +64,8 @@ func NewCmdScale(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)", + Use: "scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)", + DisableFlagsInUseLine: true, Short: i18n.T("Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job"), Long: scaleLong, Example: scaleExample, diff --git a/pkg/kubectl/cmd/set/set.go b/pkg/kubectl/cmd/set/set.go index e905c3fb5b7..dfc69c1049d 100644 --- a/pkg/kubectl/cmd/set/set.go +++ b/pkg/kubectl/cmd/set/set.go @@ -34,7 +34,8 @@ var ( func NewCmdSet(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "set SUBCOMMAND", + Use: "set SUBCOMMAND", + DisableFlagsInUseLine: true, Short: i18n.T("Set specific features on objects"), Long: set_long, Run: cmdutil.DefaultSubCommandRun(err), diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 36c3b810050..bb7234dbbb7 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -135,7 +135,8 @@ func NewCmdEnv(f cmdutil.Factory, in io.Reader, out, errout io.Writer) *cobra.Co In: in, } cmd := &cobra.Command{ - Use: "env RESOURCE/NAME KEY_1=VAL_1 ... KEY_N=VAL_N", + Use: "env RESOURCE/NAME KEY_1=VAL_1 ... KEY_N=VAL_N", + DisableFlagsInUseLine: true, Short: "Update environment variables on a pod template", Long: envLong, Example: fmt.Sprintf(envExample), diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 09bff8302ee..4f940cf8804 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -92,7 +92,8 @@ func NewCmdImage(f cmdutil.Factory, out, err io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N", + Use: "image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N", + DisableFlagsInUseLine: true, Short: i18n.T("Update image of a pod template"), Long: image_long, Example: image_example, diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 5284e52ac58..a6b85994f2a 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -98,7 +98,8 @@ func NewCmdResources(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra. 
} cmd := &cobra.Command{ - Use: "resources (-f FILENAME | TYPE NAME) ([--limits=LIMITS & --requests=REQUESTS]", + Use: "resources (-f FILENAME | TYPE NAME) ([--limits=LIMITS & --requests=REQUESTS]", + DisableFlagsInUseLine: true, Short: i18n.T("Update resource requests/limits on objects with pod templates"), Long: fmt.Sprintf(resources_long, strings.Join(resourceTypesWithPodTemplate, ", ")), Example: resources_example, diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index 3579f63cf4b..8dca8f45751 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -79,7 +79,8 @@ func NewCmdSelector(f cmdutil.Factory, out io.Writer) *cobra.Command { } cmd := &cobra.Command{ - Use: "selector (-f FILENAME | TYPE NAME) EXPRESSIONS [--resource-version=version]", + Use: "selector (-f FILENAME | TYPE NAME) EXPRESSIONS [--resource-version=version]", + DisableFlagsInUseLine: true, Short: i18n.T("Set the selector on a resource"), Long: fmt.Sprintf(selectorLong, validation.LabelValueMaxLength), Example: selectorExample, diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index 65ac0d166c3..535fd32b64b 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -83,11 +83,12 @@ func NewCmdServiceAccount(f cmdutil.Factory, out, err io.Writer) *cobra.Command } cmd := &cobra.Command{ - Use: "serviceaccount (-f FILENAME | TYPE NAME) SERVICE_ACCOUNT", - Aliases: []string{"sa"}, - Short: i18n.T("Update ServiceAccount of a resource"), - Long: serviceaccountLong, - Example: serviceaccountExample, + Use: "serviceaccount (-f FILENAME | TYPE NAME) SERVICE_ACCOUNT", + DisableFlagsInUseLine: true, + Aliases: []string{"sa"}, + Short: i18n.T("Update ServiceAccount of a resource"), + Long: serviceaccountLong, + Example: serviceaccountExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(saConfig.Complete(f, cmd, args)) cmdutil.CheckErr(saConfig.Run()) diff --git a/pkg/kubectl/cmd/set/set_subject.go b/pkg/kubectl/cmd/set/set_subject.go index c71a664b00e..3e8c204de65 100644 --- a/pkg/kubectl/cmd/set/set_subject.go +++ b/pkg/kubectl/cmd/set/set_subject.go @@ -84,7 +84,8 @@ func NewCmdSubject(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.Co } cmd := &cobra.Command{ - Use: "subject (-f FILENAME | TYPE NAME) [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + Use: "subject (-f FILENAME | TYPE NAME) [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + DisableFlagsInUseLine: true, Short: i18n.T("Update User, Group or ServiceAccount in a RoleBinding/ClusterRoleBinding"), Long: subject_long, Example: subject_example, diff --git a/pkg/kubectl/cmd/taint.go b/pkg/kubectl/cmd/taint.go index 239aba71634..9ef8bc10c14 100644 --- a/pkg/kubectl/cmd/taint.go +++ b/pkg/kubectl/cmd/taint.go @@ -85,7 +85,8 @@ func NewCmdTaint(f cmdutil.Factory, out io.Writer) *cobra.Command { argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ - Use: "taint NODE NAME KEY_1=VAL_1:TAINT_EFFECT_1 ... KEY_N=VAL_N:TAINT_EFFECT_N", + Use: "taint NODE NAME KEY_1=VAL_1:TAINT_EFFECT_1 ... 
KEY_N=VAL_N:TAINT_EFFECT_N", + DisableFlagsInUseLine: true, Short: i18n.T("Update the taints on one or more nodes"), Long: fmt.Sprintf(taintLong, validation.DNS1123SubdomainMaxLength, validation.LabelValueMaxLength), Example: taintExample, diff --git a/pkg/kubectl/cmd/top_node.go b/pkg/kubectl/cmd/top_node.go index dace2918d69..610a542b7f3 100644 --- a/pkg/kubectl/cmd/top_node.go +++ b/pkg/kubectl/cmd/top_node.go @@ -89,7 +89,8 @@ func NewCmdTopNode(f cmdutil.Factory, options *TopNodeOptions, out io.Writer) *c } cmd := &cobra.Command{ - Use: "node [NAME | -l label]", + Use: "node [NAME | -l label]", + DisableFlagsInUseLine: true, Short: i18n.T("Display Resource (CPU/Memory/Storage) usage of nodes"), Long: topNodeLong, Example: topNodeExample, diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go index 2ffdc6dff8e..669f95cd3cf 100644 --- a/pkg/kubectl/cmd/top_pod.go +++ b/pkg/kubectl/cmd/top_pod.go @@ -78,7 +78,8 @@ func NewCmdTopPod(f cmdutil.Factory, options *TopPodOptions, out io.Writer) *cob } cmd := &cobra.Command{ - Use: "pod [NAME | -l label]", + Use: "pod [NAME | -l label]", + DisableFlagsInUseLine: true, Short: i18n.T("Display Resource (CPU/Memory/Storage) usage of pods"), Long: topPodLong, Example: topPodExample, From 39418b175f19e21b9e0b4550b4800aea02001486 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 26 Dec 2017 19:59:47 -0500 Subject: [PATCH 034/264] Fix TestCadvisorListPodStats failure under mac/darwin GetPodCgroupNameSuffix is not really implemented under darwin (or windows for that matter). So let's just skip over the check for CPU and Memory if that is not set. --- pkg/kubelet/stats/cadvisor_stats_provider_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/stats/cadvisor_stats_provider_test.go b/pkg/kubelet/stats/cadvisor_stats_provider_test.go index 2eb9d4a25c9..942fa2e2c50 100644 --- a/pkg/kubelet/stats/cadvisor_stats_provider_test.go +++ b/pkg/kubelet/stats/cadvisor_stats_provider_test.go @@ -230,8 +230,12 @@ func TestCadvisorListPodStats(t *testing.T) { assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix()) checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network) checkEphemeralStats(t, "Pod0", []int{seedPod0Container0, seedPod0Container1}, []int{seedEphemeralVolume1, seedEphemeralVolume2}, ps.EphemeralStorage) - checkCPUStats(t, "Pod0", seedPod0Infra, ps.CPU) - checkMemoryStats(t, "Pod0", seedPod0Infra, infos["/pod0-i"], ps.Memory) + if ps.CPU != nil { + checkCPUStats(t, "Pod0", seedPod0Infra, ps.CPU) + } + if ps.Memory != nil { + checkMemoryStats(t, "Pod0", seedPod0Infra, infos["/pod0-i"], ps.Memory) + } // Validate Pod1 Results ps, found = indexPods[prf1] From 38a8c72f8adc7ba7d2fc2b2478fa50f9cc02ebd2 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Tue, 2 Jan 2018 14:33:48 +0800 Subject: [PATCH 035/264] Print the full path of Kubeconfig files. 
--- cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go index 1d1474417ba..451fb0c0f19 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go @@ -231,7 +231,7 @@ func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmda return fmt.Errorf("failed to save kubeconfig file %s on disk: %v", kubeConfigFilePath, err) } - fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", filename) + fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", kubeConfigFilePath) return nil } @@ -258,7 +258,7 @@ func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmda // kubeadm doesn't validate the existing kubeconfig file more than this (kubeadm trusts the client certs to be valid) // Basically, if we find a kubeconfig file with the same path; the same CA cert and the same server URL; // kubeadm thinks those files are equal and doesn't bother writing a new file - fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", filename) + fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", kubeConfigFilePath) return nil } From 6c39b570fbbc9a18fa8c615c2923a0bcd8bf809c Mon Sep 17 00:00:00 2001 From: Karol Wychowaniec Date: Wed, 20 Dec 2017 14:44:39 +0100 Subject: [PATCH 036/264] Bump Metrics Server to version v0.2.1 --- .../metrics-server/metrics-server-deployment.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index 73375b2202b..e85afcbff9f 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -23,31 +23,31 @@ data: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: metrics-server-v0.2.0 + name: metrics-server-v0.2.1 namespace: kube-system labels: k8s-app: metrics-server kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v0.2.0 + version: v0.2.1 spec: selector: matchLabels: k8s-app: metrics-server - version: v0.2.0 + version: v0.2.1 template: metadata: name: metrics-server labels: k8s-app: metrics-server - version: v0.2.0 + version: v0.2.1 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: serviceAccountName: metrics-server containers: - name: metrics-server - image: gcr.io/google_containers/metrics-server-amd64:v0.2.0 + image: gcr.io/google_containers/metrics-server-amd64:v0.2.1 command: - /metrics-server - --source=kubernetes.summary_api:'' @@ -84,7 +84,7 @@ spec: - --memory=140Mi - --extra-memory=4Mi - --threshold=5 - - --deployment=metrics-server-v0.2.0 + - --deployment=metrics-server-v0.2.1 - --container=metrics-server - --poll-period=300000 - --estimator=exponential From a8127df3bb396717b4fb2a7f688c1f98e6bef6b4 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Wed, 20 Dec 2017 12:17:44 +0100 Subject: [PATCH 037/264] Simplify extra initializer logic --- .../src/k8s.io/apiserver/pkg/server/config.go | 5 -- .../apiserver/pkg/server/options/admission.go | 10 ++++ .../pkg/server/options/recommended.go | 57 ++++++------------- .../sample-apiserver/pkg/cmd/server/start.go | 7 +-- 4 files changed, 30 insertions(+), 49 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index cd98717d2c0..877071ad3b2 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -188,13 +188,9 @@ type Config struct { PublicAddress net.IP } -type AdmissionInitializersInitFunc func() (admission.PluginInitializer, error) - type RecommendedConfig struct { Config - ExtraAdmissionInitializersInitFunc []AdmissionInitializersInitFunc - // SharedInformerFactory provides shared informers for Kubernetes resources. This value is set by // RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config // by default, or the kubeconfig given with kubeconfig command line flag. @@ -263,7 +259,6 @@ func NewConfig(codecs serializer.CodecFactory) *Config { func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig { return &RecommendedConfig{ Config: *NewConfig(codecs), - ExtraAdmissionInitializersInitFunc: make([]AdmissionInitializersInitFunc, 0), } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index 30716869146..2565c0a69cf 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -104,6 +104,16 @@ func (a *AdmissionOptions) ApplyTo( return nil } + // Admission need scheme to construct admission initializer. + if scheme == nil { + return fmt.Errorf("admission depends on a scheme, it cannot be nil") + } + + // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. + if informers == nil { + return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil") + } + pluginNames := a.PluginNames if len(a.PluginNames) == 0 { pluginNames = a.enabledPluginNames() diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index eff7cde33d3..829647a2490 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -17,8 +17,6 @@ limitations under the License. package options import ( - "fmt" - "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" @@ -38,19 +36,24 @@ type RecommendedOptions struct { Audit *AuditOptions Features *FeatureOptions CoreAPI *CoreAPIOptions - Admission *AdmissionOptions + + // ExtraAdmissionInitializers is called once after all ApplyTo from the options above, to pass the returned + // admission plugin initializers to Admission.ApplyTo. 
+ ExtraAdmissionInitializers func() ([]admission.PluginInitializer, error) + Admission *AdmissionOptions } func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { return &RecommendedOptions{ - Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), - SecureServing: NewSecureServingOptions(), - Authentication: NewDelegatingAuthenticationOptions(), - Authorization: NewDelegatingAuthorizationOptions(), - Audit: NewAuditOptions(), - Features: NewFeatureOptions(), - CoreAPI: NewCoreAPIOptions(), - Admission: NewAdmissionOptions(), + Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), + SecureServing: NewSecureServingOptions(), + Authentication: NewDelegatingAuthenticationOptions(), + Authorization: NewDelegatingAuthorizationOptions(), + Audit: NewAuditOptions(), + Features: NewFeatureOptions(), + CoreAPI: NewCoreAPIOptions(), + ExtraAdmissionInitializers: func() ([]admission.PluginInitializer, error) { return nil, nil }, + Admission: NewAdmissionOptions(), } } @@ -90,34 +93,10 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *r if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - if o.Admission != nil { - // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. - if o.CoreAPI == nil { - return fmt.Errorf("admission depends on CoreAPI, so it must be set") - } - // Admission need scheme to construct admission initializer. - if scheme == nil { - return fmt.Errorf("admission depends on shceme, so it must be set") - } - - pluginInitializers := []admission.PluginInitializer{} - for _, initFunc := range config.ExtraAdmissionInitializersInitFunc { - intializer, err := initFunc() - if err != nil { - return err - } - pluginInitializers = append(pluginInitializers, intializer) - } - - err := o.Admission.ApplyTo( - &config.Config, - config.SharedInformerFactory, - config.ClientConfig, - scheme, - pluginInitializers...) 
- if err != nil { - return err - } + if initializers, err := o.ExtraAdmissionInitializers(); err != nil { + return err + } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, scheme, initializers...); err != nil { + return err } return nil diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index b375d46e609..e38eb749359 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -104,18 +104,15 @@ func (o *WardleServerOptions) Config() (*apiserver.Config, error) { serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - admissionInitializerInitFunc := func() (admission.PluginInitializer, error) { + o.RecommendedOptions.ExtraAdmissionInitializers = func() ([]admission.PluginInitializer, error) { client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) if err != nil { return nil, err } informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) o.SharedInformerFactory = informerFactory - return wardleinitializer.New(informerFactory), nil + return []admission.PluginInitializer{wardleinitializer.New(informerFactory)}, nil } - - serverConfig.ExtraAdmissionInitializersInitFunc = []genericapiserver.AdmissionInitializersInitFunc{admissionInitializerInitFunc} - if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } From 5a3cfd27ed818b971f36032d85e2de2db586a4e5 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Tue, 2 Jan 2018 09:32:04 +0100 Subject: [PATCH 038/264] Pass RecommendedConfig into ExtraAdmissionInitializers --- .../k8s.io/apiserver/pkg/server/options/recommended.go | 6 +++--- .../k8s.io/sample-apiserver/pkg/cmd/server/start.go | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index 829647a2490..148bfbdce5a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -39,7 +39,7 @@ type RecommendedOptions struct { // ExtraAdmissionInitializers is called once after all ApplyTo from the options above, to pass the returned // admission plugin initializers to Admission.ApplyTo. 
- ExtraAdmissionInitializers func() ([]admission.PluginInitializer, error) + ExtraAdmissionInitializers func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) Admission *AdmissionOptions } @@ -52,7 +52,7 @@ func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptio Audit: NewAuditOptions(), Features: NewFeatureOptions(), CoreAPI: NewCoreAPIOptions(), - ExtraAdmissionInitializers: func() ([]admission.PluginInitializer, error) { return nil, nil }, + ExtraAdmissionInitializers: func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) { return nil, nil }, Admission: NewAdmissionOptions(), } } @@ -93,7 +93,7 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *r if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - if initializers, err := o.ExtraAdmissionInitializers(); err != nil { + if initializers, err := o.ExtraAdmissionInitializers(config); err != nil { return err } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, scheme, initializers...); err != nil { return err diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index e38eb749359..0a6188305a1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -102,17 +102,17 @@ func (o *WardleServerOptions) Config() (*apiserver.Config, error) { return nil, fmt.Errorf("error creating self-signed certificates: %v", err) } - serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - - o.RecommendedOptions.ExtraAdmissionInitializers = func() ([]admission.PluginInitializer, error) { - client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) + o.RecommendedOptions.ExtraAdmissionInitializers = func(c *genericapiserver.RecommendedConfig) ([]admission.PluginInitializer, error) { + client, err := clientset.NewForConfig(c.LoopbackClientConfig) if err != nil { return nil, err } - informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) + informerFactory := informers.NewSharedInformerFactory(client, c.LoopbackClientConfig.Timeout) o.SharedInformerFactory = informerFactory return []admission.PluginInitializer{wardleinitializer.New(informerFactory)}, nil } + + serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } From 6cf819165f6cb00faf06b9627c48d9de5d3791df Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Wed, 3 Jan 2018 16:58:12 +0800 Subject: [PATCH 039/264] Double check before setKubeletConfiguration --- test/e2e_node/util.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 0d00f88944e..bc08f03d27f 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -107,6 +107,10 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini framework.ExpectNoError(err) newCfg := oldCfg.DeepCopy() updateFunction(newCfg) + if reflect.DeepEqual(*newCfg, *oldCfg) { + return + } + framework.ExpectNoError(setKubeletConfiguration(f, newCfg)) }) AfterEach(func() { From 62f29fcb398853749fbd3fc102225858936a872d Mon Sep 17 00:00:00 2001 From: lcfang Date: Tue, 12 Dec 2017 09:22:27 +0800 Subject: [PATCH 040/264] fixed the some typo in eviction_manager --- 
pkg/kubelet/eviction/eviction_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 90b1038cf15..3378bf864c1 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -312,7 +312,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act m.Unlock() // evict pods if there is a resource usage violation from local volume temporary storage - // If eviction happens in localVolumeEviction function, skip the rest of eviction action + // If eviction happens in localStorageEviction function, skip the rest of eviction action if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { if evictedPods := m.localStorageEviction(activePods); len(evictedPods) > 0 { return evictedPods From eb688e098f5abd095584d84a50dba1beeea97db5 Mon Sep 17 00:00:00 2001 From: mattjmcnaughton Date: Fri, 5 Jan 2018 08:40:24 -0500 Subject: [PATCH 041/264] Add RESTClient Custom metrics empty test Add testing for a previously untested path, which is tested when getting resource metrics. --- .../podautoscaler/metrics/rest_metrics_client_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go b/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go index 289b93f04d7..e51c5309f8c 100644 --- a/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go +++ b/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go @@ -249,6 +249,16 @@ func TestRESTClientQpsSumEqualZero(t *testing.T) { tc.runTest(t) } +func TestRESTClientQpsEmptyMetrics(t *testing.T) { + tc := restClientTestCase{ + metricName: "qps", + desiredError: fmt.Errorf("no metrics returned from custom metrics API"), + reportedMetricPoints: []metricPoint{}, + } + + tc.runTest(t) +} + func TestRESTClientCPUEmptyMetrics(t *testing.T) { tc := restClientTestCase{ resourceName: v1.ResourceCPU, From 64c20676ac731f2247766846a9ac7ac298538c06 Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Thu, 4 Jan 2018 17:48:52 -0800 Subject: [PATCH 042/264] Use existing subnetwork of forwarding rule --- .../providers/gce/gce_loadbalancer_internal.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index d4acf9fa031..4b0c02c6925 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -82,10 +82,18 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s requestedIP := determineRequestedIP(svc, existingFwdRule) ipToUse := requestedIP + // If the ILB already exists, continue using the subnet that it's already using. + // This is to support existing ILBs that were setup using the wrong subnet. + subnetworkURL := gce.SubnetworkURL() + if existingFwdRule != nil && existingFwdRule.Subnetwork != "" { + // external LBs have an empty Subnetwork field. 
+ subnetworkURL = existingFwdRule.Subnetwork + } + var addrMgr *addressManager // If the network is not a legacy network, use the address manager if !gce.IsLegacyNetwork() { - addrMgr = newAddressManager(gce, nm.String(), gce.Region(), gce.SubnetworkURL(), loadBalancerName, requestedIP, schemeInternal) + addrMgr = newAddressManager(gce, nm.String(), gce.Region(), subnetworkURL, loadBalancerName, requestedIP, schemeInternal) ipToUse, err = addrMgr.HoldAddress() if err != nil { return nil, err @@ -108,9 +116,10 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s LoadBalancingScheme: string(scheme), } - // Specify subnetwork if known - if len(gce.subnetworkURL) > 0 { - expectedFwdRule.Subnetwork = gce.subnetworkURL + // Given that CreateGCECloud will attempt to determine the subnet based off the network, + // the subnetwork should rarely be unknown. + if subnetworkURL != "" { + expectedFwdRule.Subnetwork = subnetworkURL } else { expectedFwdRule.Network = gce.networkURL } From 12301ae319cef8c5c59e044e6c078644416ff8c9 Mon Sep 17 00:00:00 2001 From: Robert Pothier Date: Fri, 5 Jan 2018 14:28:42 -0500 Subject: [PATCH 043/264] Kubeadm: clean up MarshalToYamlForCodecs Proxy will use PrintBytesWithLinePrefix to indent. --- cmd/kubeadm/app/phases/addons/proxy/proxy.go | 7 +++++-- cmd/kubeadm/app/util/marshal.go | 16 ---------------- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy.go b/cmd/kubeadm/app/phases/addons/proxy/proxy.go index e8f71d11be1..2f47aa15547 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy.go @@ -17,6 +17,7 @@ limitations under the License. package proxy import ( + "bytes" "fmt" "runtime" @@ -57,11 +58,13 @@ func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte return err } - proxyBytes, err := kubeadmutil.MarshalToYamlForCodecsWithShift(cfg.KubeProxy.Config, kubeproxyconfigv1alpha1.SchemeGroupVersion, + proxyBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeProxy.Config, kubeproxyconfigv1alpha1.SchemeGroupVersion, kubeproxyconfigscheme.Codecs) if err != nil { return fmt.Errorf("error when marshaling: %v", err) } + var prefixBytes bytes.Buffer + apiclient.PrintBytesWithLinePrefix(&prefixBytes, proxyBytes, " ") var proxyConfigMapBytes, proxyDaemonSetBytes []byte proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19, struct { @@ -69,7 +72,7 @@ func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte ProxyConfig string }{ MasterEndpoint: masterEndpoint, - ProxyConfig: proxyBytes, + ProxyConfig: prefixBytes.String(), }) if err != nil { return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) diff --git a/cmd/kubeadm/app/util/marshal.go b/cmd/kubeadm/app/util/marshal.go index 907a0f17794..67281d04732 100644 --- a/cmd/kubeadm/app/util/marshal.go +++ b/cmd/kubeadm/app/util/marshal.go @@ -18,7 +18,6 @@ package util import ( "fmt" - "strings" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -42,18 +41,3 @@ func MarshalToYamlForCodecs(obj runtime.Object, gv schema.GroupVersion, codecs s encoder := codecs.EncoderForVersion(info.Serializer, gv) return runtime.Encode(encoder, obj) } - -// MarshalToYamlForCodecsWithShift adds spaces in front of each line so the indents line up -// correctly in the manifest -func MarshalToYamlForCodecsWithShift(obj runtime.Object, gv schema.GroupVersion, codecs serializer.CodecFactory) (string, 
error) { - serial, err := MarshalToYamlForCodecs(obj, gv, codecs) - if err != nil { - return "", err - } - lines := strings.Split(string(serial), "\n") - var newSerial string - for _, line := range lines { - newSerial = newSerial + " " + line + "\n" - } - return newSerial, err -} From c322f1d06504fa5b81e117ec79dd77fb4e769b4f Mon Sep 17 00:00:00 2001 From: Anish Ramasekar Date: Wed, 13 Dec 2017 01:46:06 +0530 Subject: [PATCH 044/264] fix typos in kubectl pkg --- pkg/kubectl/cmd/clusterinfo.go | 2 +- pkg/kubectl/cmd/completion.go | 2 +- pkg/kubectl/cmd/expose.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/kubectl/cmd/clusterinfo.go b/pkg/kubectl/cmd/clusterinfo.go index 4924691c68a..2e2b7a8b959 100644 --- a/pkg/kubectl/cmd/clusterinfo.go +++ b/pkg/kubectl/cmd/clusterinfo.go @@ -135,7 +135,7 @@ func printService(out io.Writer, name, link string) { ct.ChangeColor(ct.Green, false, ct.None, false) fmt.Fprint(out, name) ct.ResetColor() - fmt.Fprintf(out, " is running at ") + fmt.Fprint(out, " is running at ") ct.ChangeColor(ct.Yellow, false, ct.None, false) fmt.Fprint(out, link) ct.ResetColor() diff --git a/pkg/kubectl/cmd/completion.go b/pkg/kubectl/cmd/completion.go index d14ca66329b..4a2f2b10ffe 100644 --- a/pkg/kubectl/cmd/completion.go +++ b/pkg/kubectl/cmd/completion.go @@ -46,7 +46,7 @@ const defaultBoilerPlate = ` var ( completion_long = templates.LongDesc(i18n.T(` Output shell completion code for the specified shell (bash or zsh). - The shell code must be evalutated to provide interactive + The shell code must be evaluated to provide interactive completion of kubectl commands. This can be done by sourcing it from the .bash_profile. diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index ed84c1d76b7..b1347fe1436 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -99,7 +99,7 @@ func NewCmdExposeService(f cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("protocol", "", i18n.T("The network protocol for the service to be created. Default is 'TCP'.")) cmd.Flags().String("port", "", i18n.T("The port that the service should serve on. Copied from the resource being exposed, if unspecified")) cmd.Flags().String("type", "", i18n.T("Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. Default is 'ClusterIP'.")) - cmd.Flags().String("load-balancer-ip", "", i18n.T("IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).")) + cmd.Flags().String("load-balancer-ip", "", i18n.T("IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).")) cmd.Flags().String("selector", "", i18n.T("A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.)")) cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.") cmd.Flags().String("container-port", "", i18n.T("Synonym for --target-port")) From 5a165b03878d2712731b6f7b989583ca54d00414 Mon Sep 17 00:00:00 2001 From: mattjmcnaughton Date: Fri, 5 Jan 2018 09:05:54 -0500 Subject: [PATCH 045/264] Add test coverage for metrics/utilization.go Currently, there is no test coverage for this code. Since it does fairly important calculations, test coverage seems helpful. 
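
For context, here is a minimal, self-contained Go sketch of the calculation these tests exercise. It is a simplified approximation written for illustration only, not the actual helper in utilization.go: it ignores pods without a matching request, averages usage over the remaining pods, expresses that as a percentage of the requested amount, and divides by the target utilization percentage (consistent with the expected values in the test cases below).

    package main

    import "fmt"

    // PodMetricsInfo maps a pod name to its observed raw metric value
    // (e.g. milli-CPU), mirroring the type used in the tests below.
    type PodMetricsInfo map[string]int64

    // getResourceUtilizationRatio approximates the helper under test:
    // pods without a request are skipped, usage and requests are summed
    // over the rest, and the resulting utilization percentage is
    // compared against the target utilization percentage.
    func getResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int64, target int32) (ratio float64, utilization int32, rawAverage int64, err error) {
        var totalUsage, totalRequest, count int64
        for pod, usage := range metrics {
            request, ok := requests[pod]
            if !ok {
                continue // no request recorded for this pod, ignore it
            }
            totalUsage += usage
            totalRequest += request
            count++
        }
        if count == 0 {
            return 0, 0, 0, fmt.Errorf("no metrics returned matched known pods")
        }
        utilization = int32(totalUsage * 100 / totalRequest)
        rawAverage = totalUsage / count
        return float64(utilization) / float64(target), utilization, rawAverage, nil
    }

    func main() {
        // Matches the base case below: usage 50+76 against requests 100+100
        // gives 63% utilization, raw average 63, and ratio 63/50 = 1.26.
        ratio, utilization, rawAverage, _ := getResourceUtilizationRatio(
            PodMetricsInfo{"test-pod-0": 50, "test-pod-1": 76},
            map[string]int64{"test-pod-0": 100, "test-pod-1": 100},
            50)
        fmt.Println(ratio, utilization, rawAverage) // 1.26 63 63
    }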
--- pkg/controller/podautoscaler/metrics/BUILD | 1 + .../podautoscaler/metrics/utilization_test.go | 149 ++++++++++++++++++ 2 files changed, 150 insertions(+) create mode 100644 pkg/controller/podautoscaler/metrics/utilization_test.go diff --git a/pkg/controller/podautoscaler/metrics/BUILD b/pkg/controller/podautoscaler/metrics/BUILD index 8592a565082..c9c11fe249f 100644 --- a/pkg/controller/podautoscaler/metrics/BUILD +++ b/pkg/controller/podautoscaler/metrics/BUILD @@ -37,6 +37,7 @@ go_test( srcs = [ "legacy_metrics_client_test.go", "rest_metrics_client_test.go", + "utilization_test.go", ], embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", diff --git a/pkg/controller/podautoscaler/metrics/utilization_test.go b/pkg/controller/podautoscaler/metrics/utilization_test.go new file mode 100644 index 00000000000..35e7df6eda4 --- /dev/null +++ b/pkg/controller/podautoscaler/metrics/utilization_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +type resourceUtilizationRatioTestCase struct { + metrics PodMetricsInfo + requests map[string]int64 + targetUtilization int32 + + expectedUtilizationRatio float64 + expectedCurrentUtilization int32 + expectedRawAverageValue int64 + expectedErr error +} + +func (tc *resourceUtilizationRatioTestCase) runTest(t *testing.T) { + actualUtilizationRatio, actualCurrentUtilization, actualRawAverageValue, actualErr := GetResourceUtilizationRatio(tc.metrics, tc.requests, tc.targetUtilization) + + if tc.expectedErr != nil { + assert.Error(t, actualErr, "there should be an error getting the utilization ratio") + assert.Contains(t, fmt.Sprintf("%v", actualErr), fmt.Sprintf("%v", tc.expectedErr), "the error message should be as expected") + return + } + + assert.NoError(t, actualErr, "there should be no error retrieving the utilization ratio") + assert.Equal(t, tc.expectedUtilizationRatio, actualUtilizationRatio, "the utilization ratios should be as expected") + assert.Equal(t, tc.expectedCurrentUtilization, actualCurrentUtilization, "the current utilization should be as expected") + assert.Equal(t, tc.expectedRawAverageValue, actualRawAverageValue, "the raw average value should be as expected") +} + +type metricUtilizationRatioTestCase struct { + metrics PodMetricsInfo + targetUtilization int64 + + expectedUtilizationRatio float64 + expectedCurrentUtilization int64 +} + +func (tc *metricUtilizationRatioTestCase) runTest(t *testing.T) { + actualUtilizationRatio, actualCurrentUtilization := GetMetricUtilizationRatio(tc.metrics, tc.targetUtilization) + + assert.Equal(t, tc.expectedUtilizationRatio, actualUtilizationRatio, "the utilization ratios should be as expected") + assert.Equal(t, tc.expectedCurrentUtilization, actualCurrentUtilization, "the current utilization should be as expected") +} + +func TestGetResourceUtilizationRatioBaseCase(t *testing.T) { + tc := 
resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, + }, + requests: map[string]int64{ + "test-pod-0": 100, "test-pod-1": 100, + }, + targetUtilization: 50, + expectedUtilizationRatio: 1.26, + expectedCurrentUtilization: 63, + expectedRawAverageValue: 63, + expectedErr: nil, + } + + tc.runTest(t) +} + +func TestGetResourceUtilizationRatioIgnorePodsWithNoRequest(t *testing.T) { + tc := resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, "test-pod-no-request": 100, + }, + requests: map[string]int64{ + "test-pod-0": 100, "test-pod-1": 100, + }, + targetUtilization: 50, + expectedUtilizationRatio: 1.26, + expectedCurrentUtilization: 63, + expectedRawAverageValue: 63, + expectedErr: nil, + } + + tc.runTest(t) +} + +func TestGetResourceUtilizationRatioExtraRequest(t *testing.T) { + tc := resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, + }, + requests: map[string]int64{ + "test-pod-0": 100, "test-pod-1": 100, "test-pod-extra-request": 500, + }, + targetUtilization: 50, + expectedUtilizationRatio: 1.26, + expectedCurrentUtilization: 63, + expectedRawAverageValue: 63, + expectedErr: nil, + } + + tc.runTest(t) +} + +func TestGetResourceUtilizationRatioNoRequests(t *testing.T) { + tc := resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, + }, + requests: map[string]int64{}, + targetUtilization: 50, + + expectedUtilizationRatio: 0, + expectedCurrentUtilization: 0, + expectedRawAverageValue: 0, + expectedErr: fmt.Errorf("no metrics returned matched known pods"), + } + + tc.runTest(t) +} + +func TestGetMetricUtilizationRatioBaseCase(t *testing.T) { + tc := metricUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 5000, "test-pod-1": 10000, + }, + targetUtilization: 10000, + expectedUtilizationRatio: .75, + expectedCurrentUtilization: 7500, + } + + tc.runTest(t) +} From 0e6ac1df76f30375c0ef81b58c4f1e575efd7957 Mon Sep 17 00:00:00 2001 From: mlmhl Date: Mon, 1 Jan 2018 21:35:13 +0800 Subject: [PATCH 046/264] fix populateDesiredStateOfWorld bug for attach/detach controller --- pkg/controller/volume/attachdetach/attach_detach_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index 3c01e6b05a4..e7576ea0c3a 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -335,7 +335,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error { } for _, pod := range pods { podToAdd := pod - adc.podAdd(&podToAdd) + adc.podAdd(podToAdd) for _, podVolume := range podToAdd.Spec.Volumes { // The volume specs present in the ActualStateOfWorld are nil, let's replace those // with the correct ones found on pods. 
The present in the ASW with no corresponding From bac270533e0176739f78777bb59f43647c474137 Mon Sep 17 00:00:00 2001 From: Jesse Haka Date: Wed, 6 Dec 2017 09:33:45 +0200 Subject: [PATCH 047/264] use danglingerror add getNodeNameByID and use volume.AttachedDevice as devicepath use uppercase functionname do not delete automatically nodes if node is shutdowned in openstack do not delete node fix gofmt fix cinder detach if instance is not in active state fix gofmt --- pkg/cloudprovider/providers/openstack/BUILD | 1 + .../providers/openstack/openstack.go | 29 +++++++++++++++---- .../openstack/openstack_instances.go | 6 ++-- .../openstack/openstack_loadbalancer.go | 2 +- .../providers/openstack/openstack_routes.go | 2 +- .../providers/openstack/openstack_volumes.go | 29 ++++++++++++++++++- 6 files changed, 58 insertions(+), 11 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/BUILD b/pkg/cloudprovider/providers/openstack/BUILD index 42a185e2f4c..7840fd00a6a 100644 --- a/pkg/cloudprovider/providers/openstack/BUILD +++ b/pkg/cloudprovider/providers/openstack/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/controller:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index 00e15d228b2..759412a9f01 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -319,6 +319,22 @@ func mapNodeNameToServerName(nodeName types.NodeName) string { return string(nodeName) } +// getNodeNameByID maps instanceid to types.NodeName +func (os *OpenStack) GetNodeNameByID(instanceID string) (types.NodeName, error) { + client, err := os.NewComputeV2() + var nodeName types.NodeName + if err != nil { + return nodeName, err + } + + server, err := servers.Get(client, instanceID).Extract() + if err != nil { + return nodeName, err + } + nodeName = mapServerToNodeName(server) + return nodeName, nil +} + // mapServerToNodeName maps an OpenStack Server to a k8s NodeName func mapServerToNodeName(server *servers.Server) types.NodeName { // Node names are always lowercase, and (at least) @@ -346,11 +362,14 @@ func foreachServer(client *gophercloud.ServiceClient, opts servers.ListOptsBuild return err } -func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) { +func getServerByName(client *gophercloud.ServiceClient, name types.NodeName, showOnlyActive bool) (*servers.Server, error) { opts := servers.ListOpts{ - Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))), - Status: "ACTIVE", + Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))), } + if showOnlyActive { + opts.Status = "ACTIVE" + } + pager := servers.List(client, opts) serverList := make([]servers.Server, 0, 1) @@ -432,7 +451,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) { } func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]v1.NodeAddress, error) { - srv, err := getServerByName(client, name) + srv, err := getServerByName(client, name, true) if err != nil { return nil, err } @@ -582,7 +601,7 @@ func (os *OpenStack) GetZoneByNodeName(nodeName types.NodeName) 
(cloudprovider.Z return cloudprovider.Zone{}, err } - srv, err := getServerByName(compute, nodeName) + srv, err := getServerByName(compute, nodeName, true) if err != nil { if err == ErrNotFound { return cloudprovider.Zone{}, cloudprovider.InstanceNotFound diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index 3cf1733b322..981ff7b9f89 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -103,7 +103,7 @@ func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddre // ExternalID returns the cloud provider ID of the specified instance (deprecated). func (i *Instances) ExternalID(name types.NodeName) (string, error) { - srv, err := getServerByName(i.compute, name) + srv, err := getServerByName(i.compute, name, true) if err != nil { if err == ErrNotFound { return "", cloudprovider.InstanceNotFound @@ -151,7 +151,7 @@ func (os *OpenStack) InstanceID() (string, error) { // InstanceID returns the cloud provider ID of the specified instance. func (i *Instances) InstanceID(name types.NodeName) (string, error) { - srv, err := getServerByName(i.compute, name) + srv, err := getServerByName(i.compute, name, true) if err != nil { if err == ErrNotFound { return "", cloudprovider.InstanceNotFound @@ -184,7 +184,7 @@ func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error) // InstanceType returns the type of the specified instance. func (i *Instances) InstanceType(name types.NodeName) (string, error) { - srv, err := getServerByName(i.compute, name) + srv, err := getServerByName(i.compute, name, true) if err != nil { return "", err diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 2089c14f4ce..036af670bc7 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -551,7 +551,7 @@ func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1 for _, node := range nodes { nodeName := types.NodeName(node.Name) - srv, err := getServerByName(compute, nodeName) + srv, err := getServerByName(compute, nodeName, true) if err != nil { return nodeSecurityGroupIDs.List(), err } diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go b/pkg/cloudprovider/providers/openstack/openstack_routes.go index c5f0974dadd..c5a8ba6d212 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -288,7 +288,7 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err } func getPortIDByIP(compute *gophercloud.ServiceClient, targetNode types.NodeName, ipAddress string) (string, error) { - srv, err := getServerByName(compute, targetNode) + srv, err := getServerByName(compute, targetNode, true) if err != nil { return "", err } diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index eab5b7c9b5d..8a530592845 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -17,6 +17,7 @@ limitations under the License. 
package openstack import ( + "errors" "fmt" "io/ioutil" "path" @@ -26,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" k8s_volume "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" "github.com/gophercloud/gophercloud" volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" @@ -317,8 +319,33 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if instanceID == volume.AttachedServerId { glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil + } else { + nodeName, err := os.GetNodeNameByID(volume.AttachedServerId) + attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId) + if err != nil { + glog.Error(attachErr) + return "", errors.New(attachErr) + } + // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 + devicePath := volume.AttachedDevice + danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) + glog.V(4).Infof("volume %s is already attached to node %s path %s", volumeID, nodeName, devicePath) + // check special case, if node is deleted from cluster but exist still in openstack + // we need to check can we detach the cinder, node is deleted from cluster if state is not ACTIVE + srv, err := getServerByName(cClient, nodeName, false) + if err != nil { + return "", err + } + if srv.Status != "ACTIVE" { + err = os.DetachDisk(volume.AttachedServerId, volumeID) + if err != nil { + glog.Error(err) + return "", err + } + glog.V(4).Infof("detached volume %s node state was %s", volumeID, srv.Status) + } + return "", danglingErr } - return "", fmt.Errorf("disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId) } startTime := time.Now() From ff380d67f469de8d8ad119dc36b490cedfa77d88 Mon Sep 17 00:00:00 2001 From: zouyee Date: Mon, 8 Jan 2018 18:42:01 +0800 Subject: [PATCH 048/264] remove deplicate func --- pkg/controller/deployment/util/deployment_util.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index d017e33709e..763b8debdb3 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -124,18 +124,6 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi return nil } -// TODO: remove the duplicate -// GetDeploymentConditionInternal returns the condition with the provided type. -func GetDeploymentConditionInternal(status internalextensions.DeploymentStatus, condType internalextensions.DeploymentConditionType) *internalextensions.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} - // SetDeploymentCondition updates the deployment to include the provided condition. If the condition that // we are about to add already exists and has the same status and reason then we are not going to update. 
func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) { From e3cafd83037a752c99ddb072a3c09c94b9dfdf43 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Thu, 14 Dec 2017 17:16:34 +0200 Subject: [PATCH 049/264] Enable support for etcd3 --- .../reactive/kubernetes_master.py | 37 ++++++++++++++----- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index d27caf59f57..a4211913002 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -24,7 +24,7 @@ import string import json import ipaddress -import charms.leadership +from charms.leadership import leader_get, leader_set from shutil import move @@ -112,6 +112,7 @@ def check_for_upgrade_needed(): # we take no risk and forcibly upgrade the snaps. # Forcibly means we do not prompt the user to call the upgrade action. set_upgrade_needed(forced=True) + upgrade_for_etcd() def snap_resources_changed(): @@ -136,6 +137,13 @@ def snap_resources_changed(): any_file_changed(paths) return 'unknown' +def upgrade_for_etcd(): + # we are upgrading the charm. + # If this is an old deployment etcd_version is not set + # so if we are the leader we need to set it to v2 + if not leader_get('etcd_version') and is_state('leadership.is_leader'): + leader_set(etcd_version='etcd2') + def add_rbac_roles(): '''Update the known_tokens file with proper groups.''' @@ -316,7 +324,7 @@ def setup_leader_authentication(): # path as a key. # eg: # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'} - charms.leadership.leader_set(leader_data) + leader_set(leader_data) remove_state('kubernetes-master.components.started') set_state('authentication.setup') @@ -364,7 +372,7 @@ def get_keys_from_leader(keys, overwrite_local=False): # If the path does not exist, assume we need it if not os.path.exists(k) or overwrite_local: # Fetch data from leadership broadcast - contents = charms.leadership.leader_get(k) + contents = leader_get(k) # Default to logging the warning and wait for leader data to be set if contents is None: msg = "Waiting on leaders crypto keys." @@ -423,6 +431,7 @@ def master_services_down(): @when('etcd.available', 'tls_client.server.certificate.saved', 'authentication.setup') +@when('leadership.set.etcd_version') @when_not('kubernetes-master.components.started') def start_master(etcd): '''Run the Kubernetes master components.''' @@ -440,7 +449,8 @@ def start_master(etcd): handle_etcd_relation(etcd) # Add CLI options to all components - configure_apiserver(etcd) + leader_etcd_version = leader_get('etcd_version') + configure_apiserver(etcd.get_connection_string(), leader_etcd_version) configure_controller_manager() configure_scheduler() set_state('kubernetes-master.components.started') @@ -462,6 +472,14 @@ def etcd_data_change(etcd): if data_changed('etcd-connect', connection_string): remove_state('kubernetes-master.components.started') + # We are the leader and the etcd_version is not set meaning + # this is the first time we connect to etcd. 
+ if is_state('leadership.is_leader') and not leader_get('etcd_version'): + if etcd.get_version().startswith('3.'): + leader_set(etcd_version='etcd3') + else: + leader_set(etcd_version='etcd2') + @when('kube-control.connected') @when('cdk-addons.configured') @@ -816,9 +834,11 @@ def on_config_allow_privileged_change(): @when('config.changed.api-extra-args') @when('kubernetes-master.components.started') +@when('leadership.set.etcd_version') @when('etcd.available') def on_config_api_extra_args_change(etcd): - configure_apiserver(etcd) + configure_apiserver(etcd.get_connection_string(), + leader_get('etcd_version')) @when('config.changed.controller-manager-extra-args') @@ -1045,7 +1065,7 @@ def configure_kubernetes_service(service, base_args, extra_args_key): db.set(prev_args_key, args) -def configure_apiserver(etcd): +def configure_apiserver(etcd_connection_string, leader_etcd_version): api_opts = {} # Get the tls paths from the layer data. @@ -1075,8 +1095,7 @@ def configure_apiserver(etcd): api_opts['logtostderr'] = 'true' api_opts['insecure-bind-address'] = '127.0.0.1' api_opts['insecure-port'] = '8080' - api_opts['storage-backend'] = 'etcd2' # FIXME: add etcd3 support - + api_opts['storage-backend'] = leader_etcd_version api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv' api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv' api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key' @@ -1089,7 +1108,7 @@ def configure_apiserver(etcd): api_opts['etcd-cafile'] = etcd_ca api_opts['etcd-keyfile'] = etcd_key api_opts['etcd-certfile'] = etcd_cert - api_opts['etcd-servers'] = etcd.get_connection_string() + api_opts['etcd-servers'] = etcd_connection_string admission_control = [ 'Initializers', From 45d21ee36b1a49e3b86d99810a7875b7496da30a Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Mon, 8 Jan 2018 14:01:33 +0100 Subject: [PATCH 050/264] Fixed TearDown of NFS with root squash. NFS plugin should not use IsLikelyNotMountPoint(), as it uses lstat() / stat() to determine if the NFS volume is still mounted - NFS server may use root_squash and kubelet may not be allowed to do lstat() / stat() there. It must use slower IsNotMountPoint() instead, including TearDown() function. --- pkg/util/mount/mount.go | 7 +++++++ pkg/volume/nfs/nfs.go | 11 +++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/pkg/util/mount/mount.go b/pkg/util/mount/mount.go index 953b571900a..d2069ec7861 100644 --- a/pkg/util/mount/mount.go +++ b/pkg/util/mount/mount.go @@ -19,6 +19,7 @@ limitations under the License. package mount import ( + "os" "path/filepath" ) @@ -208,6 +209,12 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { // IsLikelyNotMountPoint provides a quick check // to determine whether file IS A mountpoint notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file) + if notMntErr != nil && os.IsPermission(notMntErr) { + // We were not allowed to do the simple stat() check, e.g. on NFS with + // root_squash. Fall back to /proc/mounts check below. 
+ notMnt = true + notMntErr = nil + } if notMntErr != nil { return notMnt, notMntErr } diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go index f61fbd25944..26db61d729a 100644 --- a/pkg/volume/nfs/nfs.go +++ b/pkg/volume/nfs/nfs.go @@ -233,7 +233,7 @@ func (b *nfsMounter) SetUp(fsGroup *int64) error { } func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { - notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) + notMnt, err := b.mounter.IsNotMountPoint(dir) glog.V(4).Infof("NFS mount set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err @@ -252,7 +252,7 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { mountOptions := volume.JoinMountOptions(b.mountOptions, options) err = b.mounter.Mount(source, dir, "nfs", mountOptions) if err != nil { - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) + notMnt, mntErr := b.mounter.IsNotMountPoint(dir) if mntErr != nil { glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err @@ -262,7 +262,7 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { glog.Errorf("Failed to unmount: %v", mntErr) return err } - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) + notMnt, mntErr := b.mounter.IsNotMountPoint(dir) if mntErr != nil { glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err @@ -290,7 +290,10 @@ func (c *nfsUnmounter) TearDown() error { } func (c *nfsUnmounter) TearDownAt(dir string) error { - return util.UnmountPath(dir, c.mounter) + // Use extensiveMountPointCheck to consult /proc/mounts. We can't use faster + // IsLikelyNotMountPoint (lstat()), since there may be root_squash on the + // NFS server and kubelet may not be able to do lstat/stat() there. + return util.UnmountMountPoint(dir, c.mounter, true /* extensiveMountPointCheck */) } func getVolumeSource(spec *volume.Spec) (*v1.NFSVolumeSource, bool, error) { From da1eec2853098b2e60b50b14415303d1c8123bc7 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Mon, 8 Jan 2018 14:19:22 +0100 Subject: [PATCH 051/264] Add jsafrane as util/mount approver. --- pkg/util/mount/OWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/util/mount/OWNERS b/pkg/util/mount/OWNERS index 556119c4471..0c7ea81089b 100644 --- a/pkg/util/mount/OWNERS +++ b/pkg/util/mount/OWNERS @@ -4,4 +4,5 @@ reviewers: approvers: - jingxu97 - saad-ali + - jsafrane From 3a461afaf59085e587a91430db756bc68ff9182b Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Mon, 8 Jan 2018 15:26:03 +0100 Subject: [PATCH 052/264] pkg/securitycontext/util_test.go(TestAddNoNewPrivileges): update tests. 
- remove irrelevant test cases - add test case for AllowPrivilegeEscalation: nil - explicitly specify input and expected outcome --- pkg/securitycontext/util_test.go | 45 ++++++++++---------------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/pkg/securitycontext/util_test.go b/pkg/securitycontext/util_test.go index 1b89adcd893..5cc3fa71ada 100644 --- a/pkg/securitycontext/util_test.go +++ b/pkg/securitycontext/util_test.go @@ -178,56 +178,39 @@ func TestHasRootRunAsUser(t *testing.T) { } func TestAddNoNewPrivileges(t *testing.T) { - var nonRoot int64 = 1000 - var root int64 = 0 pfalse := false ptrue := true tests := map[string]struct { - sc v1.SecurityContext + sc *v1.SecurityContext expect bool }{ - "allowPrivilegeEscalation nil security context nil": {}, - "allowPrivilegeEscalation nil nonRoot": { - sc: v1.SecurityContext{ - RunAsUser: &nonRoot, - }, + "allowPrivilegeEscalation nil security context nil": { + sc: nil, + expect: false, }, - "allowPrivilegeEscalation nil root": { - sc: v1.SecurityContext{ - RunAsUser: &root, + "allowPrivilegeEscalation nil": { + sc: &v1.SecurityContext{ + AllowPrivilegeEscalation: nil, }, + expect: false, }, - "allowPrivilegeEscalation false nonRoot": { - sc: v1.SecurityContext{ - RunAsUser: &nonRoot, + "allowPrivilegeEscalation false": { + sc: &v1.SecurityContext{ AllowPrivilegeEscalation: &pfalse, }, expect: true, }, - "allowPrivilegeEscalation false root": { - sc: v1.SecurityContext{ - RunAsUser: &root, - AllowPrivilegeEscalation: &pfalse, - }, - expect: true, - }, - "allowPrivilegeEscalation true nonRoot": { - sc: v1.SecurityContext{ - RunAsUser: &nonRoot, - AllowPrivilegeEscalation: &ptrue, - }, - }, - "allowPrivilegeEscalation true root": { - sc: v1.SecurityContext{ - RunAsUser: &root, + "allowPrivilegeEscalation true": { + sc: &v1.SecurityContext{ AllowPrivilegeEscalation: &ptrue, }, + expect: false, }, } for k, v := range tests { - actual := AddNoNewPrivileges(&v.sc) + actual := AddNoNewPrivileges(v.sc) if actual != v.expect { t.Errorf("%s failed, expected %t but received %t", k, v.expect, actual) } From d4e17cb7b4b3a655d11d9a1739189548b47628a1 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Fri, 1 Dec 2017 11:26:11 -0500 Subject: [PATCH 053/264] Allow oadm drain to continue w ds-managed pods w local storage --- pkg/kubectl/cmd/drain.go | 9 ++++- pkg/kubectl/cmd/drain_test.go | 76 ++++++++++++++++++++++++++++++----- 2 files changed, 74 insertions(+), 11 deletions(-) diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 910654f6f6a..5640c734623 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -466,7 +466,7 @@ func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev for _, pod := range podList.Items { podOk := true - for _, filt := range []podFilter{mirrorPodFilter, o.localStorageFilter, o.unreplicatedFilter, o.daemonsetFilter} { + for _, filt := range []podFilter{o.daemonsetFilter, mirrorPodFilter, o.localStorageFilter, o.unreplicatedFilter} { filterOk, w, f := filt(pod) podOk = podOk && filterOk @@ -476,6 +476,13 @@ func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev if f != nil { fs[f.string] = append(fs[f.string], pod.Name) } + + // short-circuit as soon as pod not ok + // at that point, there is no reason to run pod + // through any additional filters + if !podOk { + break + } } if podOk { pods = append(pods, pod) diff --git a/pkg/kubectl/cmd/drain_test.go b/pkg/kubectl/cmd/drain_test.go index 5200e95cd89..f9996314406 100644 --- 
a/pkg/kubectl/cmd/drain_test.go +++ b/pkg/kubectl/cmd/drain_test.go @@ -304,6 +304,34 @@ func TestDrain(t *testing.T) { }, } + ds_pod_with_emptyDir := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Labels: labels, + SelfLink: testapi.Default.SelfLink("pods", "bar"), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "extensions/v1beta1", + Kind: "DaemonSet", + Name: "ds", + BlockOwnerDeletion: boolptr(true), + Controller: boolptr(true), + }, + }, + }, + Spec: corev1.PodSpec{ + NodeName: "node", + Volumes: []corev1.Volume{ + { + Name: "scratch", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}}, + }, + }, + }, + } + orphaned_ds_pod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", @@ -414,15 +442,16 @@ func TestDrain(t *testing.T) { } tests := []struct { - description string - node *corev1.Node - expected *corev1.Node - pods []corev1.Pod - rcs []api.ReplicationController - replicaSets []extensions.ReplicaSet - args []string - expectFatal bool - expectDelete bool + description string + node *corev1.Node + expected *corev1.Node + pods []corev1.Pod + rcs []api.ReplicationController + replicaSets []extensions.ReplicaSet + args []string + expectWarning string + expectFatal bool + expectDelete bool }{ { description: "RC-managed pod", @@ -474,6 +503,17 @@ func TestDrain(t *testing.T) { expectFatal: false, expectDelete: false, }, + { + description: "DS-managed pod with emptyDir with --ignore-daemonsets", + node: node, + expected: cordoned_node, + pods: []corev1.Pod{ds_pod_with_emptyDir}, + rcs: []api.ReplicationController{rc}, + args: []string{"node", "--ignore-daemonsets"}, + expectWarning: "WARNING: Ignoring DaemonSet-managed pods: bar\n", + expectFatal: false, + expectDelete: false, + }, { description: "Job-managed pod", node: node, @@ -661,6 +701,7 @@ func TestDrain(t *testing.T) { cmd := NewCmdDrain(f, buf, errBuf) saw_fatal := false + fatal_msg := "" func() { defer func() { // Recover from the panic below. 
@@ -668,7 +709,7 @@ func TestDrain(t *testing.T) { // Restore cmdutil behavior cmdutil.DefaultBehaviorOnFatal() }() - cmdutil.BehaviorOnFatal(func(e string, code int) { saw_fatal = true; panic(e) }) + cmdutil.BehaviorOnFatal(func(e string, code int) { saw_fatal = true; fatal_msg = e; panic(e) }) cmd.SetArgs(test.args) cmd.Execute() }() @@ -676,6 +717,11 @@ func TestDrain(t *testing.T) { if !saw_fatal { t.Fatalf("%s: unexpected non-error when using %s", test.description, currMethod) } + } else { + if saw_fatal { + t.Fatalf("%s: unexpected error when using %s: %s", test.description, currMethod, fatal_msg) + + } } if test.expectDelete { @@ -693,6 +739,16 @@ func TestDrain(t *testing.T) { t.Fatalf("%s: unexpected delete when using %s", test.description, currMethod) } } + + if len(test.expectWarning) > 0 { + if len(errBuf.String()) == 0 { + t.Fatalf("%s: expected warning, but found no stderr output", test.description) + } + + if errBuf.String() != test.expectWarning { + t.Fatalf("%s: actual warning message did not match expected warning message.\n Expecting: %s\n Got: %s", test.description, test.expectWarning, errBuf.String()) + } + } } } } From 09da53c8e92003321230488063d070a8ccfd2799 Mon Sep 17 00:00:00 2001 From: Doug MacEachern Date: Thu, 16 Nov 2017 20:13:29 -0800 Subject: [PATCH 054/264] Update vmware/govmomi godeps --- Godeps/Godeps.json | 97 +- Godeps/LICENSES | 875 ++ .../providers/vsphere/vclib/BUILD | 12 + vendor/BUILD | 1 + vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/BUILD | 34 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 23 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 53 + vendor/github.com/google/uuid/marshal.go | 39 + vendor/github.com/google/uuid/node.go | 100 + vendor/github.com/google/uuid/sql.go | 59 + vendor/github.com/google/uuid/time.go | 123 + vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 198 + vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 38 + vendor/github.com/vmware/govmomi/.drone.sec | 1 - vendor/github.com/vmware/govmomi/.drone.yml | 17 - vendor/github.com/vmware/govmomi/.mailmap | 3 + vendor/github.com/vmware/govmomi/.travis.yml | 2 +- vendor/github.com/vmware/govmomi/BUILD | 2 + vendor/github.com/vmware/govmomi/CHANGELOG.md | 31 + .../github.com/vmware/govmomi/CONTRIBUTING.md | 10 + vendor/github.com/vmware/govmomi/CONTRIBUTORS | 12 + vendor/github.com/vmware/govmomi/Makefile | 16 +- vendor/github.com/vmware/govmomi/README.md | 38 +- .../vmware/govmomi/find/recurser.go | 8 + vendor/github.com/vmware/govmomi/nfc/BUILD | 34 + vendor/github.com/vmware/govmomi/nfc/lease.go | 238 + .../vmware/govmomi/nfc/lease_updater.go | 146 + vendor/github.com/vmware/govmomi/object/BUILD | 3 +- .../vmware/govmomi/object/common.go | 19 +- .../govmomi/object/custom_fields_manager.go | 19 +- .../vmware/govmomi/object/datastore_file.go | 43 +- .../govmomi/object/datastore_file_manager.go | 19 + .../vmware/govmomi/object/datastore_path.go | 6 + .../object/distributed_virtual_portgroup.go | 2 +- .../object/distributed_virtual_switch.go | 12 + .../govmomi/object/host_storage_system.go | 9 + .../object/host_vsan_internal_system.go | 2 +- .../vmware/govmomi/object/http_nfc_lease.go | 143 - .../vmware/govmomi/object/network.go | 15 +- 
.../vmware/govmomi/object/ovf_manager.go | 104 - .../vmware/govmomi/object/resource_pool.go | 5 +- .../github.com/vmware/govmomi/object/task.go | 9 + .../govmomi/object/virtual_device_list.go | 16 +- .../vmware/govmomi/object/virtual_machine.go | 58 +- .../vmware/govmomi/property/collector.go | 2 +- .../vmware/govmomi/property/filter.go | 8 +- .../vmware/govmomi/property/wait.go | 138 +- .../vmware/govmomi/session/manager.go | 28 + .../github.com/vmware/govmomi/simulator/BUILD | 84 + .../simulator/authorization_manager.go | 257 + .../simulator/cluster_compute_resource.go | 98 + .../simulator/custom_fields_manager.go | 111 + .../vmware/govmomi/simulator/datacenter.go | 76 + .../vmware/govmomi/simulator/datastore.go | 59 + .../vmware/govmomi/simulator/doc.go | 22 + .../vmware/govmomi/simulator/dvs.go | 187 + .../vmware/govmomi/simulator/entity.go | 46 + .../vmware/govmomi/simulator/esx/BUILD | 41 + .../simulator/esx/authorization_manager.go | 85 + .../govmomi/simulator/esx/datacenter.go | 60 + .../vmware/govmomi/simulator/esx/doc.go | 20 + .../govmomi/simulator/esx/host_config_info.go | 1091 ++ .../simulator/esx/host_firewall_system.go | 1425 +++ .../simulator/esx/host_hardware_info.go | 864 ++ .../simulator/esx/host_storage_device_info.go | 346 + .../govmomi/simulator/esx/host_system.go | 1791 +++ .../simulator/esx/performance_manager.go | 9885 +++++++++++++++++ .../govmomi/simulator/esx/resource_pool.go | 165 + .../govmomi/simulator/esx/root_folder.go | 76 + .../govmomi/simulator/esx/service_content.go | 86 + .../vmware/govmomi/simulator/esx/setting.go | 30 + .../govmomi/simulator/esx/virtual_device.go | 242 + .../vmware/govmomi/simulator/file_manager.go | 251 + .../vmware/govmomi/simulator/folder.go | 471 + .../vmware/govmomi/simulator/guest_id.go | 171 + .../vmware/govmomi/simulator/guest_id.sh | 35 + .../simulator/host_datastore_browser.go | 254 + .../simulator/host_datastore_system.go | 161 + .../govmomi/simulator/host_firewall_system.go | 87 + .../govmomi/simulator/host_network_system.go | 171 + .../vmware/govmomi/simulator/host_system.go | 180 + .../govmomi/simulator/ip_pool_manager.go | 392 + .../govmomi/simulator/license_manager.go | 156 + .../vmware/govmomi/simulator/model.go | 484 + .../govmomi/simulator/option_manager.go | 59 + .../vmware/govmomi/simulator/os_unix.go | 38 + .../vmware/govmomi/simulator/os_windows.go | 26 + .../govmomi/simulator/performance_manager.go | 35 + .../vmware/govmomi/simulator/portgroup.go | 82 + .../govmomi/simulator/property_collector.go | 548 + .../govmomi/simulator/property_filter.go | 42 + .../vmware/govmomi/simulator/registry.go | 338 + .../vmware/govmomi/simulator/resource_pool.go | 312 + .../vmware/govmomi/simulator/search_index.go | 155 + .../govmomi/simulator/service_instance.go | 99 + .../govmomi/simulator/session_manager.go | 83 + .../vmware/govmomi/simulator/simulator.go | 551 + .../vmware/govmomi/simulator/snapshot.go | 68 + .../vmware/govmomi/simulator/task.go | 102 + .../vmware/govmomi/simulator/task_manager.go | 52 + .../govmomi/simulator/user_directory.go | 78 + .../vmware/govmomi/simulator/view_manager.go | 184 + .../govmomi/simulator/virtual_disk_manager.go | 193 + .../govmomi/simulator/virtual_machine.go | 885 ++ .../vmware/govmomi/simulator/vpx/BUILD | 31 + .../vmware/govmomi/simulator/vpx/doc.go | 20 + .../govmomi/simulator/vpx/root_folder.go | 64 + .../govmomi/simulator/vpx/service_content.go | 86 + .../vmware/govmomi/simulator/vpx/setting.go | 60 + .../vmware/govmomi/vim25/methods/BUILD | 1 - 
.../vmware/govmomi/vim25/methods/internal.go | 124 - .../vmware/govmomi/vim25/soap/client.go | 95 +- .../vmware/govmomi/vim25/types/BUILD | 1 - .../vmware/govmomi/vim25/types/helpers.go | 40 +- .../vmware/govmomi/vim25/types/if.go | 10 - .../vmware/govmomi/vim25/types/internal.go | 266 - .../vmware/govmomi/vim25/types/types.go | 68 +- 125 files changed, 26926 insertions(+), 934 deletions(-) create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/BUILD create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/vmware/govmomi/.drone.sec delete mode 100644 vendor/github.com/vmware/govmomi/.drone.yml create mode 100644 vendor/github.com/vmware/govmomi/nfc/BUILD create mode 100644 vendor/github.com/vmware/govmomi/nfc/lease.go create mode 100644 vendor/github.com/vmware/govmomi/nfc/lease_updater.go delete mode 100644 vendor/github.com/vmware/govmomi/object/http_nfc_lease.go delete mode 100644 vendor/github.com/vmware/govmomi/object/ovf_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/BUILD create mode 100644 vendor/github.com/vmware/govmomi/simulator/authorization_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/custom_fields_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/datacenter.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/datastore.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/doc.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/dvs.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/entity.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/BUILD create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/doc.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/host_system.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go create mode 100644 
vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/service_content.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/setting.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/file_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/folder.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/guest_id.go create mode 100755 vendor/github.com/vmware/govmomi/simulator/guest_id.sh create mode 100644 vendor/github.com/vmware/govmomi/simulator/host_datastore_browser.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/host_datastore_system.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/host_firewall_system.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/host_network_system.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/host_system.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/ip_pool_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/license_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/model.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/option_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/os_unix.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/os_windows.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/performance_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/portgroup.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/property_collector.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/property_filter.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/registry.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/resource_pool.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/search_index.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/service_instance.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/session_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/simulator.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/snapshot.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/task.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/task_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/user_directory.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/view_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/virtual_disk_manager.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/virtual_machine.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/vpx/BUILD create mode 100644 vendor/github.com/vmware/govmomi/simulator/vpx/doc.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/vpx/root_folder.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go create mode 100644 vendor/github.com/vmware/govmomi/simulator/vpx/setting.go delete mode 100644 vendor/github.com/vmware/govmomi/vim25/methods/internal.go delete mode 100644 vendor/github.com/vmware/govmomi/vim25/types/internal.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index fa46a593410..fb8d8ec14b2 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1712,6 +1712,11 @@ "ImportPath": 
"github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" }, + { + "ImportPath": "github.com/google/uuid", + "Comment": "0.2-15-g8c31c18", + "Rev": "8c31c18f31ede9fc8eae72290a7e7a8064e9b3e3" + }, { "ImportPath": "github.com/googleapis/gnostic/OpenAPIv2", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" @@ -2630,93 +2635,113 @@ }, { "ImportPath": "github.com/vmware/govmomi", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/find", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/list", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" + }, + { + "ImportPath": "github.com/vmware/govmomi/nfc", + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/object", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/pbm", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/pbm/methods", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/pbm/types", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/property", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/session", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" + }, + { + "ImportPath": "github.com/vmware/govmomi/simulator", + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" + }, + { + "ImportPath": "github.com/vmware/govmomi/simulator/esx", + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" + }, + { + "ImportPath": "github.com/vmware/govmomi/simulator/vpx", + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/task", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25/debug", - "Comment": 
"v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25/methods", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25/mo", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25/progress", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25/soap", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25/types", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/govmomi/vim25/xml", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-5-g5f0f400", + "Rev": "5f0f4004a1f075f29e715f1b956ca0ab4b428f17" }, { "ImportPath": "github.com/vmware/photon-controller-go-sdk/SSPI", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 8021a07e90e..f4cf45fa76e 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -58133,6 +58133,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/google/uuid licensed under: = + +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/github.com/google/uuid/LICENSE 88073b6dd8ec00fe09da59e0b6dfded1 +================================================================================ + + ================================================================================ = vendor/github.com/googleapis/gnostic/compiler licensed under: = @@ -83473,6 +83508,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/vmware/govmomi/nfc licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/vmware/govmomi/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/vmware/govmomi/object licensed under: = @@ -84733,6 +84978,636 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/vmware/govmomi/simulator licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/vmware/govmomi/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/vmware/govmomi/simulator/esx licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/vmware/govmomi/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/vmware/govmomi/simulator/vpx licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/vmware/govmomi/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/vmware/govmomi/task licensed under: = diff --git a/pkg/cloudprovider/providers/vsphere/vclib/BUILD b/pkg/cloudprovider/providers/vsphere/vclib/BUILD index 6750d560e4c..42001e1e2f9 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/BUILD +++ b/pkg/cloudprovider/providers/vsphere/vclib/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -55,3 +56,14 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["datacenter_test.go"], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", + deps = [ + "//vendor/github.com/vmware/govmomi:go_default_library", + "//vendor/github.com/vmware/govmomi/simulator:go_default_library", + ], +) diff --git a/vendor/BUILD b/vendor/BUILD index 20495c2248a..b17abcaf91f 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -244,6 +244,7 @@ filegroup( "//vendor/github.com/google/cadvisor/zfs:all-srcs", "//vendor/github.com/google/certificate-transparency/go:all-srcs", "//vendor/github.com/google/gofuzz:all-srcs", + "//vendor/github.com/google/uuid:all-srcs", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:all-srcs", "//vendor/github.com/googleapis/gnostic/compiler:all-srcs", "//vendor/github.com/googleapis/gnostic/extensions:all-srcs", diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 00000000000..d8156a60ba9 --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/BUILD b/vendor/github.com/google/uuid/BUILD new file mode 100644 index 00000000000..64c2ffc3ed3 --- /dev/null +++ b/vendor/github.com/google/uuid/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "dce.go", + "doc.go", + "hash.go", + "marshal.go", + "node.go", + "sql.go", + "time.go", + "util.go", + "uuid.go", + "version1.go", + "version4.go", + ], + importpath = "github.com/google/uuid", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 00000000000..04fdf09f136 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. 
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 00000000000..b4bb97f6bcd --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 00000000000..5dc68268d90 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 00000000000..21205eaeb59 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,23 @@ +**This package is currently in development and the API may not be stable.** + +The API will become stable with v1. + +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 00000000000..fa820b9d309 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 00000000000..5b8a4b9af8c --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 00000000000..4fc5a77df58 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write([]byte(data)) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 00000000000..84bbc5880bb --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,39 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + // See comment in ParseBytes why we do this. + // id, err := ParseBytes(data) + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 00000000000..f2c2765b220 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,100 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "net" + "sync" +) + +var ( + nodeMu sync.Mutex + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + copy(nodeID[:], ifs.HardwareAddr) + ifname = ifs.Name + return true + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 00000000000..f326b54db37 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. 
+func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 00000000000..fd7fe0ac46a --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. 
(section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 00000000000..5ea6c737806 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. 
+func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 00000000000..1320d60d80f --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,198 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +func Parse(s string) (UUID, error) { + var uuid UUID + if len(s) != 36 { + if len(s) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x], s[x+1]); !ok { + return uuid, errors.New("invalid UUID format") + } else { + uuid[i] = v + } + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + if len(b) != 36 { + if len(b) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + } + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(b[x], b[x+1]); !ok { + return uuid, errors.New("invalid UUID format") + } else { + uuid[i] = v + } + } + return uuid, nil +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. 
+func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 00000000000..199a1ac6540 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. 
+func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 00000000000..74c4e6c9f5a --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/vmware/govmomi/.drone.sec b/vendor/github.com/vmware/govmomi/.drone.sec deleted file mode 100644 index ad52e59ac87..00000000000 --- a/vendor/github.com/vmware/govmomi/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.kK6pryC8R-O1R0Gj9ydLvQuIZlcYLGze23WdW7xbpiEEKdz6nweJrMm7ysy8lgu1tM47JVo19p2_b26bNKSQshCUOETvd7Hb2UMZOjnyUnqdyAAyoi6UkIquXfUUbHTNS0iMxwSxxW9KMp2GXNq8-o6T8xQZTDirBJFKKd8ZNUasTaoa5j8U9IfdR1aCavTBuOhvk8IVs-jSbY5TVJMJiE0IOPXois7aRJ6uAiANQBk9VKLegEcZD_qAewecXHDsHi-u0jbmg3o3PPaJaK_Qv5dsPlR2M-E2kE3AGUn0-zn5zYRngoAZ8WZr2O4GvLdltJKq9i2z7jOrdOzzRcDRow.96qvwl_E1Hj15u7Q.hWs-jQ8FsqQFD7pE9N-UEP1BWQ9rsJIcCaPvQRIp8Fukm_vvlw9YEaEq0ERLrsUWsJWpd1ca8_h8x7xD6f_d5YppwRqRHIeGIsdBOTMhNs0lG8ikkQXLat-UroCpy8EC17nuUtDE2E2Kdxrk4Cdd6Bk-dKk0Ta4w3Ud0YBKa.P8zrO7xizgv0i98eVWWzEg \ No newline at end of file diff --git a/vendor/github.com/vmware/govmomi/.drone.yml b/vendor/github.com/vmware/govmomi/.drone.yml deleted file mode 100644 index dee4bf5b389..00000000000 --- a/vendor/github.com/vmware/govmomi/.drone.yml +++ /dev/null @@ -1,17 +0,0 @@ -clone: - tags: true - path: github.com/vmware/govmomi -build: - image: golang:1.7 - pull: true - environment: - - GOVC_TEST_URL=$$GOVC_TEST_URL - - GOVC_INSECURE=1 - - VCA=1 - commands: - - make all install - - git clone https://github.com/sstephenson/bats.git /tmp/bats - - /tmp/bats/install.sh /usr/local - - apt-get -qq update && apt-get install -yqq uuid-runtime bsdmainutils jq - - govc/test/images/update.sh - - bats govc/test diff --git 
a/vendor/github.com/vmware/govmomi/.mailmap b/vendor/github.com/vmware/govmomi/.mailmap index 31a0bd55aad..c0e235c5783 100644 --- a/vendor/github.com/vmware/govmomi/.mailmap +++ b/vendor/github.com/vmware/govmomi/.mailmap @@ -13,4 +13,7 @@ Pieter Noordhuis Takaaki Furukawa takaaki.furukawa Takaaki Furukawa tkak Vadim Egorov +Anfernee Yongkun Gui +Anfernee Yongkun Gui Yongkun Anfernee Gui +Zach Tucker Zee Yang diff --git a/vendor/github.com/vmware/govmomi/.travis.yml b/vendor/github.com/vmware/govmomi/.travis.yml index a7fba0350b8..23798f283d4 100644 --- a/vendor/github.com/vmware/govmomi/.travis.yml +++ b/vendor/github.com/vmware/govmomi/.travis.yml @@ -3,7 +3,7 @@ sudo: false language: go go: - - 1.7 + - 1.8 before_install: - make vendor diff --git a/vendor/github.com/vmware/govmomi/BUILD b/vendor/github.com/vmware/govmomi/BUILD index 58a4c6bb7c5..91de0a8391b 100644 --- a/vendor/github.com/vmware/govmomi/BUILD +++ b/vendor/github.com/vmware/govmomi/BUILD @@ -27,10 +27,12 @@ filegroup( ":package-srcs", "//vendor/github.com/vmware/govmomi/find:all-srcs", "//vendor/github.com/vmware/govmomi/list:all-srcs", + "//vendor/github.com/vmware/govmomi/nfc:all-srcs", "//vendor/github.com/vmware/govmomi/object:all-srcs", "//vendor/github.com/vmware/govmomi/pbm:all-srcs", "//vendor/github.com/vmware/govmomi/property:all-srcs", "//vendor/github.com/vmware/govmomi/session:all-srcs", + "//vendor/github.com/vmware/govmomi/simulator:all-srcs", "//vendor/github.com/vmware/govmomi/task:all-srcs", "//vendor/github.com/vmware/govmomi/vim25:all-srcs", ], diff --git a/vendor/github.com/vmware/govmomi/CHANGELOG.md b/vendor/github.com/vmware/govmomi/CHANGELOG.md index ea4dd68db02..42c7cc113bb 100644 --- a/vendor/github.com/vmware/govmomi/CHANGELOG.md +++ b/vendor/github.com/vmware/govmomi/CHANGELOG.md @@ -1,5 +1,36 @@ # changelog +### 0.16.0 (2017-11-08) + +* Add support for SOAP request operation ID header + +* Moved ovf helpers from govc import.ovf command to ovf and nfc packages + +* Added guest/toolbox (client) package + +* Added toolbox package and toolbox command + +* Added simulator package and vcsim command + +### 0.15.0 (2017-06-19) + +* WaitOptions.MaxWaitSeconds is now optional + +* Support removal of ExtraConfig entries + +* GuestPosixFileAttributes OwnerId and GroupId fields are now pointers, + rather than omitempty ints to allow chown with root uid:gid + +* Updated examples/ using view package + +* Add DatastoreFile.TailFunc method + +* Export VirtualMachine.FindSnapshot method + +* Add AuthorizationManager {Enable,Disable}Methods + +* Add PBM client + ### 0.14.0 (2017-04-08) * Add view.ContainerView type and methods diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTING.md b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md index f87c6061029..f6645cbf4f1 100644 --- a/vendor/github.com/vmware/govmomi/CONTRIBUTING.md +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md @@ -17,6 +17,16 @@ git remote add $USER git@github.com:$USER/govmomi.git git fetch $USER ``` +## Installing from source + +Compile the govmomi libraries and install govc using: + +``` shell +go install -v github.com/vmware/govmomi/govc +``` + +Note that **govc/build.sh** is only used for building release binaries. 
+ ## Contribution flow This is a rough outline of what a contributor's workflow looks like: diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTORS b/vendor/github.com/vmware/govmomi/CONTRIBUTORS index 6afdb9f56f0..c37dc939849 100644 --- a/vendor/github.com/vmware/govmomi/CONTRIBUTORS +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTORS @@ -3,12 +3,20 @@ # This script is generated by contributors.sh # +Abhijeet Kasurde abrarshivani +Adam Shannon Alvaro Miranda +amandahla Amit Bathla +amit bezalel Andrew Chin +Anfernee Yongkun Gui +aniketGslab Arran Walker +Aryeh Weinreb Austin Parker +Balu Dontu bastienbc Bob Killen Brad Fitzpatrick @@ -35,8 +43,10 @@ gthombare Hasan Mahmood Henrik Hodne Isaac Rodman +Ivan Porto Carrero Jason Kincl Jeremy Canady +João Pereira Louie Jiang Marc Carmier Mevan Samaratunga @@ -47,7 +57,9 @@ S.Çağlar Onur Sergey Ignatov Steve Purcell Takaaki Furukawa +tanishi Ted Zlatanov +Thibaut Ackermann Vadim Egorov Yang Yang Yuya Kusakabe diff --git a/vendor/github.com/vmware/govmomi/Makefile b/vendor/github.com/vmware/govmomi/Makefile index cbaa3624994..9886b45ceb5 100644 --- a/vendor/github.com/vmware/govmomi/Makefile +++ b/vendor/github.com/vmware/govmomi/Makefile @@ -11,13 +11,19 @@ goimports: govet: @echo checking go vet... - @go tool vet -structtags=false -methods=false . - -test: - go test -v $(TEST_OPTS) ./... + @go tool vet -structtags=false -methods=false $$(find . -mindepth 1 -maxdepth 1 -type d -not -name vendor) install: - go install github.com/vmware/govmomi/govc + go install -v github.com/vmware/govmomi/govc + go install -v github.com/vmware/govmomi/vcsim + +go-test: + go test -v $(TEST_OPTS) ./... + +govc-test: install + (cd govc/test && ./vendor/github.com/sstephenson/bats/libexec/bats -t .) + +test: go-test govc-test doc: install ./govc/usage.sh > ./govc/USAGE.md diff --git a/vendor/github.com/vmware/govmomi/README.md b/vendor/github.com/vmware/govmomi/README.md index 7ebdf1b17c8..02901ea4fba 100644 --- a/vendor/github.com/vmware/govmomi/README.md +++ b/vendor/github.com/vmware/govmomi/README.md @@ -5,14 +5,19 @@ A Go library for interacting with VMware vSphere APIs (ESXi and/or vCenter). -For `govc`, a CLI built on top of govmomi, check out the [govc](./govc) directory and [USAGE](./govc/USAGE.md) document. +In addition to the vSphere API client, this repository includes: + +* [govc](./govc) - vSphere CLI + +* [vcsim](./vcsim) - vSphere API mock framework + +* [toolbox](./toolbox) - VM guest tools framework ## Compatibility -This library is built for and tested against ESXi and vCenter 5.5, 6.0 and 6.5. +This library is built for and tested against ESXi and vCenter 6.0 and 6.5. -If you're able to use it against older versions of ESXi and/or vCenter, please -leave a note and we'll include it in this compatibility list. +It should work with versions 5.5 and 5.1, but neither are officially supported. ## Documentation @@ -23,19 +28,14 @@ The code in the `govmomi` package is a wrapper for the code that is generated fr It primarily provides convenience functions for working with the vSphere API. See [godoc.org][godoc] for documentation. 
-[apiref]:http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc/right-pane.html +[apiref]:http://pubs.vmware.com/vsphere-6-5/index.jsp#com.vmware.wssdk.apiref.doc/right-pane.html [godoc]:http://godoc.org/github.com/vmware/govmomi -[drone]:https://drone.io -[dronesrc]:https://github.com/drone/drone -[dronecli]:http://readme.drone.io/devs/cli/ -#### Building with CI -Merges to this repository will trigger builds in both Travis and [Drone][drone]. +## Installation -To build locally with Drone: -- Ensure that you have Docker 1.6 or higher installed. -- Install the [Drone command line tools][dronecli]. -- Run `drone exec` from within the root directory of the govmomi repository. +```sh +go get -u github.com/vmware/govmomi +``` ## Discussion @@ -53,9 +53,17 @@ Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes. * [Docker Machine](https://github.com/docker/machine/tree/master/drivers/vmwarevsphere) +* [Docker InfraKit](https://github.com/docker/infrakit/tree/master/pkg/provider/vsphere) + +* [Docker LinuxKit](https://github.com/linuxkit/linuxkit/tree/master/src/cmd/linuxkit) + * [Kubernetes](https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/vsphere) -* [Terraform](https://github.com/hashicorp/terraform/tree/master/builtin/providers/vsphere) +* [Kubernetes kops](https://github.com/kubernetes/kops/tree/master/upup/pkg/fi/cloudup/vsphere) + +* [Terraform](https://github.com/terraform-providers/terraform-provider-vsphere) + +* [Packer](https://github.com/jetbrains-infra/packer-builder-vsphere) * [VMware VIC Engine](https://github.com/vmware/vic) diff --git a/vendor/github.com/vmware/govmomi/find/recurser.go b/vendor/github.com/vmware/govmomi/find/recurser.go index b62e93a6f82..80d958a264f 100644 --- a/vendor/github.com/vmware/govmomi/find/recurser.go +++ b/vendor/github.com/vmware/govmomi/find/recurser.go @@ -20,6 +20,7 @@ import ( "context" "os" "path" + "strings" "github.com/vmware/govmomi/list" "github.com/vmware/govmomi/object" @@ -177,6 +178,7 @@ func (r recurser) List(ctx context.Context, s *spec, root list.Element, parts [] return in, nil } + all := parts pattern := parts[0] parts = parts[1:] @@ -188,6 +190,12 @@ func (r recurser) List(ctx context.Context, s *spec, root list.Element, parts [] } if !matched { + matched = strings.HasSuffix(e.Path, "/"+path.Join(all...)) + if matched { + // name contains a '/' + out = append(out, e) + } + continue } diff --git a/vendor/github.com/vmware/govmomi/nfc/BUILD b/vendor/github.com/vmware/govmomi/nfc/BUILD new file mode 100644 index 00000000000..51c8a6ea8c4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/nfc/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "lease.go", + "lease_updater.go", + ], + importpath = "github.com/vmware/govmomi/nfc", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/vmware/govmomi/property:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/methods:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/progress:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = 
["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/vmware/govmomi/nfc/lease.go b/vendor/github.com/vmware/govmomi/nfc/lease.go new file mode 100644 index 00000000000..393c648353f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/nfc/lease.go @@ -0,0 +1,238 @@ +/* +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nfc + +import ( + "context" + "errors" + "fmt" + "io" + "path" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type Lease struct { + types.ManagedObjectReference + + c *vim25.Client +} + +func NewLease(c *vim25.Client, ref types.ManagedObjectReference) *Lease { + return &Lease{ref, c} +} + +// Abort wraps methods.Abort +func (l *Lease) Abort(ctx context.Context, fault *types.LocalizedMethodFault) error { + req := types.HttpNfcLeaseAbort{ + This: l.Reference(), + Fault: fault, + } + + _, err := methods.HttpNfcLeaseAbort(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +// Complete wraps methods.Complete +func (l *Lease) Complete(ctx context.Context) error { + req := types.HttpNfcLeaseComplete{ + This: l.Reference(), + } + + _, err := methods.HttpNfcLeaseComplete(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +// GetManifest wraps methods.GetManifest +func (l *Lease) GetManifest(ctx context.Context) error { + req := types.HttpNfcLeaseGetManifest{ + This: l.Reference(), + } + + _, err := methods.HttpNfcLeaseGetManifest(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +// Progress wraps methods.Progress +func (l *Lease) Progress(ctx context.Context, percent int32) error { + req := types.HttpNfcLeaseProgress{ + This: l.Reference(), + Percent: percent, + } + + _, err := methods.HttpNfcLeaseProgress(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +type LeaseInfo struct { + types.HttpNfcLeaseInfo + + Items []FileItem +} + +func (l *Lease) newLeaseInfo(li *types.HttpNfcLeaseInfo, items []types.OvfFileItem) (*LeaseInfo, error) { + info := &LeaseInfo{ + HttpNfcLeaseInfo: *li, + } + + for _, device := range li.DeviceUrl { + u, err := l.c.ParseURL(device.Url) + if err != nil { + return nil, err + } + + if device.SslThumbprint != "" { + // TODO: prefer host management IP + l.c.SetThumbprint(u.Host, device.SslThumbprint) + } + + if len(items) == 0 { + // this is an export + item := types.OvfFileItem{ + DeviceId: device.Key, + Path: device.TargetId, + Size: device.FileSize, + } + + if item.Size == 0 { + item.Size = li.TotalDiskCapacityInKB * 1024 + } + + if item.Path == "" { + item.Path = path.Base(device.Url) + } + + info.Items = append(info.Items, NewFileItem(u, item)) + + 
continue + } + + // this is an import + for _, item := range items { + if device.ImportKey == item.DeviceId { + info.Items = append(info.Items, NewFileItem(u, item)) + break + } + } + } + + return info, nil +} + +func (l *Lease) Wait(ctx context.Context, items []types.OvfFileItem) (*LeaseInfo, error) { + var lease mo.HttpNfcLease + + pc := property.DefaultCollector(l.c) + err := property.Wait(ctx, pc, l.Reference(), []string{"state", "info", "error"}, func(pc []types.PropertyChange) bool { + done := false + + for _, c := range pc { + if c.Val == nil { + continue + } + + switch c.Name { + case "error": + val := c.Val.(types.LocalizedMethodFault) + lease.Error = &val + done = true + case "info": + val := c.Val.(types.HttpNfcLeaseInfo) + lease.Info = &val + case "state": + lease.State = c.Val.(types.HttpNfcLeaseState) + if lease.State != types.HttpNfcLeaseStateInitializing { + done = true + } + } + } + + return done + }) + + if err != nil { + return nil, err + } + + if lease.State == types.HttpNfcLeaseStateReady { + return l.newLeaseInfo(lease.Info, items) + } + + if lease.Error != nil { + return nil, errors.New(lease.Error.LocalizedMessage) + } + + return nil, fmt.Errorf("unexpected nfc lease state: %s", lease.State) +} + +func (l *Lease) StartUpdater(ctx context.Context, info *LeaseInfo) *LeaseUpdater { + return newLeaseUpdater(ctx, l, info) +} + +func (l *Lease) Upload(ctx context.Context, item FileItem, f io.Reader, opts soap.Upload) error { + if opts.Progress == nil { + opts.Progress = item + } else { + opts.Progress = progress.Tee(item, opts.Progress) + } + + // Non-disk files (such as .iso) use the PUT method. + // Overwrite: t header is also required in this case (ovftool does the same) + if item.Create { + opts.Method = "PUT" + opts.Headers = map[string]string{ + "Overwrite": "t", + } + } else { + opts.Method = "POST" + opts.Type = "application/x-vnd.vmware-streamVmdk" + } + + return l.c.Upload(f, item.URL, &opts) +} + +func (l *Lease) DownloadFile(ctx context.Context, file string, item FileItem, opts soap.Download) error { + if opts.Progress == nil { + opts.Progress = item + } else { + opts.Progress = progress.Tee(item, opts.Progress) + } + + return l.c.DownloadFile(file, item.URL, &opts) +} diff --git a/vendor/github.com/vmware/govmomi/nfc/lease_updater.go b/vendor/github.com/vmware/govmomi/nfc/lease_updater.go new file mode 100644 index 00000000000..d3face81a4c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/nfc/lease_updater.go @@ -0,0 +1,146 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nfc + +import ( + "context" + "log" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" +) + +type FileItem struct { + types.OvfFileItem + URL *url.URL + + ch chan progress.Report +} + +func NewFileItem(u *url.URL, item types.OvfFileItem) FileItem { + return FileItem{ + OvfFileItem: item, + URL: u, + ch: make(chan progress.Report), + } +} + +func (o FileItem) Sink() chan<- progress.Report { + return o.ch +} + +// File converts the FileItem.OvfFileItem to an OvfFile +func (o FileItem) File() types.OvfFile { + return types.OvfFile{ + DeviceId: o.DeviceId, + Path: o.Path, + Size: o.Size, + } +} + +type LeaseUpdater struct { + lease *Lease + + pos int64 // Number of bytes + total int64 // Total number of bytes + + done chan struct{} // When lease updater should stop + + wg sync.WaitGroup // Track when update loop is done +} + +func newLeaseUpdater(ctx context.Context, lease *Lease, info *LeaseInfo) *LeaseUpdater { + l := LeaseUpdater{ + lease: lease, + + done: make(chan struct{}), + } + + for _, item := range info.Items { + l.total += item.Size + go l.waitForProgress(item) + } + + // Kickstart update loop + l.wg.Add(1) + go l.run() + + return &l +} + +func (l *LeaseUpdater) waitForProgress(item FileItem) { + var pos, total int64 + + total = item.Size + + for { + select { + case <-l.done: + return + case p, ok := <-item.ch: + // Return in case of error + if ok && p.Error() != nil { + return + } + + if !ok { + // Last element on the channel, add to total + atomic.AddInt64(&l.pos, total-pos) + return + } + + // Approximate progress in number of bytes + x := int64(float32(total) * (p.Percentage() / 100.0)) + atomic.AddInt64(&l.pos, x-pos) + pos = x + } + } +} + +func (l *LeaseUpdater) run() { + defer l.wg.Done() + + tick := time.NewTicker(2 * time.Second) + defer tick.Stop() + + for { + select { + case <-l.done: + return + case <-tick.C: + // From the vim api HttpNfcLeaseProgress(percent) doc, percent == + // "Completion status represented as an integer in the 0-100 range." + // Always report the current value of percent, as it will renew the + // lease even if the value hasn't changed or is 0. 
+ percent := int32(float32(100*atomic.LoadInt64(&l.pos)) / float32(l.total)) + err := l.lease.Progress(context.TODO(), percent) + if err != nil { + log.Printf("NFC lease progress: %s", err) + return + } + } + } +} + +func (l *LeaseUpdater) Done() { + close(l.done) + l.wg.Wait() +} diff --git a/vendor/github.com/vmware/govmomi/object/BUILD b/vendor/github.com/vmware/govmomi/object/BUILD index cc53d9e0782..eb1b6f5f360 100644 --- a/vendor/github.com/vmware/govmomi/object/BUILD +++ b/vendor/github.com/vmware/govmomi/object/BUILD @@ -38,13 +38,11 @@ go_library( "host_virtual_nic_manager.go", "host_vsan_internal_system.go", "host_vsan_system.go", - "http_nfc_lease.go", "namespace_manager.go", "network.go", "network_reference.go", "opaque_network.go", "option_manager.go", - "ovf_manager.go", "resource_pool.go", "search_index.go", "storage_pod.go", @@ -61,6 +59,7 @@ go_library( importpath = "github.com/vmware/govmomi/object", visibility = ["//visibility:public"], deps = [ + "//vendor/github.com/vmware/govmomi/nfc:go_default_library", "//vendor/github.com/vmware/govmomi/property:go_default_library", "//vendor/github.com/vmware/govmomi/session:go_default_library", "//vendor/github.com/vmware/govmomi/task:go_default_library", diff --git a/vendor/github.com/vmware/govmomi/object/common.go b/vendor/github.com/vmware/govmomi/object/common.go index 52feeed6503..dfeee4a365e 100644 --- a/vendor/github.com/vmware/govmomi/object/common.go +++ b/vendor/github.com/vmware/govmomi/object/common.go @@ -80,17 +80,24 @@ func (c *Common) SetInventoryPath(p string) { func (c Common) ObjectName(ctx context.Context) (string, error) { var o mo.ManagedEntity - name := c.Name() - if name != "" { - return name, nil - } - err := c.Properties(ctx, c.Reference(), []string{"name"}, &o) if err != nil { return "", err } - return o.Name, nil + if o.Name != "" { + return o.Name, nil + } + + // Network has its own "name" field... 
+ var n mo.Network + + err = c.Properties(ctx, c.Reference(), []string{"name"}, &n) + if err != nil { + return "", err + } + + return n.Name, nil } func (c Common) Properties(ctx context.Context, r types.ManagedObjectReference, ps []string, dst interface{}) error { diff --git a/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go b/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go index 60b78df2b4b..ef748ef2c13 100644 --- a/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go +++ b/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go @@ -102,7 +102,9 @@ func (m CustomFieldsManager) Set(ctx context.Context, entity types.ManagedObject return err } -func (m CustomFieldsManager) Field(ctx context.Context) ([]types.CustomFieldDef, error) { +type CustomFieldDefList []types.CustomFieldDef + +func (m CustomFieldsManager) Field(ctx context.Context) (CustomFieldDefList, error) { var fm mo.CustomFieldsManager err := m.Properties(ctx, m.Reference(), []string{"field"}, &fm) @@ -113,19 +115,19 @@ func (m CustomFieldsManager) Field(ctx context.Context) ([]types.CustomFieldDef, return fm.Field, nil } -func (m CustomFieldsManager) FindKey(ctx context.Context, key string) (int32, error) { +func (m CustomFieldsManager) FindKey(ctx context.Context, name string) (int32, error) { field, err := m.Field(ctx) if err != nil { return -1, err } for _, def := range field { - if def.Name == key { + if def.Name == name { return def.Key, nil } } - k, err := strconv.Atoi(key) + k, err := strconv.Atoi(name) if err == nil { // assume literal int key return int32(k), nil @@ -133,3 +135,12 @@ func (m CustomFieldsManager) FindKey(ctx context.Context, key string) (int32, er return -1, ErrKeyNameNotFound } + +func (l CustomFieldDefList) ByKey(key int32) *types.CustomFieldDef { + for _, def := range l { + if def.Key == key { + return &def + } + } + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/datastore_file.go b/vendor/github.com/vmware/govmomi/object/datastore_file.go index 36c95234855..a73990f12cc 100644 --- a/vendor/github.com/vmware/govmomi/object/datastore_file.go +++ b/vendor/github.com/vmware/govmomi/object/datastore_file.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2016 VMware, Inc. All Rights Reserved. +Copyright (c) 2016-2017 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ import ( "net/http" "os" "path" + "sync" "time" "github.com/vmware/govmomi/vim25/soap" @@ -232,8 +233,9 @@ func (f *DatastoreFile) get() (io.Reader, error) { return f.body, nil } -func lastIndexLines(s []byte, n *int) int64 { +func lastIndexLines(s []byte, line *int, include func(l int, m string) bool) (int64, bool) { i := len(s) - 1 + done := false for i > 0 { o := bytes.LastIndexByte(s[:i], '\n') @@ -241,18 +243,27 @@ func lastIndexLines(s []byte, n *int) int64 { break } - i = o - *n-- - if *n == 0 { + msg := string(s[o+1 : i+1]) + if !include(*line, msg) { + done = true break + } else { + i = o + *line++ } } - return int64(i) + return int64(i), done } // Tail seeks to the position of the last N lines of the file. func (f *DatastoreFile) Tail(n int) error { + return f.TailFunc(n, func(line int, _ string) bool { return n > line }) +} + +// TailFunc will seek backwards in the datastore file until it hits a line that does +// not satisfy the supplied `include` function. 
+func (f *DatastoreFile) TailFunc(lines int, include func(line int, message string) bool) error { // Read the file in reverse using bsize chunks const bsize = int64(1024 * 16) @@ -261,13 +272,14 @@ func (f *DatastoreFile) Tail(n int) error { return err } - if n == 0 { + if lines == 0 { return nil } chunk := int64(-1) buf := bytes.NewBuffer(make([]byte, 0, bsize)) + line := 0 for { var eof bool @@ -298,19 +310,19 @@ func (f *DatastoreFile) Tail(n int) error { } b := buf.Bytes() - idx := lastIndexLines(b, &n) + 1 + idx, done := lastIndexLines(b, &line, include) - if n == 0 { + if done { if chunk == -1 { // We found all N lines in the last chunk of the file. // The seek offset is also now at the current end of file. // Save this buffer to avoid another GET request when Read() is called. - buf.Next(int(idx)) + buf.Next(int(idx + 1)) f.buf = buf return nil } - if _, err = f.Seek(pos+idx, io.SeekStart); err != nil { + if _, err = f.Seek(pos+idx+1, io.SeekStart); err != nil { return err } @@ -336,6 +348,7 @@ type followDatastoreFile struct { r *DatastoreFile c chan struct{} i time.Duration + o sync.Once } // Read reads up to len(b) bytes from the DatastoreFile being followed. @@ -387,11 +400,15 @@ func (f *followDatastoreFile) Read(p []byte) (int, error) { // Close will stop Follow polling and close the underlying DatastoreFile. func (f *followDatastoreFile) Close() error { - close(f.c) + f.o.Do(func() { close(f.c) }) return nil } // Follow returns an io.ReadCloser to stream the file contents as data is appended. func (f *DatastoreFile) Follow(interval time.Duration) io.ReadCloser { - return &followDatastoreFile{f, make(chan struct{}), interval} + return &followDatastoreFile{ + r: f, + c: make(chan struct{}), + i: interval, + } } diff --git a/vendor/github.com/vmware/govmomi/object/datastore_file_manager.go b/vendor/github.com/vmware/govmomi/object/datastore_file_manager.go index 7164fbbed7d..e484368060e 100644 --- a/vendor/github.com/vmware/govmomi/object/datastore_file_manager.go +++ b/vendor/github.com/vmware/govmomi/object/datastore_file_manager.go @@ -97,6 +97,25 @@ func (m *DatastoreFileManager) DeleteVirtualDisk(ctx context.Context, name strin return task.Wait(ctx) } +// Move dispatches to the appropriate Move method based on file name extension +func (m *DatastoreFileManager) Move(ctx context.Context, src string, dst string) error { + srcp := m.Path(src) + dstp := m.Path(dst) + + f := m.FileManager.MoveDatastoreFile + + if srcp.IsVMDK() { + f = m.VirtualDiskManager.MoveVirtualDisk + } + + task, err := f(ctx, srcp.String(), m.Datacenter, dstp.String(), m.Datacenter, m.Force) + if err != nil { + return err + } + + return task.Wait(ctx) +} + // Path converts path name to a DatastorePath func (m *DatastoreFileManager) Path(name string) *DatastorePath { var p DatastorePath diff --git a/vendor/github.com/vmware/govmomi/object/datastore_path.go b/vendor/github.com/vmware/govmomi/object/datastore_path.go index ea152103df5..1563ee1e11d 100644 --- a/vendor/github.com/vmware/govmomi/object/datastore_path.go +++ b/vendor/github.com/vmware/govmomi/object/datastore_path.go @@ -18,6 +18,7 @@ package object import ( "fmt" + "path" "strings" ) @@ -63,3 +64,8 @@ func (p *DatastorePath) String() string { return strings.Join([]string{s, p.Path}, " ") } + +// IsVMDK returns true if Path has a ".vmdk" extension +func (p *DatastorePath) IsVMDK() bool { + return path.Ext(p.Path) == ".vmdk" +} diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go 
b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go index 864bb783f38..86c9fc1c7a8 100644 --- a/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go @@ -38,7 +38,7 @@ func NewDistributedVirtualPortgroup(c *vim25.Client, ref types.ManagedObjectRefe // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this DistributedVirtualPortgroup func (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { var dvp mo.DistributedVirtualPortgroup - var dvs mo.VmwareDistributedVirtualSwitch // TODO: should be mo.BaseDistributedVirtualSwitch + var dvs mo.DistributedVirtualSwitch if err := p.Properties(ctx, p.Reference(), []string{"key", "config.distributedVirtualSwitch"}, &dvp); err != nil { return nil, err diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go index 29ee52d9501..7a41258799d 100644 --- a/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go @@ -65,3 +65,15 @@ func (s DistributedVirtualSwitch) AddPortgroup(ctx context.Context, spec []types return NewTask(s.Client(), res.Returnval), nil } + +func (s DistributedVirtualSwitch) FetchDVPorts(ctx context.Context) ([]types.DistributedVirtualPort, error) { + req := &types.FetchDVPorts{ + This: s.Reference(), + } + + res, err := methods.FetchDVPorts(ctx, s.Client(), req) + if err != nil { + return nil, err + } + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_storage_system.go b/vendor/github.com/vmware/govmomi/object/host_storage_system.go index 2a433ff2a88..6785eecdcd3 100644 --- a/vendor/github.com/vmware/govmomi/object/host_storage_system.go +++ b/vendor/github.com/vmware/govmomi/object/host_storage_system.go @@ -88,6 +88,15 @@ func (s HostStorageSystem) RescanAllHba(ctx context.Context) error { return err } +func (s HostStorageSystem) Refresh(ctx context.Context) error { + req := types.RefreshStorageSystem{ + This: s.Reference(), + } + + _, err := methods.RefreshStorageSystem(ctx, s.c, &req) + return err +} + func (s HostStorageSystem) MarkAsSsd(ctx context.Context, uuid string) (*Task, error) { req := types.MarkAsSsd_Task{ This: s.Reference(), diff --git a/vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go b/vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go index 65e4587f6c8..1430e8a8822 100644 --- a/vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go +++ b/vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go @@ -42,7 +42,7 @@ func (m HostVsanInternalSystem) QueryVsanObjectUuidsByFilter(ctx context.Context req := types.QueryVsanObjectUuidsByFilter{ This: m.Reference(), Uuids: uuids, - Limit: limit, + Limit: &limit, Version: version, } diff --git a/vendor/github.com/vmware/govmomi/object/http_nfc_lease.go b/vendor/github.com/vmware/govmomi/object/http_nfc_lease.go deleted file mode 100644 index 3ca53558b3f..00000000000 --- a/vendor/github.com/vmware/govmomi/object/http_nfc_lease.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - "errors" - "fmt" - - "github.com/vmware/govmomi/property" - "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/methods" - "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/types" -) - -type HttpNfcLease struct { - Common -} - -func NewHttpNfcLease(c *vim25.Client, ref types.ManagedObjectReference) *HttpNfcLease { - return &HttpNfcLease{ - Common: NewCommon(c, ref), - } -} - -// HttpNfcLeaseAbort wraps methods.HttpNfcLeaseAbort -func (o HttpNfcLease) HttpNfcLeaseAbort(ctx context.Context, fault *types.LocalizedMethodFault) error { - req := types.HttpNfcLeaseAbort{ - This: o.Reference(), - Fault: fault, - } - - _, err := methods.HttpNfcLeaseAbort(ctx, o.c, &req) - if err != nil { - return err - } - - return nil -} - -// HttpNfcLeaseComplete wraps methods.HttpNfcLeaseComplete -func (o HttpNfcLease) HttpNfcLeaseComplete(ctx context.Context) error { - req := types.HttpNfcLeaseComplete{ - This: o.Reference(), - } - - _, err := methods.HttpNfcLeaseComplete(ctx, o.c, &req) - if err != nil { - return err - } - - return nil -} - -// HttpNfcLeaseGetManifest wraps methods.HttpNfcLeaseGetManifest -func (o HttpNfcLease) HttpNfcLeaseGetManifest(ctx context.Context) error { - req := types.HttpNfcLeaseGetManifest{ - This: o.Reference(), - } - - _, err := methods.HttpNfcLeaseGetManifest(ctx, o.c, &req) - if err != nil { - return err - } - - return nil -} - -// HttpNfcLeaseProgress wraps methods.HttpNfcLeaseProgress -func (o HttpNfcLease) HttpNfcLeaseProgress(ctx context.Context, percent int32) error { - req := types.HttpNfcLeaseProgress{ - This: o.Reference(), - Percent: percent, - } - - _, err := methods.HttpNfcLeaseProgress(ctx, o.c, &req) - if err != nil { - return err - } - - return nil -} - -func (o HttpNfcLease) Wait(ctx context.Context) (*types.HttpNfcLeaseInfo, error) { - var lease mo.HttpNfcLease - - pc := property.DefaultCollector(o.c) - err := property.Wait(ctx, pc, o.Reference(), []string{"state", "info", "error"}, func(pc []types.PropertyChange) bool { - done := false - - for _, c := range pc { - if c.Val == nil { - continue - } - - switch c.Name { - case "error": - val := c.Val.(types.LocalizedMethodFault) - lease.Error = &val - done = true - case "info": - val := c.Val.(types.HttpNfcLeaseInfo) - lease.Info = &val - case "state": - lease.State = c.Val.(types.HttpNfcLeaseState) - if lease.State != types.HttpNfcLeaseStateInitializing { - done = true - } - } - } - - return done - }) - - if err != nil { - return nil, err - } - - if lease.State == types.HttpNfcLeaseStateReady { - return lease.Info, nil - } - - if lease.Error != nil { - return nil, errors.New(lease.Error.LocalizedMessage) - } - - return nil, fmt.Errorf("unexpected nfc lease state: %s", lease.State) -} diff --git a/vendor/github.com/vmware/govmomi/object/network.go b/vendor/github.com/vmware/govmomi/object/network.go index a76b17d91c2..d1dc7ce01f6 100644 --- a/vendor/github.com/vmware/govmomi/object/network.go +++ b/vendor/github.com/vmware/govmomi/object/network.go @@ -20,6 +20,7 @@ import ( "context" "github.com/vmware/govmomi/vim25" + 
"github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -34,12 +35,20 @@ func NewNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Network { } // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network -func (n Network) EthernetCardBackingInfo(_ context.Context) (types.BaseVirtualDeviceBackingInfo, error) { - name := n.Name() +func (n Network) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + var e mo.Network + + // Use Network.Name rather than Common.Name as the latter does not return the complete name if it contains a '/' + // We can't use Common.ObjectName here either as we need the ManagedEntity.Name field is not set since mo.Network + // has its own Name field. + err := n.Properties(ctx, n.Reference(), []string{"name"}, &e) + if err != nil { + return nil, err + } backing := &types.VirtualEthernetCardNetworkBackingInfo{ VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ - DeviceName: name, + DeviceName: e.Name, }, } diff --git a/vendor/github.com/vmware/govmomi/object/ovf_manager.go b/vendor/github.com/vmware/govmomi/object/ovf_manager.go deleted file mode 100644 index 7fedf689ff3..00000000000 --- a/vendor/github.com/vmware/govmomi/object/ovf_manager.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package object - -import ( - "context" - - "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/methods" - "github.com/vmware/govmomi/vim25/types" -) - -type OvfManager struct { - Common -} - -func NewOvfManager(c *vim25.Client) *OvfManager { - o := OvfManager{ - Common: NewCommon(c, *c.ServiceContent.OvfManager), - } - - return &o -} - -// CreateDescriptor wraps methods.CreateDescriptor -func (o OvfManager) CreateDescriptor(ctx context.Context, obj Reference, cdp types.OvfCreateDescriptorParams) (*types.OvfCreateDescriptorResult, error) { - req := types.CreateDescriptor{ - This: o.Reference(), - Obj: obj.Reference(), - Cdp: cdp, - } - - res, err := methods.CreateDescriptor(ctx, o.c, &req) - if err != nil { - return nil, err - } - - return &res.Returnval, nil -} - -// CreateImportSpec wraps methods.CreateImportSpec -func (o OvfManager) CreateImportSpec(ctx context.Context, ovfDescriptor string, resourcePool Reference, datastore Reference, cisp types.OvfCreateImportSpecParams) (*types.OvfCreateImportSpecResult, error) { - req := types.CreateImportSpec{ - This: o.Reference(), - OvfDescriptor: ovfDescriptor, - ResourcePool: resourcePool.Reference(), - Datastore: datastore.Reference(), - Cisp: cisp, - } - - res, err := methods.CreateImportSpec(ctx, o.c, &req) - if err != nil { - return nil, err - } - - return &res.Returnval, nil -} - -// ParseDescriptor wraps methods.ParseDescriptor -func (o OvfManager) ParseDescriptor(ctx context.Context, ovfDescriptor string, pdp types.OvfParseDescriptorParams) (*types.OvfParseDescriptorResult, error) { - req := types.ParseDescriptor{ - This: o.Reference(), - OvfDescriptor: ovfDescriptor, - Pdp: pdp, - } - - res, err := methods.ParseDescriptor(ctx, o.c, &req) - if err != nil { - return nil, err - } - - return &res.Returnval, nil -} - -// ValidateHost wraps methods.ValidateHost -func (o OvfManager) ValidateHost(ctx context.Context, ovfDescriptor string, host Reference, vhp types.OvfValidateHostParams) (*types.OvfValidateHostResult, error) { - req := types.ValidateHost{ - This: o.Reference(), - OvfDescriptor: ovfDescriptor, - Host: host.Reference(), - Vhp: vhp, - } - - res, err := methods.ValidateHost(ctx, o.c, &req) - if err != nil { - return nil, err - } - - return &res.Returnval, nil -} diff --git a/vendor/github.com/vmware/govmomi/object/resource_pool.go b/vendor/github.com/vmware/govmomi/object/resource_pool.go index 791fd38229d..55c2e2b2fde 100644 --- a/vendor/github.com/vmware/govmomi/object/resource_pool.go +++ b/vendor/github.com/vmware/govmomi/object/resource_pool.go @@ -19,6 +19,7 @@ package object import ( "context" + "github.com/vmware/govmomi/nfc" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/types" @@ -34,7 +35,7 @@ func NewResourcePool(c *vim25.Client, ref types.ManagedObjectReference) *Resourc } } -func (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, folder *Folder, host *HostSystem) (*HttpNfcLease, error) { +func (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, folder *Folder, host *HostSystem) (*nfc.Lease, error) { req := types.ImportVApp{ This: p.Reference(), Spec: spec, @@ -55,7 +56,7 @@ func (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, return nil, err } - return NewHttpNfcLease(p.c, res.Returnval), nil + return nfc.NewLease(p.c, res.Returnval), nil } func (p ResourcePool) Create(ctx context.Context, name string, spec types.ResourceConfigSpec) (*ResourcePool, error) { 
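The resource_pool.go hunk above changes the ImportVApp return type from the removed *object.HttpNfcLease to *nfc.Lease from the new nfc package. A minimal caller sketch of the updated contract, relying only on the signature shown in this hunk (the function name importToPool and its arguments are illustrative assumptions, not part of the patch):

package example

import (
	"context"

	"github.com/vmware/govmomi/nfc"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// importToPool illustrates the updated ImportVApp contract: the returned lease
// is now an *nfc.Lease rather than the removed *object.HttpNfcLease, so any
// upload/progress handling moves to the nfc package's Lease methods.
func importToPool(ctx context.Context, pool *object.ResourcePool, spec types.BaseImportSpec, folder *object.Folder, host *object.HostSystem) (*nfc.Lease, error) {
	lease, err := pool.ImportVApp(ctx, spec, folder, host)
	if err != nil {
		return nil, err
	}

	return lease, nil
}
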
diff --git a/vendor/github.com/vmware/govmomi/object/task.go b/vendor/github.com/vmware/govmomi/object/task.go index 8572b4363bd..2b66aa93b7f 100644 --- a/vendor/github.com/vmware/govmomi/object/task.go +++ b/vendor/github.com/vmware/govmomi/object/task.go @@ -22,6 +22,7 @@ import ( "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/task" "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/progress" "github.com/vmware/govmomi/vim25/types" ) @@ -51,3 +52,11 @@ func (t *Task) WaitForResult(ctx context.Context, s progress.Sinker) (*types.Tas p := property.DefaultCollector(t.c) return task.Wait(ctx, t.Reference(), p, s) } + +func (t *Task) Cancel(ctx context.Context) error { + _, err := methods.CancelTask(ctx, t.Client(), &types.CancelTask{ + This: t.Reference(), + }) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go index 24821aa6b9c..6fe08356158 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,6 +30,7 @@ import ( // Type values for use in BootOrder const ( + DeviceTypeNone = "-" DeviceTypeCdrom = "cdrom" DeviceTypeDisk = "disk" DeviceTypeEthernet = "ethernet" @@ -754,6 +755,9 @@ func (l VirtualDeviceList) PrimaryMacAddress() string { // convert a BaseVirtualDevice to a BaseVirtualMachineBootOptionsBootableDevice var bootableDevices = map[string]func(device types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice{ + DeviceTypeNone: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice { + return &types.VirtualMachineBootOptionsBootableDevice{} + }, DeviceTypeCdrom: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice { return &types.VirtualMachineBootOptionsBootableCdromDevice{} }, @@ -773,17 +777,23 @@ var bootableDevices = map[string]func(device types.BaseVirtualDevice) types.Base } // BootOrder returns a list of devices which can be used to set boot order via VirtualMachine.SetBootOptions. -// The order can any of "ethernet", "cdrom", "floppy" or "disk" or by specific device name. +// The order can be any of "ethernet", "cdrom", "floppy" or "disk" or by specific device name. +// A value of "-" will clear the existing boot order on the VC/ESX side. func (l VirtualDeviceList) BootOrder(order []string) []types.BaseVirtualMachineBootOptionsBootableDevice { var devices []types.BaseVirtualMachineBootOptionsBootableDevice for _, name := range order { if kind, ok := bootableDevices[name]; ok { + if name == DeviceTypeNone { + // Not covered in the API docs, nor obvious, but this clears the boot order on the VC/ESX side. 
+ devices = append(devices, new(types.VirtualMachineBootOptionsBootableDevice)) + continue + } + for _, device := range l { if l.Type(device) == name { devices = append(devices, kind(device)) } - } continue } diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/vendor/github.com/vmware/govmomi/object/virtual_machine.go index 8fd6a421b28..511f5572357 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_machine.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ import ( "net" "path" + "github.com/vmware/govmomi/nfc" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" @@ -464,6 +465,20 @@ func (v VirtualMachine) Answer(ctx context.Context, id, answer string) error { return nil } +func (v VirtualMachine) AcquireTicket(ctx context.Context, kind string) (*types.VirtualMachineTicket, error) { + req := types.AcquireTicket{ + This: v.Reference(), + TicketType: kind, + } + + res, err := methods.AcquireTicket(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + // CreateSnapshot creates a new snapshot of a virtual machine. func (v VirtualMachine) CreateSnapshot(ctx context.Context, name string, description string, memory bool, quiesce bool) (*Task, error) { req := types.CreateSnapshot_Task{ @@ -497,7 +512,7 @@ func (v VirtualMachine) RemoveAllSnapshot(ctx context.Context, consolidate *bool return NewTask(v.c, res.Returnval), nil } -type snapshotMap map[string][]Reference +type snapshotMap map[string][]types.ManagedObjectReference func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) { for i, st := range tree { @@ -511,18 +526,18 @@ func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) } for _, name := range names { - m[name] = append(m[name], &tree[i].Snapshot) + m[name] = append(m[name], tree[i].Snapshot) } m.add(sname, st.ChildSnapshotList) } } -// findSnapshot supports snapshot lookup by name, where name can be: +// FindSnapshot supports snapshot lookup by name, where name can be: // 1) snapshot ManagedObjectReference.Value (unique) // 2) snapshot name (may not be unique) // 3) snapshot tree path (may not be unique) -func (v VirtualMachine) findSnapshot(ctx context.Context, name string) (Reference, error) { +func (v VirtualMachine) FindSnapshot(ctx context.Context, name string) (*types.ManagedObjectReference, error) { var o mo.VirtualMachine err := v.Properties(ctx, v.Reference(), []string{"snapshot"}, &o) @@ -542,7 +557,7 @@ func (v VirtualMachine) findSnapshot(ctx context.Context, name string) (Referenc case 0: return nil, fmt.Errorf("snapshot %q not found", name) case 1: - return s[0], nil + return &s[0], nil default: return nil, fmt.Errorf("%q resolves to %d snapshots", name, len(s)) } @@ -550,7 +565,7 @@ func (v VirtualMachine) findSnapshot(ctx context.Context, name string) (Referenc // RemoveSnapshot removes a named snapshot func (v VirtualMachine) RemoveSnapshot(ctx context.Context, name string, removeChildren bool, consolidate *bool) (*Task, error) { - snapshot, err := v.findSnapshot(ctx, name) + snapshot, err := v.FindSnapshot(ctx, name) if err != nil { return nil, err } @@ -586,7 +601,7 @@ func (v VirtualMachine) 
RevertToCurrentSnapshot(ctx context.Context, suppressPow // RevertToSnapshot reverts to a named snapshot func (v VirtualMachine) RevertToSnapshot(ctx context.Context, name string, suppressPowerOn bool) (*Task, error) { - snapshot, err := v.findSnapshot(ctx, name) + snapshot, err := v.FindSnapshot(ctx, name) if err != nil { return nil, err } @@ -757,3 +772,30 @@ func (v VirtualMachine) UpgradeTools(ctx context.Context, options string) (*Task return NewTask(v.c, res.Returnval), nil } + +func (v VirtualMachine) Export(ctx context.Context) (*nfc.Lease, error) { + req := types.ExportVm{ + This: v.Reference(), + } + + res, err := methods.ExportVm(ctx, v.Client(), &req) + if err != nil { + return nil, err + } + + return nfc.NewLease(v.c, res.Returnval), nil +} + +func (v VirtualMachine) UpgradeVM(ctx context.Context, version string) (*Task, error) { + req := types.UpgradeVM_Task{ + This: v.Reference(), + Version: version, + } + + res, err := methods.UpgradeVM_Task(ctx, v.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/property/collector.go b/vendor/github.com/vmware/govmomi/property/collector.go index e2059d923f9..04a9e77373f 100644 --- a/vendor/github.com/vmware/govmomi/property/collector.go +++ b/vendor/github.com/vmware/govmomi/property/collector.go @@ -30,7 +30,7 @@ import ( // Collector models the PropertyCollector managed object. // // For more information, see: -// http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.wssdk.apiref.doc/vmodl.query.PropertyCollector.html +// http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.wssdk.apiref.doc%2Fvmodl.query.PropertyCollector.html // type Collector struct { roundTripper soap.RoundTripper diff --git a/vendor/github.com/vmware/govmomi/property/filter.go b/vendor/github.com/vmware/govmomi/property/filter.go index a6fa1de9ffd..8284b0c7d82 100644 --- a/vendor/github.com/vmware/govmomi/property/filter.go +++ b/vendor/github.com/vmware/govmomi/property/filter.go @@ -18,7 +18,7 @@ package property import ( "fmt" - "path/filepath" + "path" "reflect" "strconv" "strings" @@ -103,7 +103,11 @@ func (f Filter) MatchProperty(prop types.DynamicProperty) bool { switch pval := prop.Val.(type) { case string: - m, _ := filepath.Match(match.(string), pval) + s := match.(string) + if s == "*" { + return true // TODO: path.Match fails if s contains a '/' + } + m, _ := path.Match(s, pval) return m default: return reflect.DeepEqual(match, pval) diff --git a/vendor/github.com/vmware/govmomi/property/wait.go b/vendor/github.com/vmware/govmomi/property/wait.go index 689477bfb85..fe847926ce9 100644 --- a/vendor/github.com/vmware/govmomi/property/wait.go +++ b/vendor/github.com/vmware/govmomi/property/wait.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -22,7 +22,50 @@ import ( "github.com/vmware/govmomi/vim25/types" ) -// Wait waits for any of the specified properties of the specified managed +// WaitFilter provides helpers to construct a types.CreateFilter for use with property.Wait +type WaitFilter struct { + types.CreateFilter +} + +// Add a new ObjectSpec and PropertySpec to the WaitFilter +func (f *WaitFilter) Add(obj types.ManagedObjectReference, kind string, ps []string, set ...types.BaseSelectionSpec) *WaitFilter { + spec := types.ObjectSpec{ + Obj: obj, + SelectSet: set, + } + + pset := types.PropertySpec{ + Type: kind, + PathSet: ps, + } + + if len(ps) == 0 { + pset.All = types.NewBool(true) + } + + f.Spec.ObjectSet = append(f.Spec.ObjectSet, spec) + + f.Spec.PropSet = append(f.Spec.PropSet, pset) + + return f +} + +// Wait creates a new WaitFilter and calls the specified function for each ObjectUpdate via WaitForUpdates +func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error { + filter := new(WaitFilter).Add(obj, obj.Type, ps) + + return WaitForUpdates(ctx, c, filter, func(updates []types.ObjectUpdate) bool { + for _, update := range updates { + if f(update.ChangeSet) { + return true + } + } + + return false + }) +} + +// WaitForUpdates waits for any of the specified properties of the specified managed // object to change. It calls the specified function for every update it // receives. If this function returns false, it continues waiting for // subsequent updates. If this function returns true, it stops waiting and @@ -35,7 +78,7 @@ import ( // The newly created collector is destroyed before this function returns (both // in case of success or error). // -func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error { +func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f func([]types.ObjectUpdate) bool) error { p, err := c.Create(ctx) if err != nil { return err @@ -45,91 +88,13 @@ func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, p // specified context may have timed out or have been cancelled. defer p.Destroy(context.Background()) - req := types.CreateFilter{ - Spec: types.PropertyFilterSpec{ - ObjectSet: []types.ObjectSpec{ - { - Obj: obj, - }, - }, - PropSet: []types.PropertySpec{ - { - PathSet: ps, - Type: obj.Type, - }, - }, - }, - } - - if len(ps) == 0 { - req.Spec.PropSet[0].All = types.NewBool(true) - } - - err = p.CreateFilter(ctx, req) - if err != nil { - return err - } - return waitLoop(ctx, p, func(_ types.ManagedObjectReference, pc []types.PropertyChange) bool { - return f(pc) - }) -} - -// WaitForView waits for any of the specified properties of the managed -// objects in the View to change. It calls the specified function for every update it -// receives. If this function returns false, it continues waiting for -// subsequent updates. If this function returns true, it stops waiting and -// returns. -// -// To only receive updates for the View's specified managed objects, the function -// creates a new property collector and calls CreateFilter. A new property -// collector is required because filters can only be added, not removed. -// -// The newly created collector is destroyed before this function returns (both -// in case of success or error). 
-// -// The code assumes that all objects in the View are the same type -func WaitForView(ctx context.Context, c *Collector, view types.ManagedObjectReference, obj types.ManagedObjectReference, ps []string, f func(types.ManagedObjectReference, []types.PropertyChange) bool) error { - p, err := c.Create(ctx) + err = p.CreateFilter(ctx, filter.CreateFilter) if err != nil { return err } - // Attempt to destroy the collector using the background context, as the - // specified context may have timed out or have been cancelled. - defer p.Destroy(context.Background()) - - req := types.CreateFilter{ - Spec: types.PropertyFilterSpec{ - ObjectSet: []types.ObjectSpec{ - { - Obj: view, - SelectSet: []types.BaseSelectionSpec{ - &types.TraversalSpec{ - SelectionSpec: types.SelectionSpec{ - Name: "traverseEntities", - }, - Path: "view", - Type: view.Type}}, - }, - }, - PropSet: []types.PropertySpec{ - { - Type: obj.Type, - PathSet: ps, - }, - }, - }} - - err = p.CreateFilter(ctx, req) - if err != nil { - return err - } - return waitLoop(ctx, p, f) -} - -func waitLoop(ctx context.Context, c *Collector, f func(types.ManagedObjectReference, []types.PropertyChange) bool) error { for version := ""; ; { - res, err := c.WaitForUpdates(ctx, version) + res, err := p.WaitForUpdates(ctx, version) if err != nil { return err } @@ -142,12 +107,9 @@ func waitLoop(ctx context.Context, c *Collector, f func(types.ManagedObjectRefer version = res.Version for _, fs := range res.FilterSet { - for _, os := range fs.ObjectSet { - if f(os.Obj, os.ChangeSet) { - return nil - } + if f(fs.ObjectSet) { + return nil } } } - } diff --git a/vendor/github.com/vmware/govmomi/session/manager.go b/vendor/github.com/vmware/govmomi/session/manager.go index b4591c1c4dc..3cbfa938419 100644 --- a/vendor/github.com/vmware/govmomi/session/manager.go +++ b/vendor/github.com/vmware/govmomi/session/manager.go @@ -199,3 +199,31 @@ func (sm *Manager) AcquireLocalTicket(ctx context.Context, userName string) (*ty return &res.Returnval, nil } + +func (sm *Manager) AcquireCloneTicket(ctx context.Context) (string, error) { + req := types.AcquireCloneTicket{ + This: sm.Reference(), + } + + res, err := methods.AcquireCloneTicket(ctx, sm.client, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +func (sm *Manager) CloneSession(ctx context.Context, ticket string) error { + req := types.CloneSession{ + This: sm.Reference(), + CloneTicket: ticket, + } + + res, err := methods.CloneSession(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = &res.Returnval + return nil +} diff --git a/vendor/github.com/vmware/govmomi/simulator/BUILD b/vendor/github.com/vmware/govmomi/simulator/BUILD new file mode 100644 index 00000000000..dafff93c675 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/BUILD @@ -0,0 +1,84 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "authorization_manager.go", + "cluster_compute_resource.go", + "custom_fields_manager.go", + "datacenter.go", + "datastore.go", + "doc.go", + "dvs.go", + "entity.go", + "file_manager.go", + "folder.go", + "guest_id.go", + "host_datastore_browser.go", + "host_datastore_system.go", + "host_firewall_system.go", + "host_network_system.go", + "host_system.go", + "ip_pool_manager.go", + "license_manager.go", + "model.go", + "option_manager.go", + "os_unix.go", + "performance_manager.go", + "portgroup.go", + "property_collector.go", + "property_filter.go", + "registry.go", + 
"resource_pool.go", + "search_index.go", + "service_instance.go", + "session_manager.go", + "simulator.go", + "snapshot.go", + "task.go", + "task_manager.go", + "user_directory.go", + "view_manager.go", + "virtual_disk_manager.go", + "virtual_machine.go", + ] + select({ + "@io_bazel_rules_go//go/platform:windows": [ + "os_windows.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/vmware/govmomi/simulator", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/google/uuid:go_default_library", + "//vendor/github.com/vmware/govmomi/find:go_default_library", + "//vendor/github.com/vmware/govmomi/object:go_default_library", + "//vendor/github.com/vmware/govmomi/session:go_default_library", + "//vendor/github.com/vmware/govmomi/simulator/esx:go_default_library", + "//vendor/github.com/vmware/govmomi/simulator/vpx:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/methods:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/xml:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/vmware/govmomi/simulator/esx:all-srcs", + "//vendor/github.com/vmware/govmomi/simulator/vpx:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/vmware/govmomi/simulator/authorization_manager.go b/vendor/github.com/vmware/govmomi/simulator/authorization_manager.go new file mode 100644 index 00000000000..b65db933e8f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/authorization_manager.go @@ -0,0 +1,257 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "strings" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type AuthorizationManager struct { + mo.AuthorizationManager + + permissions map[types.ManagedObjectReference][]types.Permission + privileges map[string]struct{} + system []string + nextID int32 +} + +func NewAuthorizationManager(ref types.ManagedObjectReference) object.Reference { + m := &AuthorizationManager{} + m.Self = ref + m.RoleList = esx.RoleList + m.permissions = make(map[types.ManagedObjectReference][]types.Permission) + + l := object.AuthorizationRoleList(m.RoleList) + m.system = l.ByName("ReadOnly").Privilege + admin := l.ByName("Admin") + m.privileges = make(map[string]struct{}, len(admin.Privilege)) + + for _, id := range admin.Privilege { + m.privileges[id] = struct{}{} + } + + root := Map.content().RootFolder + + for _, u := range DefaultUserGroup { + m.permissions[root] = append(m.permissions[root], types.Permission{ + Entity: &root, + Principal: u.Principal, + Group: u.Group, + RoleId: admin.RoleId, + Propagate: true, + }) + } + + return m +} + +func (m *AuthorizationManager) RetrieveEntityPermissions(req *types.RetrieveEntityPermissions) soap.HasFault { + e := Map.Get(req.Entity).(mo.Entity) + + p := m.permissions[e.Reference()] + + if req.Inherited { + for { + parent := e.Entity().Parent + if parent == nil { + break + } + + e = Map.Get(parent.Reference()).(mo.Entity) + + p = append(p, m.permissions[e.Reference()]...) + } + } + + return &methods.RetrieveEntityPermissionsBody{ + Res: &types.RetrieveEntityPermissionsResponse{ + Returnval: p, + }, + } +} + +func (m *AuthorizationManager) RetrieveAllPermissions(req *types.RetrieveAllPermissions) soap.HasFault { + var p []types.Permission + + for _, v := range m.permissions { + p = append(p, v...) 
+ } + + return &methods.RetrieveAllPermissionsBody{ + Res: &types.RetrieveAllPermissionsResponse{ + Returnval: p, + }, + } +} + +func (m *AuthorizationManager) RemoveEntityPermission(req *types.RemoveEntityPermission) soap.HasFault { + var p []types.Permission + + for _, v := range m.permissions[req.Entity] { + if v.Group == req.IsGroup && v.Principal == req.User { + continue + } + p = append(p, v) + } + + m.permissions[req.Entity] = p + + return &methods.RemoveEntityPermissionBody{ + Res: &types.RemoveEntityPermissionResponse{}, + } +} + +func (m *AuthorizationManager) SetEntityPermissions(req *types.SetEntityPermissions) soap.HasFault { + m.permissions[req.Entity] = req.Permission + + return &methods.SetEntityPermissionsBody{ + Res: &types.SetEntityPermissionsResponse{}, + } +} + +func (m *AuthorizationManager) RetrieveRolePermissions(req *types.RetrieveRolePermissions) soap.HasFault { + var p []types.Permission + + for _, set := range m.permissions { + for _, v := range set { + if v.RoleId == req.RoleId { + p = append(p, v) + } + } + } + + return &methods.RetrieveRolePermissionsBody{ + Res: &types.RetrieveRolePermissionsResponse{ + Returnval: p, + }, + } +} + +func (m *AuthorizationManager) AddAuthorizationRole(req *types.AddAuthorizationRole) soap.HasFault { + body := &methods.AddAuthorizationRoleBody{} + + for _, role := range m.RoleList { + if role.Name == req.Name { + body.Fault_ = Fault("", &types.AlreadyExists{}) + return body + } + } + + ids, err := m.privIDs(req.PrivIds) + if err != nil { + body.Fault_ = err + return body + } + + m.RoleList = append(m.RoleList, types.AuthorizationRole{ + Info: &types.Description{ + Label: req.Name, + Summary: req.Name, + }, + RoleId: m.nextID, + Privilege: ids, + Name: req.Name, + System: false, + }) + + m.nextID++ + + body.Res = &types.AddAuthorizationRoleResponse{} + + return body +} + +func (m *AuthorizationManager) UpdateAuthorizationRole(req *types.UpdateAuthorizationRole) soap.HasFault { + body := &methods.UpdateAuthorizationRoleBody{} + + for _, role := range m.RoleList { + if role.Name == req.NewName && role.RoleId != req.RoleId { + body.Fault_ = Fault("", &types.AlreadyExists{}) + return body + } + } + + for i, role := range m.RoleList { + if role.RoleId == req.RoleId { + if len(req.PrivIds) != 0 { + ids, err := m.privIDs(req.PrivIds) + if err != nil { + body.Fault_ = err + return body + } + m.RoleList[i].Privilege = ids + } + + m.RoleList[i].Name = req.NewName + + body.Res = &types.UpdateAuthorizationRoleResponse{} + return body + } + } + + body.Fault_ = Fault("", &types.NotFound{}) + + return body +} + +func (m *AuthorizationManager) RemoveAuthorizationRole(req *types.RemoveAuthorizationRole) soap.HasFault { + body := &methods.RemoveAuthorizationRoleBody{} + + for i, role := range m.RoleList { + if role.RoleId == req.RoleId { + m.RoleList = append(m.RoleList[:i], m.RoleList[i+1:]...) 
+ + body.Res = &types.RemoveAuthorizationRoleResponse{} + return body + } + } + + body.Fault_ = Fault("", &types.NotFound{}) + + return body +} + +func (m *AuthorizationManager) privIDs(ids []string) ([]string, *soap.Fault) { + system := make(map[string]struct{}, len(m.system)) + + for _, id := range ids { + if _, ok := m.privileges[id]; !ok { + return nil, Fault("", &types.InvalidArgument{InvalidProperty: "privIds"}) + } + + if strings.HasPrefix(id, "System.") { + system[id] = struct{}{} + } + } + + for _, id := range m.system { + if _, ok := system[id]; ok { + continue + } + + ids = append(ids, id) + } + + return ids, nil +} diff --git a/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go b/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go new file mode 100644 index 00000000000..fe51ae3e7c2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go @@ -0,0 +1,98 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type ClusterComputeResource struct { + mo.ClusterComputeResource +} + +type addHost struct { + *ClusterComputeResource + + req *types.AddHost_Task +} + +func (add *addHost) Run(task *Task) (types.AnyType, types.BaseMethodFault) { + spec := add.req.Spec + + if spec.HostName == "" { + return nil, &types.NoHost{} + } + + host := NewHostSystem(esx.HostSystem) + host.Summary.Config.Name = spec.HostName + host.Name = host.Summary.Config.Name + host.Runtime.ConnectionState = types.HostSystemConnectionStateDisconnected + + cr := add.ClusterComputeResource + Map.PutEntity(cr, Map.NewEntity(host)) + + cr.Host = append(cr.Host, host.Reference()) + + if add.req.AsConnected { + host.Runtime.ConnectionState = types.HostSystemConnectionStateConnected + } + + addComputeResource(add.ClusterComputeResource.Summary.GetComputeResourceSummary(), host) + + return host.Reference(), nil +} + +func (c *ClusterComputeResource) AddHostTask(add *types.AddHost_Task) soap.HasFault { + return &methods.AddHost_TaskBody{ + Res: &types.AddHost_TaskResponse{ + Returnval: NewTask(&addHost{c, add}).Run(), + }, + } +} + +func CreateClusterComputeResource(f *Folder, name string, spec types.ClusterConfigSpecEx) (*ClusterComputeResource, types.BaseMethodFault) { + if e := Map.FindByName(name, f.ChildEntity); e != nil { + return nil, &types.DuplicateName{ + Name: e.Entity().Name, + Object: e.Reference(), + } + } + + cluster := &ClusterComputeResource{} + cluster.Name = name + cluster.Summary = &types.ClusterComputeResourceSummary{ + UsageSummary: new(types.ClusterUsageSummary), + } + + config := &types.ClusterConfigInfoEx{} + cluster.ConfigurationEx = config + + config.DrsConfig.Enabled = types.NewBool(true) + + pool := NewResourcePool() + Map.PutEntity(cluster, 
Map.NewEntity(pool)) + cluster.ResourcePool = &pool.Self + + f.putChild(cluster) + pool.Owner = cluster.Self + + return cluster, nil +} diff --git a/vendor/github.com/vmware/govmomi/simulator/custom_fields_manager.go b/vendor/github.com/vmware/govmomi/simulator/custom_fields_manager.go new file mode 100644 index 00000000000..3ae5ef0302f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/custom_fields_manager.go @@ -0,0 +1,111 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type CustomFieldsManager struct { + mo.CustomFieldsManager + + nextKey int32 +} + +func NewCustomFieldsManager(ref types.ManagedObjectReference) object.Reference { + m := &CustomFieldsManager{} + m.Self = ref + return m +} + +func (c *CustomFieldsManager) find(key int32) (int, *types.CustomFieldDef) { + for i, field := range c.Field { + if field.Key == key { + return i, &c.Field[i] + } + } + + return -1, nil +} + +func (c *CustomFieldsManager) AddCustomFieldDef(req *types.AddCustomFieldDef) soap.HasFault { + body := &methods.AddCustomFieldDefBody{} + + def := types.CustomFieldDef{ + Key: c.nextKey, + Name: req.Name, + ManagedObjectType: req.MoType, + Type: req.MoType, + FieldDefPrivileges: req.FieldDefPolicy, + FieldInstancePrivileges: req.FieldPolicy, + } + + c.Field = append(c.Field, def) + c.nextKey++ + + body.Res = &types.AddCustomFieldDefResponse{ + Returnval: def, + } + return body +} + +func (c *CustomFieldsManager) RemoveCustomFieldDef(req *types.RemoveCustomFieldDef) soap.HasFault { + body := &methods.RemoveCustomFieldDefBody{} + + i, field := c.find(req.Key) + if field == nil { + body.Fault_ = Fault("", &types.NotFound{}) + return body + } + + c.Field = append(c.Field[:i], c.Field[i+1:]...) 
+ + body.Res = &types.RemoveCustomFieldDefResponse{} + return body +} + +func (c *CustomFieldsManager) RenameCustomFieldDef(req *types.RenameCustomFieldDef) soap.HasFault { + body := &methods.RenameCustomFieldDefBody{} + + _, field := c.find(req.Key) + if field == nil { + body.Fault_ = Fault("", &types.NotFound{}) + return body + } + + field.Name = req.Name + + body.Res = &types.RenameCustomFieldDefResponse{} + return body +} + +func (c *CustomFieldsManager) SetField(req *types.SetField) soap.HasFault { + body := &methods.SetFieldBody{} + + entity := Map.Get(req.Entity).(mo.Entity).Entity() + entity.CustomValue = append(entity.CustomValue, &types.CustomFieldStringValue{ + CustomFieldValue: types.CustomFieldValue{Key: req.Key}, + Value: req.Value, + }) + + body.Res = &types.SetFieldResponse{} + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/datacenter.go b/vendor/github.com/vmware/govmomi/simulator/datacenter.go new file mode 100644 index 00000000000..15b5d432424 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/datacenter.go @@ -0,0 +1,76 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "strings" + + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// Create Datacenter Folders. +// Every Datacenter has 4 inventory Folders: Vm, Host, Datastore and Network. +// The ESX folder child types are limited to 1 type. +// The VC folders have additional child types, including nested folders. 
+func createDatacenterFolders(dc *mo.Datacenter, isVC bool) { + folders := []struct { + ref *types.ManagedObjectReference + name string + types []string + }{ + {&dc.VmFolder, "vm", []string{"VirtualMachine", "VirtualApp", "Folder"}}, + {&dc.HostFolder, "host", []string{"ComputeResource", "Folder"}}, + {&dc.DatastoreFolder, "datastore", []string{"Datastore", "StoragePod", "Folder"}}, + {&dc.NetworkFolder, "network", []string{"Network", "DistributedVirtualSwitch", "Folder"}}, + } + + for _, f := range folders { + folder := &Folder{} + folder.Name = f.name + + if isVC { + folder.ChildType = f.types + e := Map.PutEntity(dc, folder) + + // propagate the generated morefs to Datacenter + ref := e.Reference() + f.ref.Type = ref.Type + f.ref.Value = ref.Value + } else { + folder.ChildType = f.types[:1] + folder.Self = *f.ref + Map.PutEntity(dc, folder) + } + } + + net := Map.Get(dc.NetworkFolder).(*Folder) + + for _, ref := range esx.Datacenter.Network { + // Add VM Network by default to each Datacenter + network := &mo.Network{} + network.Self = ref + network.Name = strings.Split(ref.Value, "-")[1] + network.Entity().Name = network.Name + if isVC { + network.Self.Value = "" // we want a different moid per-DC + } + + net.putChild(network) + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/datastore.go b/vendor/github.com/vmware/govmomi/simulator/datastore.go new file mode 100644 index 00000000000..527f85dec17 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/datastore.go @@ -0,0 +1,59 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "time" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type Datastore struct { + mo.Datastore +} + +func parseDatastorePath(dsPath string) (*object.DatastorePath, types.BaseMethodFault) { + var p object.DatastorePath + + if p.FromString(dsPath) { + return &p, nil + } + + return nil, &types.InvalidDatastorePath{DatastorePath: dsPath} +} + +func (ds *Datastore) RefreshDatastore(*types.RefreshDatastore) soap.HasFault { + r := &methods.RefreshDatastoreBody{} + + err := ds.stat() + if err != nil { + r.Fault_ = Fault(err.Error(), &types.HostConfigFault{}) + return r + } + + info := ds.Info.GetDatastoreInfo() + + now := time.Now() + + info.Timestamp = &now + + return r +} diff --git a/vendor/github.com/vmware/govmomi/simulator/doc.go b/vendor/github.com/vmware/govmomi/simulator/doc.go new file mode 100644 index 00000000000..441e9a0e7f4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/doc.go @@ -0,0 +1,22 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package simulator is a mock framework for the vSphere API. + +See also: https://github.com/vmware/govmomi/blob/master/vcsim/README.md +*/ +package simulator diff --git a/vendor/github.com/vmware/govmomi/simulator/dvs.go b/vendor/github.com/vmware/govmomi/simulator/dvs.go new file mode 100644 index 00000000000..e60b5601092 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/dvs.go @@ -0,0 +1,187 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type DistributedVirtualSwitch struct { + mo.DistributedVirtualSwitch +} + +func (s *DistributedVirtualSwitch) AddDVPortgroupTask(c *types.AddDVPortgroup_Task) soap.HasFault { + task := CreateTask(s, "addDVPortgroup", func(t *Task) (types.AnyType, types.BaseMethodFault) { + f := Map.getEntityParent(s, "Folder").(*Folder) + + for _, spec := range c.Spec { + pg := &DistributedVirtualPortgroup{} + pg.Name = spec.Name + pg.Entity().Name = pg.Name + + if obj := Map.FindByName(pg.Name, f.ChildEntity); obj != nil { + return nil, &types.DuplicateName{ + Name: pg.Name, + Object: obj.Reference(), + } + } + + f.putChild(pg) + + pg.Key = pg.Self.Value + pg.Config = types.DVPortgroupConfigInfo{ + Key: pg.Key, + Name: pg.Name, + NumPorts: spec.NumPorts, + DistributedVirtualSwitch: &s.Self, + DefaultPortConfig: spec.DefaultPortConfig, + Description: spec.Description, + Type: spec.Type, + Policy: spec.Policy, + PortNameFormat: spec.PortNameFormat, + Scope: spec.Scope, + VendorSpecificConfig: spec.VendorSpecificConfig, + ConfigVersion: spec.ConfigVersion, + AutoExpand: spec.AutoExpand, + VmVnicNetworkResourcePoolKey: spec.VmVnicNetworkResourcePoolKey, + } + + s.Portgroup = append(s.Portgroup, pg.Self) + s.Summary.PortgroupName = append(s.Summary.PortgroupName, pg.Name) + + for _, h := range s.Summary.HostMember { + pg.Host = AddReference(h, pg.Host) + host := Map.Get(h).(*HostSystem) + host.Network = append(host.Network, pg.Reference()) + } + } + + return nil, nil + }) + + return &methods.AddDVPortgroup_TaskBody{ + Res: &types.AddDVPortgroup_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (s *DistributedVirtualSwitch) ReconfigureDvsTask(req *types.ReconfigureDvs_Task) soap.HasFault { + task := CreateTask(s, "reconfigureDvs", func(t *Task) (types.AnyType, types.BaseMethodFault) { + spec := req.Spec.GetDVSConfigSpec() + + for _, member := range spec.Host { + h := Map.Get(member.Host) + if h == nil { + 
return nil, &types.ManagedObjectNotFound{Obj: member.Host} + } + + host := h.(*HostSystem) + + switch types.ConfigSpecOperation(member.Operation) { + case types.ConfigSpecOperationAdd: + if FindReference(host.Network, s.Self) != nil { + return nil, &types.AlreadyExists{Name: host.Name} + } + + host.Network = append(host.Network, s.Self) + host.Network = append(host.Network, s.Portgroup...) + s.Summary.HostMember = append(s.Summary.HostMember, member.Host) + + for _, ref := range s.Portgroup { + pg := Map.Get(ref).(*DistributedVirtualPortgroup) + pg.Host = AddReference(member.Host, pg.Host) + } + case types.ConfigSpecOperationRemove: + if pg := FindReference(host.Network, s.Portgroup...); pg != nil { + return nil, &types.ResourceInUse{ + Type: pg.Type, + Name: pg.Value, + } + } + + host.Network = RemoveReference(s.Self, host.Network) + s.Summary.HostMember = RemoveReference(s.Self, s.Summary.HostMember) + case types.ConfigSpecOperationEdit: + return nil, &types.NotSupported{} + } + } + + return nil, nil + }) + + return &methods.ReconfigureDvs_TaskBody{ + Res: &types.ReconfigureDvs_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (s *DistributedVirtualSwitch) FetchDVPorts(req *types.FetchDVPorts) soap.HasFault { + body := &methods.FetchDVPortsBody{} + body.Res = &types.FetchDVPortsResponse{ + Returnval: s.dvPortgroups(req.Criteria), + } + return body +} + +func (s *DistributedVirtualSwitch) DestroyTask(req *types.Destroy_Task) soap.HasFault { + task := CreateTask(s, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { + f := Map.getEntityParent(s, "Folder").(*Folder) + f.removeChild(s.Reference()) + return nil, nil + }) + + return &methods.Destroy_TaskBody{ + Res: &types.Destroy_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (s *DistributedVirtualSwitch) dvPortgroups(_ *types.DistributedVirtualSwitchPortCriteria) []types.DistributedVirtualPort { + // TODO(agui): Filter is not implemented yet + var res []types.DistributedVirtualPort + for _, ref := range s.Portgroup { + pg := Map.Get(ref).(*DistributedVirtualPortgroup) + res = append(res, types.DistributedVirtualPort{ + DvsUuid: s.Uuid, + Key: pg.Key, + Config: types.DVPortConfigInfo{ + Setting: pg.Config.DefaultPortConfig, + }, + }) + + if pg.PortKeys == nil { + continue + } + + for _, key := range pg.PortKeys { + res = append(res, types.DistributedVirtualPort{ + DvsUuid: s.Uuid, + Key: key, + Config: types.DVPortConfigInfo{ + Setting: pg.Config.DefaultPortConfig, + }, + }) + } + } + return res +} diff --git a/vendor/github.com/vmware/govmomi/simulator/entity.go b/vendor/github.com/vmware/govmomi/simulator/entity.go new file mode 100644 index 00000000000..8266c0235ec --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/entity.go @@ -0,0 +1,46 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +func RenameTask(e mo.Entity, r *types.Rename_Task) soap.HasFault { + task := CreateTask(e, "rename", func(t *Task) (types.AnyType, types.BaseMethodFault) { + obj := Map.Get(r.This).(mo.Entity).Entity() + + if parent, ok := Map.Get(*obj.Parent).(*Folder); ok { + if Map.FindByName(r.NewName, parent.ChildEntity) != nil { + return nil, &types.InvalidArgument{InvalidProperty: "name"} + } + } + + obj.Name = r.NewName + + return nil, nil + }) + + return &methods.Rename_TaskBody{ + Res: &types.Rename_TaskResponse{ + Returnval: task.Run(), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/BUILD b/vendor/github.com/vmware/govmomi/simulator/esx/BUILD new file mode 100644 index 00000000000..acfb87ca798 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "authorization_manager.go", + "datacenter.go", + "doc.go", + "host_config_info.go", + "host_firewall_system.go", + "host_hardware_info.go", + "host_storage_device_info.go", + "host_system.go", + "performance_manager.go", + "resource_pool.go", + "root_folder.go", + "service_content.go", + "setting.go", + "virtual_device.go", + ], + importpath = "github.com/vmware/govmomi/simulator/esx", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go b/vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go new file mode 100644 index 00000000000..e72c06c851f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go @@ -0,0 +1,85 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// RoleList is the default template for the AuthorizationManager roleList property. 
+// Capture method: +// govc object.collect -s -dump AuthorizationManager:ha-authmgr roleList +var RoleList = []types.AuthorizationRole{ + { + RoleId: -6, + System: true, + Name: "NoCryptoAdmin", + Info: &types.Description{ + Label: "No cryptography administrator", + Summary: "Full access without Cryptographic operations privileges", + }, + Privilege: nil, + }, + { + RoleId: -5, + System: true, + Name: "NoAccess", + Info: &types.Description{ + Label: "No access", + Summary: "Used for restricting granted access", + }, + Privilege: nil, + }, + { + RoleId: -4, + System: true, + Name: "Anonymous", + Info: &types.Description{ + Label: "Anonymous", + Summary: "Not logged-in user (cannot be granted)", + }, + Privilege: []string{"System.Anonymous"}, + }, + { + RoleId: -3, + System: true, + Name: "View", + Info: &types.Description{ + Label: "View", + Summary: "Visibility access (cannot be granted)", + }, + Privilege: []string{"System.Anonymous", "System.View"}, + }, + { + RoleId: -2, + System: true, + Name: "ReadOnly", + Info: &types.Description{ + Label: "Read-only", + Summary: "See details of objects, but not make changes", + }, + Privilege: []string{"System.Anonymous", "System.Read", "System.View"}, + }, + { + RoleId: -1, + System: true, + Name: "Admin", + Info: &types.Description{ + Label: "Administrator", + Summary: "Full access rights", + }, + Privilege: []string{"Alarm.Acknowledge", "Alarm.Create", "Alarm.Delete", "Alarm.DisableActions", "Alarm.Edit", "Alarm.SetStatus", "Authorization.ModifyPermissions", "Authorization.ModifyRoles", "Authorization.ReassignRolePermissions", "Certificate.Manage", "Cryptographer.Access", "Cryptographer.AddDisk", "Cryptographer.Clone", "Cryptographer.Decrypt", "Cryptographer.Encrypt", "Cryptographer.EncryptNew", "Cryptographer.ManageEncryptionPolicy", "Cryptographer.ManageKeyServers", "Cryptographer.ManageKeys", "Cryptographer.Migrate", "Cryptographer.Recrypt", "Cryptographer.RegisterHost", "Cryptographer.RegisterVM", "DVPortgroup.Create", "DVPortgroup.Delete", "DVPortgroup.Modify", "DVPortgroup.PolicyOp", "DVPortgroup.ScopeOp", "DVSwitch.Create", "DVSwitch.Delete", "DVSwitch.HostOp", "DVSwitch.Modify", "DVSwitch.Move", "DVSwitch.PolicyOp", "DVSwitch.PortConfig", "DVSwitch.PortSetting", "DVSwitch.ResourceManagement", "DVSwitch.Vspan", "Datacenter.Create", "Datacenter.Delete", "Datacenter.IpPoolConfig", "Datacenter.IpPoolQueryAllocations", "Datacenter.IpPoolReleaseIp", "Datacenter.Move", "Datacenter.Reconfigure", "Datacenter.Rename", "Datastore.AllocateSpace", "Datastore.Browse", "Datastore.Config", "Datastore.Delete", "Datastore.DeleteFile", "Datastore.FileManagement", "Datastore.Move", "Datastore.Rename", "Datastore.UpdateVirtualMachineFiles", "Datastore.UpdateVirtualMachineMetadata", "EAM.Config", "EAM.Modify", "EAM.View", "Extension.Register", "Extension.Unregister", "Extension.Update", "ExternalStatsProvider.Register", "ExternalStatsProvider.Unregister", "ExternalStatsProvider.Update", "Folder.Create", "Folder.Delete", "Folder.Move", "Folder.Rename", "Global.CancelTask", "Global.CapacityPlanning", "Global.Diagnostics", "Global.DisableMethods", "Global.EnableMethods", "Global.GlobalTag", "Global.Health", "Global.Licenses", "Global.LogEvent", "Global.ManageCustomFields", "Global.Proxy", "Global.ScriptAction", "Global.ServiceManagers", "Global.SetCustomField", "Global.Settings", "Global.SystemTag", "Global.VCServer", "HealthUpdateProvider.Register", "HealthUpdateProvider.Unregister", "HealthUpdateProvider.Update", "Host.Cim.CimInteraction", 
"Host.Config.AdvancedConfig", "Host.Config.AuthenticationStore", "Host.Config.AutoStart", "Host.Config.Connection", "Host.Config.DateTime", "Host.Config.Firmware", "Host.Config.HyperThreading", "Host.Config.Image", "Host.Config.Maintenance", "Host.Config.Memory", "Host.Config.NetService", "Host.Config.Network", "Host.Config.Patch", "Host.Config.PciPassthru", "Host.Config.Power", "Host.Config.Quarantine", "Host.Config.Resources", "Host.Config.Settings", "Host.Config.Snmp", "Host.Config.Storage", "Host.Config.SystemManagement", "Host.Hbr.HbrManagement", "Host.Inventory.AddHostToCluster", "Host.Inventory.AddStandaloneHost", "Host.Inventory.CreateCluster", "Host.Inventory.DeleteCluster", "Host.Inventory.EditCluster", "Host.Inventory.MoveCluster", "Host.Inventory.MoveHost", "Host.Inventory.RemoveHostFromCluster", "Host.Inventory.RenameCluster", "Host.Local.CreateVM", "Host.Local.DeleteVM", "Host.Local.InstallAgent", "Host.Local.ManageUserGroups", "Host.Local.ReconfigVM", "Network.Assign", "Network.Config", "Network.Delete", "Network.Move", "Performance.ModifyIntervals", "Profile.Clear", "Profile.Create", "Profile.Delete", "Profile.Edit", "Profile.Export", "Profile.View", "Resource.ApplyRecommendation", "Resource.AssignVAppToPool", "Resource.AssignVMToPool", "Resource.ColdMigrate", "Resource.CreatePool", "Resource.DeletePool", "Resource.EditPool", "Resource.HotMigrate", "Resource.MovePool", "Resource.QueryVMotion", "Resource.RenamePool", "ScheduledTask.Create", "ScheduledTask.Delete", "ScheduledTask.Edit", "ScheduledTask.Run", "Sessions.GlobalMessage", "Sessions.ImpersonateUser", "Sessions.TerminateSession", "Sessions.ValidateSession", "StoragePod.Config", "System.Anonymous", "System.Read", "System.View", "Task.Create", "Task.Update", "VApp.ApplicationConfig", "VApp.AssignResourcePool", "VApp.AssignVApp", "VApp.AssignVM", "VApp.Clone", "VApp.Create", "VApp.Delete", "VApp.Export", "VApp.ExtractOvfEnvironment", "VApp.Import", "VApp.InstanceConfig", "VApp.ManagedByConfig", "VApp.Move", "VApp.PowerOff", "VApp.PowerOn", "VApp.Rename", "VApp.ResourceConfig", "VApp.Suspend", "VApp.Unregister", "VRMPolicy.Query", "VRMPolicy.Update", "VirtualMachine.Config.AddExistingDisk", "VirtualMachine.Config.AddNewDisk", "VirtualMachine.Config.AddRemoveDevice", "VirtualMachine.Config.AdvancedConfig", "VirtualMachine.Config.Annotation", "VirtualMachine.Config.CPUCount", "VirtualMachine.Config.ChangeTracking", "VirtualMachine.Config.DiskExtend", "VirtualMachine.Config.DiskLease", "VirtualMachine.Config.EditDevice", "VirtualMachine.Config.HostUSBDevice", "VirtualMachine.Config.ManagedBy", "VirtualMachine.Config.Memory", "VirtualMachine.Config.MksControl", "VirtualMachine.Config.QueryFTCompatibility", "VirtualMachine.Config.QueryUnownedFiles", "VirtualMachine.Config.RawDevice", "VirtualMachine.Config.ReloadFromPath", "VirtualMachine.Config.RemoveDisk", "VirtualMachine.Config.Rename", "VirtualMachine.Config.ResetGuestInfo", "VirtualMachine.Config.Resource", "VirtualMachine.Config.Settings", "VirtualMachine.Config.SwapPlacement", "VirtualMachine.Config.ToggleForkParent", "VirtualMachine.Config.Unlock", "VirtualMachine.Config.UpgradeVirtualHardware", "VirtualMachine.GuestOperations.Execute", "VirtualMachine.GuestOperations.Modify", "VirtualMachine.GuestOperations.ModifyAliases", "VirtualMachine.GuestOperations.Query", "VirtualMachine.GuestOperations.QueryAliases", "VirtualMachine.Hbr.ConfigureReplication", "VirtualMachine.Hbr.MonitorReplication", "VirtualMachine.Hbr.ReplicaManagement", 
"VirtualMachine.Interact.AnswerQuestion", "VirtualMachine.Interact.Backup", "VirtualMachine.Interact.ConsoleInteract", "VirtualMachine.Interact.CreateScreenshot", "VirtualMachine.Interact.CreateSecondary", "VirtualMachine.Interact.DefragmentAllDisks", "VirtualMachine.Interact.DeviceConnection", "VirtualMachine.Interact.DisableSecondary", "VirtualMachine.Interact.DnD", "VirtualMachine.Interact.EnableSecondary", "VirtualMachine.Interact.GuestControl", "VirtualMachine.Interact.MakePrimary", "VirtualMachine.Interact.Pause", "VirtualMachine.Interact.PowerOff", "VirtualMachine.Interact.PowerOn", "VirtualMachine.Interact.PutUsbScanCodes", "VirtualMachine.Interact.Record", "VirtualMachine.Interact.Replay", "VirtualMachine.Interact.Reset", "VirtualMachine.Interact.SESparseMaintenance", "VirtualMachine.Interact.SetCDMedia", "VirtualMachine.Interact.SetFloppyMedia", "VirtualMachine.Interact.Suspend", "VirtualMachine.Interact.TerminateFaultTolerantVM", "VirtualMachine.Interact.ToolsInstall", "VirtualMachine.Interact.TurnOffFaultTolerance", "VirtualMachine.Inventory.Create", "VirtualMachine.Inventory.CreateFromExisting", "VirtualMachine.Inventory.Delete", "VirtualMachine.Inventory.Move", "VirtualMachine.Inventory.Register", "VirtualMachine.Inventory.Unregister", "VirtualMachine.Namespace.Event", "VirtualMachine.Namespace.EventNotify", "VirtualMachine.Namespace.Management", "VirtualMachine.Namespace.ModifyContent", "VirtualMachine.Namespace.Query", "VirtualMachine.Namespace.ReadContent", "VirtualMachine.Provisioning.Clone", "VirtualMachine.Provisioning.CloneTemplate", "VirtualMachine.Provisioning.CreateTemplateFromVM", "VirtualMachine.Provisioning.Customize", "VirtualMachine.Provisioning.DeployTemplate", "VirtualMachine.Provisioning.DiskRandomAccess", "VirtualMachine.Provisioning.DiskRandomRead", "VirtualMachine.Provisioning.FileRandomAccess", "VirtualMachine.Provisioning.GetVmFiles", "VirtualMachine.Provisioning.MarkAsTemplate", "VirtualMachine.Provisioning.MarkAsVM", "VirtualMachine.Provisioning.ModifyCustSpecs", "VirtualMachine.Provisioning.PromoteDisks", "VirtualMachine.Provisioning.PutVmFiles", "VirtualMachine.Provisioning.ReadCustSpecs", "VirtualMachine.State.CreateSnapshot", "VirtualMachine.State.RemoveSnapshot", "VirtualMachine.State.RenameSnapshot", "VirtualMachine.State.RevertToSnapshot"}, + }, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go b/vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go new file mode 100644 index 00000000000..c0f95eff9c9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go @@ -0,0 +1,60 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import ( + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// Datacenter is the default template for Datacenter properties. 
+// Capture method: +// govc datacenter.info -dump +var Datacenter = mo.Datacenter{ + ManagedEntity: mo.ManagedEntity{ + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{Type: "Datacenter", Value: "ha-datacenter"}, + Value: nil, + AvailableField: nil, + }, + Parent: (*types.ManagedObjectReference)(nil), + CustomValue: nil, + OverallStatus: "", + ConfigStatus: "", + ConfigIssue: nil, + EffectiveRole: nil, + Permission: nil, + Name: "ha-datacenter", + DisabledMethod: nil, + RecentTask: nil, + DeclaredAlarmState: nil, + TriggeredAlarmState: nil, + AlarmActionsEnabled: (*bool)(nil), + Tag: nil, + }, + VmFolder: types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-vm"}, + HostFolder: types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-host"}, + DatastoreFolder: types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-datastore"}, + NetworkFolder: types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-network"}, + Datastore: []types.ManagedObjectReference{ + {Type: "Datastore", Value: "57089c25-85e3ccd4-17b6-000c29d0beb3"}, + }, + Network: []types.ManagedObjectReference{ + {Type: "Network", Value: "HaNetwork-VM Network"}, + }, + Configuration: types.DatacenterConfigInfo{}, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/doc.go b/vendor/github.com/vmware/govmomi/simulator/esx/doc.go new file mode 100644 index 00000000000..50b6202fa65 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package esx contains SOAP responses from an ESX server, captured using `govc ... -dump`. +*/ +package esx diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go new file mode 100644 index 00000000000..3c8295a6c44 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go @@ -0,0 +1,1091 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// HostConfigInfo is the default template for the HostSystem config property. 
+// Capture method: +// govc object.collect -s -dump HostSystem:ha-host config +var HostConfigInfo = types.HostConfigInfo{ + Host: types.ManagedObjectReference{Type: "HostSystem", Value: "ha-host"}, + Product: types.AboutInfo{ + Name: "VMware ESXi", + FullName: "VMware ESXi 6.5.0 build-5969303", + Vendor: "VMware, Inc.", + Version: "6.5.0", + Build: "5969303", + LocaleVersion: "INTL", + LocaleBuild: "000", + OsType: "vmnix-x86", + ProductLineId: "embeddedEsx", + ApiType: "HostAgent", + ApiVersion: "6.5", + InstanceUuid: "", + LicenseProductName: "VMware ESX Server", + LicenseProductVersion: "6.0", + }, + DeploymentInfo: &types.HostDeploymentInfo{ + BootedFromStatelessCache: types.NewBool(false), + }, + HyperThread: &types.HostHyperThreadScheduleInfo{ + Available: false, + Active: false, + Config: true, + }, + ConsoleReservation: (*types.ServiceConsoleReservationInfo)(nil), + VirtualMachineReservation: (*types.VirtualMachineMemoryReservationInfo)(nil), + StorageDevice: &HostStorageDeviceInfo, + SystemFile: nil, + Network: &types.HostNetworkInfo{ + Vswitch: []types.HostVirtualSwitch{ + { + Name: "vSwitch0", + Key: "key-vim.host.VirtualSwitch-vSwitch0", + NumPorts: 1536, + NumPortsAvailable: 1530, + Mtu: 1500, + Portgroup: []string{"key-vim.host.PortGroup-VM Network", "key-vim.host.PortGroup-Management Network"}, + Pnic: []string{"key-vim.host.PhysicalNic-vmnic0"}, + Spec: types.HostVirtualSwitchSpec{ + NumPorts: 128, + Bridge: &types.HostVirtualSwitchBondBridge{ + HostVirtualSwitchBridge: types.HostVirtualSwitchBridge{}, + NicDevice: []string{"vmnic0"}, + Beacon: &types.HostVirtualSwitchBeaconConfig{ + Interval: 1, + }, + LinkDiscoveryProtocolConfig: &types.LinkDiscoveryProtocolConfig{ + Protocol: "cdp", + Operation: "listen", + }, + }, + Policy: &types.HostNetworkPolicy{ + Security: &types.HostNetworkSecurityPolicy{ + AllowPromiscuous: types.NewBool(false), + MacChanges: types.NewBool(true), + ForgedTransmits: types.NewBool(true), + }, + NicTeaming: &types.HostNicTeamingPolicy{ + Policy: "loadbalance_srcid", + ReversePolicy: types.NewBool(true), + NotifySwitches: types.NewBool(true), + RollingOrder: types.NewBool(false), + FailureCriteria: &types.HostNicFailureCriteria{ + CheckSpeed: "minimum", + Speed: 10, + CheckDuplex: types.NewBool(false), + FullDuplex: types.NewBool(false), + CheckErrorPercent: types.NewBool(false), + Percentage: 0, + CheckBeacon: types.NewBool(false), + }, + NicOrder: &types.HostNicOrderPolicy{ + ActiveNic: []string{"vmnic0"}, + StandbyNic: nil, + }, + }, + OffloadPolicy: &types.HostNetOffloadCapabilities{ + CsumOffload: types.NewBool(true), + TcpSegmentation: types.NewBool(true), + ZeroCopyXmit: types.NewBool(true), + }, + ShapingPolicy: &types.HostNetworkTrafficShapingPolicy{ + Enabled: types.NewBool(false), + AverageBandwidth: 0, + PeakBandwidth: 0, + BurstSize: 0, + }, + }, + Mtu: 0, + }, + }, + }, + ProxySwitch: nil, + Portgroup: []types.HostPortGroup{ + { + Key: "key-vim.host.PortGroup-VM Network", + Port: nil, + Vswitch: "key-vim.host.VirtualSwitch-vSwitch0", + ComputedPolicy: types.HostNetworkPolicy{ + Security: &types.HostNetworkSecurityPolicy{ + AllowPromiscuous: types.NewBool(false), + MacChanges: types.NewBool(true), + ForgedTransmits: types.NewBool(true), + }, + NicTeaming: &types.HostNicTeamingPolicy{ + Policy: "loadbalance_srcid", + ReversePolicy: types.NewBool(true), + NotifySwitches: types.NewBool(true), + RollingOrder: types.NewBool(false), + FailureCriteria: &types.HostNicFailureCriteria{ + CheckSpeed: "minimum", + Speed: 10, + CheckDuplex: 
types.NewBool(false), + FullDuplex: types.NewBool(false), + CheckErrorPercent: types.NewBool(false), + Percentage: 0, + CheckBeacon: types.NewBool(false), + }, + NicOrder: &types.HostNicOrderPolicy{ + ActiveNic: []string{"vmnic0"}, + StandbyNic: nil, + }, + }, + OffloadPolicy: &types.HostNetOffloadCapabilities{ + CsumOffload: types.NewBool(true), + TcpSegmentation: types.NewBool(true), + ZeroCopyXmit: types.NewBool(true), + }, + ShapingPolicy: &types.HostNetworkTrafficShapingPolicy{ + Enabled: types.NewBool(false), + AverageBandwidth: 0, + PeakBandwidth: 0, + BurstSize: 0, + }, + }, + Spec: types.HostPortGroupSpec{ + Name: "VM Network", + VlanId: 0, + VswitchName: "vSwitch0", + Policy: types.HostNetworkPolicy{ + Security: &types.HostNetworkSecurityPolicy{}, + NicTeaming: &types.HostNicTeamingPolicy{ + Policy: "", + ReversePolicy: (*bool)(nil), + NotifySwitches: (*bool)(nil), + RollingOrder: (*bool)(nil), + FailureCriteria: &types.HostNicFailureCriteria{}, + NicOrder: (*types.HostNicOrderPolicy)(nil), + }, + OffloadPolicy: &types.HostNetOffloadCapabilities{}, + ShapingPolicy: &types.HostNetworkTrafficShapingPolicy{}, + }, + }, + }, + { + Key: "key-vim.host.PortGroup-Management Network", + Port: []types.HostPortGroupPort{ + { + Key: "key-vim.host.PortGroup.Port-33554436", + Mac: []string{"00:0c:29:81:d8:a0"}, + Type: "host", + }, + }, + Vswitch: "key-vim.host.VirtualSwitch-vSwitch0", + ComputedPolicy: types.HostNetworkPolicy{ + Security: &types.HostNetworkSecurityPolicy{ + AllowPromiscuous: types.NewBool(false), + MacChanges: types.NewBool(true), + ForgedTransmits: types.NewBool(true), + }, + NicTeaming: &types.HostNicTeamingPolicy{ + Policy: "loadbalance_srcid", + ReversePolicy: types.NewBool(true), + NotifySwitches: types.NewBool(true), + RollingOrder: types.NewBool(false), + FailureCriteria: &types.HostNicFailureCriteria{ + CheckSpeed: "minimum", + Speed: 10, + CheckDuplex: types.NewBool(false), + FullDuplex: types.NewBool(false), + CheckErrorPercent: types.NewBool(false), + Percentage: 0, + CheckBeacon: types.NewBool(false), + }, + NicOrder: &types.HostNicOrderPolicy{ + ActiveNic: []string{"vmnic0"}, + StandbyNic: nil, + }, + }, + OffloadPolicy: &types.HostNetOffloadCapabilities{ + CsumOffload: types.NewBool(true), + TcpSegmentation: types.NewBool(true), + ZeroCopyXmit: types.NewBool(true), + }, + ShapingPolicy: &types.HostNetworkTrafficShapingPolicy{ + Enabled: types.NewBool(false), + AverageBandwidth: 0, + PeakBandwidth: 0, + BurstSize: 0, + }, + }, + Spec: types.HostPortGroupSpec{ + Name: "Management Network", + VlanId: 0, + VswitchName: "vSwitch0", + Policy: types.HostNetworkPolicy{ + Security: &types.HostNetworkSecurityPolicy{}, + NicTeaming: &types.HostNicTeamingPolicy{ + Policy: "loadbalance_srcid", + ReversePolicy: (*bool)(nil), + NotifySwitches: types.NewBool(true), + RollingOrder: types.NewBool(false), + FailureCriteria: &types.HostNicFailureCriteria{ + CheckSpeed: "", + Speed: 0, + CheckDuplex: (*bool)(nil), + FullDuplex: (*bool)(nil), + CheckErrorPercent: (*bool)(nil), + Percentage: 0, + CheckBeacon: types.NewBool(false), + }, + NicOrder: &types.HostNicOrderPolicy{ + ActiveNic: []string{"vmnic0"}, + StandbyNic: nil, + }, + }, + OffloadPolicy: &types.HostNetOffloadCapabilities{}, + ShapingPolicy: &types.HostNetworkTrafficShapingPolicy{}, + }, + }, + }, + }, + Pnic: []types.PhysicalNic{ + { + Key: "key-vim.host.PhysicalNic-vmnic0", + Device: "vmnic0", + Pci: "0000:0b:00.0", + Driver: "nvmxnet3", + LinkSpeed: &types.PhysicalNicLinkInfo{ + SpeedMb: 10000, + Duplex: true, + }, + 
ValidLinkSpecification: []types.PhysicalNicLinkInfo{ + { + SpeedMb: 10000, + Duplex: true, + }, + }, + Spec: types.PhysicalNicSpec{ + Ip: &types.HostIpConfig{}, + LinkSpeed: &types.PhysicalNicLinkInfo{ + SpeedMb: 10000, + Duplex: true, + }, + }, + WakeOnLanSupported: false, + Mac: "00:0c:29:81:d8:a0", + FcoeConfiguration: &types.FcoeConfig{ + PriorityClass: 3, + SourceMac: "00:0c:29:81:d8:a0", + VlanRange: []types.FcoeConfigVlanRange{ + {}, + }, + Capabilities: types.FcoeConfigFcoeCapabilities{ + PriorityClass: false, + SourceMacAddress: false, + VlanRange: true, + }, + FcoeActive: false, + }, + VmDirectPathGen2Supported: types.NewBool(false), + VmDirectPathGen2SupportedMode: "", + ResourcePoolSchedulerAllowed: types.NewBool(true), + ResourcePoolSchedulerDisallowedReason: nil, + AutoNegotiateSupported: types.NewBool(false), + }, + { + Key: "key-vim.host.PhysicalNic-vmnic1", + Device: "vmnic1", + Pci: "0000:13:00.0", + Driver: "nvmxnet3", + LinkSpeed: &types.PhysicalNicLinkInfo{ + SpeedMb: 10000, + Duplex: true, + }, + ValidLinkSpecification: []types.PhysicalNicLinkInfo{ + { + SpeedMb: 10000, + Duplex: true, + }, + }, + Spec: types.PhysicalNicSpec{ + Ip: &types.HostIpConfig{}, + LinkSpeed: &types.PhysicalNicLinkInfo{ + SpeedMb: 10000, + Duplex: true, + }, + }, + WakeOnLanSupported: false, + Mac: "00:0c:29:81:d8:aa", + FcoeConfiguration: &types.FcoeConfig{ + PriorityClass: 3, + SourceMac: "00:0c:29:81:d8:aa", + VlanRange: []types.FcoeConfigVlanRange{ + {}, + }, + Capabilities: types.FcoeConfigFcoeCapabilities{ + PriorityClass: false, + SourceMacAddress: false, + VlanRange: true, + }, + FcoeActive: false, + }, + VmDirectPathGen2Supported: types.NewBool(false), + VmDirectPathGen2SupportedMode: "", + ResourcePoolSchedulerAllowed: types.NewBool(true), + ResourcePoolSchedulerDisallowedReason: nil, + AutoNegotiateSupported: types.NewBool(false), + }, + }, + Vnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "key-vim.host.PortGroup.Port-33554436", + }, + }, + ConsoleVnic: nil, + DnsConfig: &types.HostDnsConfig{ + Dhcp: true, + VirtualNicDevice: "vmk0", + HostName: "localhost", + DomainName: "localdomain", + Address: []string{"8.8.8.8"}, + SearchDomain: []string{"localdomain"}, + }, + IpRouteConfig: &types.HostIpRouteConfig{ + DefaultGateway: "127.0.0.1", + GatewayDevice: "", + IpV6DefaultGateway: "", + IpV6GatewayDevice: "", + }, + ConsoleIpRouteConfig: nil, + RouteTableInfo: &types.HostIpRouteTableInfo{ + IpRoute: []types.HostIpRouteEntry{ + { + Network: "0.0.0.0", + PrefixLength: 0, + Gateway: "127.0.0.1", + DeviceName: "vmk0", + }, + { + Network: "127.0.0.0", + PrefixLength: 8, + Gateway: "0.0.0.0", + DeviceName: "vmk0", + }, + }, + Ipv6Route: nil, + }, + Dhcp: nil, + Nat: nil, + IpV6Enabled: types.NewBool(false), + AtBootIpV6Enabled: types.NewBool(false), + NetStackInstance: []types.HostNetStackInstance{ + { + Key: 
"vSphereProvisioning", + Name: "", + DnsConfig: &types.HostDnsConfig{}, + IpRouteConfig: &types.HostIpRouteConfig{}, + RequestedMaxNumberOfConnections: 11000, + CongestionControlAlgorithm: "newreno", + IpV6Enabled: types.NewBool(true), + RouteTableConfig: (*types.HostIpRouteTableConfig)(nil), + }, + { + Key: "vmotion", + Name: "", + DnsConfig: &types.HostDnsConfig{}, + IpRouteConfig: &types.HostIpRouteConfig{}, + RequestedMaxNumberOfConnections: 11000, + CongestionControlAlgorithm: "newreno", + IpV6Enabled: types.NewBool(true), + RouteTableConfig: (*types.HostIpRouteTableConfig)(nil), + }, + { + Key: "defaultTcpipStack", + Name: "defaultTcpipStack", + DnsConfig: &types.HostDnsConfig{ + Dhcp: true, + VirtualNicDevice: "vmk0", + HostName: "localhost", + DomainName: "localdomain", + Address: []string{"8.8.8.8"}, + SearchDomain: []string{"localdomain"}, + }, + IpRouteConfig: &types.HostIpRouteConfig{ + DefaultGateway: "127.0.0.1", + GatewayDevice: "", + IpV6DefaultGateway: "", + IpV6GatewayDevice: "", + }, + RequestedMaxNumberOfConnections: 11000, + CongestionControlAlgorithm: "newreno", + IpV6Enabled: types.NewBool(true), + RouteTableConfig: &types.HostIpRouteTableConfig{ + IpRoute: []types.HostIpRouteOp{ + { + ChangeOperation: "ignore", + Route: types.HostIpRouteEntry{ + Network: "0.0.0.0", + PrefixLength: 0, + Gateway: "127.0.0.1", + DeviceName: "vmk0", + }, + }, + { + ChangeOperation: "ignore", + Route: types.HostIpRouteEntry{ + Network: "127.0.0.0", + PrefixLength: 8, + Gateway: "0.0.0.0", + DeviceName: "vmk0", + }, + }, + }, + Ipv6Route: nil, + }, + }, + }, + OpaqueSwitch: nil, + OpaqueNetwork: nil, + }, + Vmotion: &types.HostVMotionInfo{ + NetConfig: &types.HostVMotionNetConfig{ + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "VMotionConfig.vmotion.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: "", + }, + IpConfig: (*types.HostIpConfig)(nil), + }, + VirtualNicManagerInfo: &types.HostVirtualNicManagerInfo{ + NetConfig: []types.VirtualNicManagerNetConfig{ + { + NicType: "faultToleranceLogging", + MultiSelectAllowed: true, + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "faultToleranceLogging.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: nil, + }, + { + NicType: 
"management", + MultiSelectAllowed: true, + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "management.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: []string{"management.key-vim.host.VirtualNic-vmk0"}, + }, + { + NicType: "vSphereProvisioning", + MultiSelectAllowed: true, + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "vSphereProvisioning.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: nil, + }, + { + NicType: "vSphereReplication", + MultiSelectAllowed: true, + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "vSphereReplication.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: nil, + }, + { + NicType: "vSphereReplicationNFC", + MultiSelectAllowed: true, + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: nil, + }, + { + NicType: "vmotion", + MultiSelectAllowed: true, + CandidateVnic: 
[]types.HostVirtualNic{ + { + Device: "vmk0", + Key: "vmotion.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: nil, + }, + { + NicType: "vsan", + MultiSelectAllowed: true, + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "vsan.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: nil, + }, + { + NicType: "vsanWitness", + MultiSelectAllowed: true, + CandidateVnic: []types.HostVirtualNic{ + { + Device: "vmk0", + Key: "vsanWitness.key-vim.host.VirtualNic-vmk0", + Portgroup: "Management Network", + Spec: types.HostVirtualNicSpec{ + Ip: &types.HostIpConfig{ + Dhcp: true, + IpAddress: "127.0.0.1", + SubnetMask: "255.0.0.0", + IpV6Config: (*types.HostIpConfigIpV6AddressConfiguration)(nil), + }, + Mac: "00:0c:29:81:d8:a0", + DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil), + Portgroup: "Management Network", + Mtu: 1500, + TsoEnabled: types.NewBool(true), + NetStackInstanceKey: "defaultTcpipStack", + OpaqueNetwork: (*types.HostVirtualNicOpaqueNetworkSpec)(nil), + ExternalId: "", + PinnedPnic: "", + IpRouteSpec: (*types.HostVirtualNicIpRouteSpec)(nil), + }, + Port: "", + }, + }, + SelectedVnic: nil, + }, + }, + }, + Capabilities: &types.HostNetCapabilities{ + CanSetPhysicalNicLinkSpeed: true, + SupportsNicTeaming: true, + NicTeamingPolicy: []string{"loadbalance_ip", "loadbalance_srcmac", "loadbalance_srcid", "failover_explicit"}, + SupportsVlan: true, + UsesServiceConsoleNic: false, + SupportsNetworkHints: true, + MaxPortGroupsPerVswitch: 0, + VswitchConfigSupported: true, + VnicConfigSupported: true, + IpRouteConfigSupported: true, + DnsConfigSupported: true, + DhcpOnVnicSupported: true, + IpV6Supported: types.NewBool(true), + }, + DatastoreCapabilities: &types.HostDatastoreSystemCapabilities{ + NfsMountCreationRequired: true, + NfsMountCreationSupported: true, + LocalDatastoreSupported: false, + VmfsExtentExpansionSupported: types.NewBool(true), + }, + OffloadCapabilities: &types.HostNetOffloadCapabilities{ + CsumOffload: types.NewBool(true), + TcpSegmentation: types.NewBool(true), + ZeroCopyXmit: types.NewBool(true), + }, + Service: &types.HostServiceInfo{ + Service: []types.HostService{ + { + Key: "DCUI", + Label: "Direct Console UI", + Required: false, + Uninstallable: false, 
+ Running: true, + Ruleset: nil, + Policy: "on", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "TSM", + Label: "ESXi Shell", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: nil, + Policy: "off", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "TSM-SSH", + Label: "SSH", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: nil, + Policy: "off", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "lbtd", + Label: "Load-Based Teaming Daemon", + Required: false, + Uninstallable: false, + Running: true, + Ruleset: nil, + Policy: "on", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "lwsmd", + Label: "Active Directory Service", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: nil, + Policy: "off", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "ntpd", + Label: "NTP Daemon", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: []string{"ntpClient"}, + Policy: "off", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "pcscd", + Label: "PC/SC Smart Card Daemon", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: nil, + Policy: "off", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "sfcbd-watchdog", + Label: "CIM Server", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: []string{"CIMHttpServer", "CIMHttpsServer"}, + Policy: "on", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "snmpd", + Label: "SNMP Server", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: []string{"snmp"}, + Policy: "on", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "vmsyslogd", + Label: "Syslog Server", + Required: true, + Uninstallable: false, + Running: true, + Ruleset: nil, + Policy: "on", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "vpxa", + Label: "VMware vCenter Agent", + Required: false, + Uninstallable: false, + Running: false, + Ruleset: []string{"vpxHeartbeats"}, + Policy: "on", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-base", + Description: "This VIB contains all of the base functionality of vSphere ESXi.", + }, + }, + { + Key: "xorg", + Label: "X.Org Server", + Required: 
false, + Uninstallable: false, + Running: false, + Ruleset: nil, + Policy: "on", + SourcePackage: &types.HostServiceSourcePackage{ + SourcePackageName: "esx-xserver", + Description: "This VIB contains X Server used for virtual machine 3D hardware acceleration.", + }, + }, + }, + }, + Firewall: &HostFirewallInfo, + AutoStart: &types.HostAutoStartManagerConfig{ + Defaults: &types.AutoStartDefaults{ + Enabled: (*bool)(nil), + StartDelay: 120, + StopDelay: 120, + WaitForHeartbeat: types.NewBool(false), + StopAction: "PowerOff", + }, + PowerInfo: nil, + }, + ActiveDiagnosticPartition: &types.HostDiagnosticPartition{ + StorageType: "directAttached", + DiagnosticType: "singleHost", + Slots: -15, + Id: types.HostScsiDiskPartition{ + DiskName: "mpx.vmhba0:C0:T0:L0", + Partition: 9, + }, + }, + Option: nil, + OptionDef: nil, + Flags: &types.HostFlagInfo{}, + AdminDisabled: (*bool)(nil), + LockdownMode: "lockdownDisabled", + Ipmi: (*types.HostIpmiInfo)(nil), + SslThumbprintInfo: (*types.HostSslThumbprintInfo)(nil), + SslThumbprintData: nil, + Certificate: []uint8{0x31, 0x30}, + PciPassthruInfo: nil, + AuthenticationManagerInfo: &types.HostAuthenticationManagerInfo{ + AuthConfig: []types.BaseHostAuthenticationStoreInfo{ + &types.HostLocalAuthenticationInfo{ + HostAuthenticationStoreInfo: types.HostAuthenticationStoreInfo{ + Enabled: true, + }, + }, + &types.HostActiveDirectoryInfo{ + HostDirectoryStoreInfo: types.HostDirectoryStoreInfo{}, + JoinedDomain: "", + TrustedDomain: nil, + DomainMembershipStatus: "", + SmartCardAuthenticationEnabled: types.NewBool(false), + }, + }, + }, + FeatureVersion: nil, + PowerSystemCapability: &types.PowerSystemCapability{ + AvailablePolicy: []types.HostPowerPolicy{ + { + Key: 1, + Name: "PowerPolicy.static.name", + ShortName: "static", + Description: "PowerPolicy.static.description", + }, + { + Key: 2, + Name: "PowerPolicy.dynamic.name", + ShortName: "dynamic", + Description: "PowerPolicy.dynamic.description", + }, + { + Key: 3, + Name: "PowerPolicy.low.name", + ShortName: "low", + Description: "PowerPolicy.low.description", + }, + { + Key: 4, + Name: "PowerPolicy.custom.name", + ShortName: "custom", + Description: "PowerPolicy.custom.description", + }, + }, + }, + PowerSystemInfo: &types.PowerSystemInfo{ + CurrentPolicy: types.HostPowerPolicy{ + Key: 2, + Name: "PowerPolicy.dynamic.name", + ShortName: "dynamic", + Description: "PowerPolicy.dynamic.description", + }, + }, + CacheConfigurationInfo: []types.HostCacheConfigurationInfo{ + { + Key: types.ManagedObjectReference{Type: "Datastore", Value: "5980f676-21a5db76-9eef-000c2981d8a0"}, + SwapSize: 0, + }, + }, + WakeOnLanCapable: types.NewBool(false), + FeatureCapability: nil, + MaskedFeatureCapability: nil, + VFlashConfigInfo: nil, + VsanHostConfig: &types.VsanHostConfigInfo{ + Enabled: types.NewBool(false), + HostSystem: &types.ManagedObjectReference{Type: "HostSystem", Value: "ha-host"}, + ClusterInfo: &types.VsanHostConfigInfoClusterInfo{}, + StorageInfo: &types.VsanHostConfigInfoStorageInfo{ + AutoClaimStorage: types.NewBool(false), + DiskMapping: nil, + DiskMapInfo: nil, + ChecksumEnabled: (*bool)(nil), + }, + NetworkInfo: &types.VsanHostConfigInfoNetworkInfo{}, + FaultDomainInfo: &types.VsanHostFaultDomainInfo{}, + }, + DomainList: nil, + ScriptCheckSum: nil, + HostConfigCheckSum: nil, + GraphicsInfo: nil, + SharedPassthruGpuTypes: nil, + GraphicsConfig: &types.HostGraphicsConfig{ + HostDefaultGraphicsType: "shared", + SharedPassthruAssignmentPolicy: "performance", + DeviceType: nil, + }, + IoFilterInfo: 
[]types.HostIoFilterInfo{ + { + IoFilterInfo: types.IoFilterInfo{ + Id: "VMW_spm_1.0.0", + Name: "spm", + Vendor: "VMW", + Version: "1.0.230", + Type: "datastoreIoControl", + Summary: "VMware Storage I/O Control", + ReleaseDate: "2016-07-21", + }, + Available: true, + }, + { + IoFilterInfo: types.IoFilterInfo{ + Id: "VMW_vmwarevmcrypt_1.0.0", + Name: "vmwarevmcrypt", + Vendor: "VMW", + Version: "1.0.0", + Type: "encryption", + Summary: "VMcrypt IO Filter", + ReleaseDate: "2016-07-21", + }, + Available: true, + }, + }, + SriovDevicePool: nil, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go new file mode 100644 index 00000000000..11c1285aad7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go @@ -0,0 +1,1425 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// HostFirewallInfo is the default template for the HostSystem config.firewall property. +// Capture method: +// govc object.collect -s -dump HostSystem:ha-host config.firewall +var HostFirewallInfo = types.HostFirewallInfo{ + DynamicData: types.DynamicData{}, + DefaultPolicy: types.HostFirewallDefaultPolicy{ + DynamicData: types.DynamicData{}, + IncomingBlocked: types.NewBool(true), + OutgoingBlocked: types.NewBool(true), + }, + Ruleset: []types.HostFirewallRuleset{ + { + DynamicData: types.DynamicData{}, + Key: "CIMHttpServer", + Label: "CIM Server", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 5988, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "sfcbd-watchdog", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "CIMHttpsServer", + Label: "CIM Secure Server", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 5989, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "sfcbd-watchdog", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "CIMSLP", + Label: "CIM SLP", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 427, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 427, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 427, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 427, + 
EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "DHCPv6", + Label: "DHCPv6", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 547, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 546, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 547, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 546, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "DVFilter", + Label: "DVFilter", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 2222, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "DVSSync", + Label: "DVSSync", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 8302, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 8301, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 8301, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 8302, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "HBR", + Label: "HBR", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 31031, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 44046, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "NFC", + Label: "NFC", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 902, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 902, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: 
nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "WOL", + Label: "WOL", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 9, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "activeDirectoryAll", + Label: "Active Directory All", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 88, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 88, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 123, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 137, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 139, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 389, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 389, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 445, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 464, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 464, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 3268, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 7476, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 2020, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "cmmds", + Label: "Virtual SAN Clustering Service", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 12345, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 23451, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 12345, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 23451, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 12321, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 12321, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: 
"", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "dhcp", + Label: "DHCP Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 68, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 68, + EndPort: 0, + Direction: "outbound", + PortType: "src", + Protocol: "udp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "dns", + Label: "DNS Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 53, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 53, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 53, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "esxupdate", + Label: "esxupdate", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 443, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "faultTolerance", + Label: "Fault Tolerance", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 80, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 8300, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 8300, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "ftpClient", + Label: "FTP Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 21, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 20, + EndPort: 0, + Direction: "inbound", + PortType: "src", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "gdbserver", + Label: "gdbserver", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 1000, + EndPort: 9999, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: 
types.DynamicData{}, + Port: 50000, + EndPort: 50999, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "httpClient", + Label: "httpClient", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 80, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 443, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "iSCSI", + Label: "Software iSCSI Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 3260, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "iofiltervp", + Label: "iofiltervp", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 9080, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "ipfam", + Label: "NSX Distributed Logical Router Service", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 6999, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 6999, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "nfs41Client", + Label: "nfs41Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 0, + EndPort: 65535, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "nfsClient", + Label: "NFS Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 0, + EndPort: 65535, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "ntpClient", + Label: "NTP Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 123, + EndPort: 0, 
+ Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "ntpd", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "pvrdma", + Label: "pvrdma", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 28250, + EndPort: 28761, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 28250, + EndPort: 28761, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "rabbitmqproxy", + Label: "rabbitmqproxy", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 5671, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "rdt", + Label: "Virtual SAN Transport", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 2233, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 2233, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "remoteSerialPort", + Label: "VM serial port connected over network", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 0, + EndPort: 65535, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 23, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 1024, + EndPort: 65535, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "snmp", + Label: "SNMP Server", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 161, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "snmpd", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "sshClient", + Label: "SSH Client", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 22, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + 
IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "sshServer", + Label: "SSH Server", + Required: true, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 22, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "syslog", + Label: "syslog", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 514, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 514, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 1514, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "updateManager", + Label: "vCenter Update Manager", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 80, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 9000, + EndPort: 9100, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vMotion", + Label: "vMotion", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 8000, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 8000, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vSPC", + Label: "VM serial port connected to vSPC", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 0, + EndPort: 65535, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vSphereClient", + Label: "vSphere Web Client", + Required: true, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 902, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 443, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vpxHeartbeats", + Label: 
"VMware vCenter Agent", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 902, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "vpxa", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vsanEncryption", + Label: "vsanEncryption", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 0, + EndPort: 65535, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vsanhealth-multicasttest", + Label: "vsanhealth-multicasttest", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 5001, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "udp", + }, + { + DynamicData: types.DynamicData{}, + Port: 5001, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "udp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vsanvp", + Label: "vsanvp", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 8080, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + { + DynamicData: types.DynamicData{}, + Port: 8080, + EndPort: 0, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "vvold", + Label: "vvold", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 0, + EndPort: 65535, + Direction: "outbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: false, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + { + DynamicData: types.DynamicData{}, + Key: "webAccess", + Label: "vSphere Web Access", + Required: false, + Rule: []types.HostFirewallRule{ + { + DynamicData: types.DynamicData{}, + Port: 80, + EndPort: 0, + Direction: "inbound", + PortType: "dst", + Protocol: "tcp", + }, + }, + Service: "", + Enabled: true, + AllowedHosts: &types.HostFirewallRulesetIpList{ + DynamicData: types.DynamicData{}, + IpAddress: nil, + IpNetwork: nil, + AllIp: true, + }, + }, + }, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go new file mode 100644 index 00000000000..aa633ad34bd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go @@ -0,0 +1,864 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import ( + "time" + + "github.com/vmware/govmomi/vim25/types" +) + +// HostHardwareInfo is the default template for the HostSystem hardware property. +// Capture method: +// govc object.collect -s -dump HostSystem:ha-host hardware +var HostHardwareInfo = &types.HostHardwareInfo{ + SystemInfo: types.HostSystemInfo{ + Vendor: "VMware, Inc.", + Model: "VMware Virtual Platform", + Uuid: "e88d4d56-9f1e-3ea1-71fa-13a8e1a7fd70", + OtherIdentifyingInfo: []types.HostSystemIdentificationInfo{ + { + IdentifierValue: " No Asset Tag", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + Label: "Asset Tag", + Summary: "Asset tag of the system", + }, + Key: "AssetTag", + }, + }, + { + IdentifierValue: "[MS_VM_CERT/SHA1/27d66596a61c48dd3dc7216fd715126e33f59ae7]", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + Label: "OEM specific string", + Summary: "OEM specific string", + }, + Key: "OemSpecificString", + }, + }, + { + IdentifierValue: "Welcome to the Virtual Machine", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + Label: "OEM specific string", + Summary: "OEM specific string", + }, + Key: "OemSpecificString", + }, + }, + { + IdentifierValue: "VMware-56 4d 8d e8 1e 9f a1 3e-71 fa 13 a8 e1 a7 fd 70", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + Label: "Service tag", + Summary: "Service tag of the system", + }, + Key: "ServiceTag", + }, + }, + }, + }, + CpuPowerManagementInfo: &types.HostCpuPowerManagementInfo{ + CurrentPolicy: "Balanced", + HardwareSupport: "", + }, + CpuInfo: types.HostCpuInfo{ + NumCpuPackages: 2, + NumCpuCores: 2, + NumCpuThreads: 2, + Hz: 3591345000, + }, + CpuPkg: []types.HostCpuPackage{ + { + Index: 0, + Vendor: "intel", + Hz: 3591345000, + BusHz: 115849838, + Description: "Intel(R) Xeon(R) CPU E5-1620 0 @ 3.60GHz", + ThreadId: []int16{0}, + CpuFeature: []types.HostCpuIdInfo{ + { + Level: 0, + Vendor: "", + Eax: "0000:0000:0000:0000:0000:0000:0000:1101", + Ebx: "0111:0101:0110:1110:0110:0101:0100:0111", + Ecx: "0110:1100:0110:0101:0111:0100:0110:1110", + Edx: "0100:1001:0110:0101:0110:1110:0110:1001", + }, + { + Level: 1, + Vendor: "", + Eax: "0000:0000:0000:0010:0000:0110:1101:0111", + Ebx: "0000:0000:0000:0001:0000:1000:0000:0000", + Ecx: "1001:0111:1011:1010:0010:0010:0010:1011", + Edx: "0000:1111:1010:1011:1111:1011:1111:1111", + }, + { + Level: -2147483648, + Vendor: "", + Eax: "1000:0000:0000:0000:0000:0000:0000:1000", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0000", + Edx: "0000:0000:0000:0000:0000:0000:0000:0000", + }, + { + Level: -2147483647, + Vendor: "", + Eax: "0000:0000:0000:0000:0000:0000:0000:0000", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0001", + Edx: "0010:1000:0001:0000:0000:1000:0000:0000", + }, + { + Level: -2147483640, + Vendor: "", + Eax: "0000:0000:0000:0000:0011:0000:0010:1010", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0000", + Edx: 
"0000:0000:0000:0000:0000:0000:0000:0000", + }, + }, + }, + { + Index: 1, + Vendor: "intel", + Hz: 3591345000, + BusHz: 115849838, + Description: "Intel(R) Xeon(R) CPU E5-1620 0 @ 3.60GHz", + ThreadId: []int16{1}, + CpuFeature: []types.HostCpuIdInfo{ + { + Level: 0, + Vendor: "", + Eax: "0000:0000:0000:0000:0000:0000:0000:1101", + Ebx: "0111:0101:0110:1110:0110:0101:0100:0111", + Ecx: "0110:1100:0110:0101:0111:0100:0110:1110", + Edx: "0100:1001:0110:0101:0110:1110:0110:1001", + }, + { + Level: 1, + Vendor: "", + Eax: "0000:0000:0000:0010:0000:0110:1101:0111", + Ebx: "0000:0010:0000:0001:0000:1000:0000:0000", + Ecx: "1001:0111:1011:1010:0010:0010:0010:1011", + Edx: "0000:1111:1010:1011:1111:1011:1111:1111", + }, + { + Level: -2147483648, + Vendor: "", + Eax: "1000:0000:0000:0000:0000:0000:0000:1000", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0000", + Edx: "0000:0000:0000:0000:0000:0000:0000:0000", + }, + { + Level: -2147483647, + Vendor: "", + Eax: "0000:0000:0000:0000:0000:0000:0000:0000", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0001", + Edx: "0010:1000:0001:0000:0000:1000:0000:0000", + }, + { + Level: -2147483640, + Vendor: "", + Eax: "0000:0000:0000:0000:0011:0000:0010:1010", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0000", + Edx: "0000:0000:0000:0000:0000:0000:0000:0000", + }, + }, + }, + }, + MemorySize: 4294430720, + NumaInfo: &types.HostNumaInfo{ + Type: "NUMA", + NumNodes: 1, + NumaNode: []types.HostNumaNode{ + { + TypeId: 0x0, + CpuID: []int16{1, 0}, + MemoryRangeBegin: 4294967296, + MemoryRangeLength: 1073741824, + }, + }, + }, + SmcPresent: types.NewBool(false), + PciDevice: []types.HostPciDevice{ + { + Id: "0000:00:00.0", + ClassId: 1536, + Bus: 0x0, + Slot: 0x0, + Function: 0x0, + VendorId: -32634, + SubVendorId: 5549, + VendorName: "Intel Corporation", + DeviceId: 29072, + SubDeviceId: 6518, + ParentBridge: "", + DeviceName: "Virtual Machine Chipset", + }, + { + Id: "0000:00:01.0", + ClassId: 1540, + Bus: 0x0, + Slot: 0x1, + Function: 0x0, + VendorId: -32634, + SubVendorId: 0, + VendorName: "Intel Corporation", + DeviceId: 29073, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "440BX/ZX/DX - 82443BX/ZX/DX AGP bridge", + }, + { + Id: "0000:00:07.0", + ClassId: 1537, + Bus: 0x0, + Slot: 0x7, + Function: 0x0, + VendorId: -32634, + SubVendorId: 5549, + VendorName: "Intel Corporation", + DeviceId: 28944, + SubDeviceId: 6518, + ParentBridge: "", + DeviceName: "Virtual Machine Chipset", + }, + { + Id: "0000:00:07.1", + ClassId: 257, + Bus: 0x0, + Slot: 0x7, + Function: 0x1, + VendorId: -32634, + SubVendorId: 5549, + VendorName: "Intel Corporation", + DeviceId: 28945, + SubDeviceId: 6518, + ParentBridge: "", + DeviceName: "PIIX4 for 430TX/440BX/MX IDE Controller", + }, + { + Id: "0000:00:07.3", + ClassId: 1664, + Bus: 0x0, + Slot: 0x7, + Function: 0x3, + VendorId: -32634, + SubVendorId: 5549, + VendorName: "Intel Corporation", + DeviceId: 28947, + SubDeviceId: 6518, + ParentBridge: "", + DeviceName: "Virtual Machine Chipset", + }, + { + Id: "0000:00:07.7", + ClassId: 2176, + Bus: 0x0, + Slot: 0x7, + Function: 0x7, + VendorId: 5549, + SubVendorId: 5549, + VendorName: "VMware", + DeviceId: 1856, + SubDeviceId: 1856, + ParentBridge: "", + DeviceName: "Virtual Machine Communication Interface", + }, + { + Id: "0000:00:0f.0", + ClassId: 768, + Bus: 0x0, + Slot: 0xf, + Function: 0x0, + VendorId: 5549, + SubVendorId: 5549, + VendorName: 
"VMware", + DeviceId: 1029, + SubDeviceId: 1029, + ParentBridge: "", + DeviceName: "SVGA II Adapter", + }, + { + Id: "0000:00:11.0", + ClassId: 1540, + Bus: 0x0, + Slot: 0x11, + Function: 0x0, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1936, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI bridge", + }, + { + Id: "0000:00:15.0", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x0, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:15.1", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x1, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:15.2", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x2, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:15.3", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x3, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:15.4", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x4, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:15.5", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x5, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:15.6", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x6, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:15.7", + ClassId: 1540, + Bus: 0x0, + Slot: 0x15, + Function: 0x7, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:16.0", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x0, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:16.1", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x1, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:16.2", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x2, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:16.3", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x3, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:16.4", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x4, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: 
"0000:00:16.5", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x5, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:16.6", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x6, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:16.7", + ClassId: 1540, + Bus: 0x0, + Slot: 0x16, + Function: 0x7, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.0", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x0, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.1", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x1, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.2", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x2, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.3", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x3, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.4", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x4, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.5", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x5, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.6", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x6, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:17.7", + ClassId: 1540, + Bus: 0x0, + Slot: 0x17, + Function: 0x7, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.0", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x0, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.1", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x1, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.2", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x2, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.3", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x3, + VendorId: 5549, + SubVendorId: 
0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.4", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x4, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.5", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x5, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.6", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x6, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:00:18.7", + ClassId: 1540, + Bus: 0x0, + Slot: 0x18, + Function: 0x7, + VendorId: 5549, + SubVendorId: 0, + VendorName: "VMware", + DeviceId: 1952, + SubDeviceId: 0, + ParentBridge: "", + DeviceName: "PCI Express Root Port", + }, + { + Id: "0000:03:00.0", + ClassId: 263, + Bus: 0x3, + Slot: 0x0, + Function: 0x0, + VendorId: 5549, + SubVendorId: 5549, + VendorName: "VMware", + DeviceId: 1984, + SubDeviceId: 1984, + ParentBridge: "0000:00:15.0", + DeviceName: "PVSCSI SCSI Controller", + }, + { + Id: "0000:0b:00.0", + ClassId: 512, + Bus: 0xb, + Slot: 0x0, + Function: 0x0, + VendorId: 5549, + SubVendorId: 5549, + VendorName: "VMware Inc.", + DeviceId: 1968, + SubDeviceId: 1968, + ParentBridge: "0000:00:16.0", + DeviceName: "vmxnet3 Virtual Ethernet Controller", + }, + { + Id: "0000:13:00.0", + ClassId: 512, + Bus: 0x13, + Slot: 0x0, + Function: 0x0, + VendorId: 5549, + SubVendorId: 5549, + VendorName: "VMware Inc.", + DeviceId: 1968, + SubDeviceId: 1968, + ParentBridge: "0000:00:17.0", + DeviceName: "vmxnet3 Virtual Ethernet Controller", + }, + }, + CpuFeature: []types.HostCpuIdInfo{ + { + Level: 0, + Vendor: "", + Eax: "0000:0000:0000:0000:0000:0000:0000:1101", + Ebx: "0111:0101:0110:1110:0110:0101:0100:0111", + Ecx: "0110:1100:0110:0101:0111:0100:0110:1110", + Edx: "0100:1001:0110:0101:0110:1110:0110:1001", + }, + { + Level: 1, + Vendor: "", + Eax: "0000:0000:0000:0010:0000:0110:1101:0111", + Ebx: "0000:0000:0000:0001:0000:1000:0000:0000", + Ecx: "1001:0111:1011:1010:0010:0010:0010:1011", + Edx: "0000:1111:1010:1011:1111:1011:1111:1111", + }, + { + Level: -2147483648, + Vendor: "", + Eax: "1000:0000:0000:0000:0000:0000:0000:1000", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0000", + Edx: "0000:0000:0000:0000:0000:0000:0000:0000", + }, + { + Level: -2147483647, + Vendor: "", + Eax: "0000:0000:0000:0000:0000:0000:0000:0000", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0001", + Edx: "0010:1000:0001:0000:0000:1000:0000:0000", + }, + { + Level: -2147483640, + Vendor: "", + Eax: "0000:0000:0000:0000:0011:0000:0010:1010", + Ebx: "0000:0000:0000:0000:0000:0000:0000:0000", + Ecx: "0000:0000:0000:0000:0000:0000:0000:0000", + Edx: "0000:0000:0000:0000:0000:0000:0000:0000", + }, + }, + BiosInfo: &types.HostBIOSInfo{ + BiosVersion: "6.00", + ReleaseDate: nil, + Vendor: "", + MajorRelease: 0, + MinorRelease: 0, + FirmwareMajorRelease: 0, + FirmwareMinorRelease: 0, + }, + ReliableMemoryInfo: &types.HostReliableMemoryInfo{}, +} + +func init() { + date, _ := time.Parse("2006-01-02", "2015-07-02") + + HostHardwareInfo.BiosInfo.ReleaseDate = &date 
+} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go new file mode 100644 index 00000000000..9d1ae32dd40 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go @@ -0,0 +1,346 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// HostStorageDeviceInfo is the default template for the HostSystem config.storageDevice property. +// Capture method: +// govc object.collect -s -dump HostSystem:ha-host config.storageDevice +var HostStorageDeviceInfo = types.HostStorageDeviceInfo{ + HostBusAdapter: []types.BaseHostHostBusAdapter{ + &types.HostParallelScsiHba{ + HostHostBusAdapter: types.HostHostBusAdapter{ + Key: "key-vim.host.ParallelScsiHba-vmhba0", + Device: "vmhba0", + Bus: 3, + Status: "unknown", + Model: "PVSCSI SCSI Controller", + Driver: "pvscsi", + Pci: "0000:03:00.0", + }, + }, + &types.HostBlockHba{ + HostHostBusAdapter: types.HostHostBusAdapter{ + Key: "key-vim.host.BlockHba-vmhba1", + Device: "vmhba1", + Bus: 0, + Status: "unknown", + Model: "PIIX4 for 430TX/440BX/MX IDE Controller", + Driver: "vmkata", + Pci: "0000:00:07.1", + }, + }, + &types.HostBlockHba{ + HostHostBusAdapter: types.HostHostBusAdapter{ + Key: "key-vim.host.BlockHba-vmhba64", + Device: "vmhba64", + Bus: 0, + Status: "unknown", + Model: "PIIX4 for 430TX/440BX/MX IDE Controller", + Driver: "vmkata", + Pci: "0000:00:07.1", + }, + }, + }, + ScsiLun: []types.BaseScsiLun{ + &types.ScsiLun{ + HostDevice: types.HostDevice{ + DeviceName: "/vmfs/devices/cdrom/mpx.vmhba1:C0:T0:L0", + DeviceType: "cdrom", + }, + Key: "key-vim.host.ScsiLun-0005000000766d686261313a303a30", + Uuid: "0005000000766d686261313a303a30", + Descriptor: []types.ScsiLunDescriptor{ + { + Quality: "lowQuality", + Id: "mpx.vmhba1:C0:T0:L0", + }, + { + Quality: "lowQuality", + Id: "vml.0005000000766d686261313a303a30", + }, + { + Quality: "lowQuality", + Id: "0005000000766d686261313a303a30", + }, + }, + CanonicalName: "mpx.vmhba1:C0:T0:L0", + DisplayName: "Local NECVMWar CD-ROM (mpx.vmhba1:C0:T0:L0)", + LunType: "cdrom", + Vendor: "NECVMWar", + Model: "VMware IDE CDR00", + Revision: "1.00", + ScsiLevel: 5, + SerialNumber: "unavailable", + DurableName: (*types.ScsiLunDurableName)(nil), + AlternateName: []types.ScsiLunDurableName{ + { + Namespace: "GENERIC_VPD", + NamespaceId: 0x5, + Data: []uint8{0x2d, 0x37, 0x39}, + }, + { + Namespace: "GENERIC_VPD", + NamespaceId: 0x5, + Data: []uint8{0x30}, + }, + }, + StandardInquiry: []uint8{0x30}, + QueueDepth: 0, + OperationalState: []string{"ok"}, + Capabilities: &types.ScsiLunCapabilities{}, + VStorageSupport: "vStorageUnsupported", + ProtocolEndpoint: types.NewBool(false), + }, + &types.HostScsiDisk{ + ScsiLun: types.ScsiLun{ + HostDevice: types.HostDevice{ + DeviceName: "/vmfs/devices/disks/mpx.vmhba0:C0:T0:L0", + DeviceType: "disk", + }, + Key: 
"key-vim.host.ScsiDisk-0000000000766d686261303a303a30", + Uuid: "0000000000766d686261303a303a30", + Descriptor: []types.ScsiLunDescriptor{ + { + Quality: "lowQuality", + Id: "mpx.vmhba0:C0:T0:L0", + }, + { + Quality: "lowQuality", + Id: "vml.0000000000766d686261303a303a30", + }, + { + Quality: "lowQuality", + Id: "0000000000766d686261303a303a30", + }, + }, + CanonicalName: "mpx.vmhba0:C0:T0:L0", + DisplayName: "Local VMware, Disk (mpx.vmhba0:C0:T0:L0)", + LunType: "disk", + Vendor: "VMware, ", + Model: "VMware Virtual S", + Revision: "1.0 ", + ScsiLevel: 2, + SerialNumber: "unavailable", + DurableName: (*types.ScsiLunDurableName)(nil), + AlternateName: []types.ScsiLunDurableName{ + { + Namespace: "GENERIC_VPD", + NamespaceId: 0x5, + Data: []uint8{0x2d, 0x37, 0x39}, + }, + { + Namespace: "GENERIC_VPD", + NamespaceId: 0x5, + Data: []uint8{0x30}, + }, + }, + StandardInquiry: []uint8{0x30}, + QueueDepth: 1024, + OperationalState: []string{"ok"}, + Capabilities: &types.ScsiLunCapabilities{}, + VStorageSupport: "vStorageUnsupported", + ProtocolEndpoint: types.NewBool(false), + }, + Capacity: types.HostDiskDimensionsLba{ + BlockSize: 512, + Block: 67108864, + }, + DevicePath: "/vmfs/devices/disks/mpx.vmhba0:C0:T0:L0", + Ssd: types.NewBool(true), + LocalDisk: types.NewBool(true), + PhysicalLocation: nil, + EmulatedDIXDIFEnabled: types.NewBool(false), + VsanDiskInfo: (*types.VsanHostVsanDiskInfo)(nil), + ScsiDiskType: "native512", + }, + }, + ScsiTopology: &types.HostScsiTopology{ + Adapter: []types.HostScsiTopologyInterface{ + { + Key: "key-vim.host.ScsiTopology.Interface-vmhba0", + Adapter: "key-vim.host.ParallelScsiHba-vmhba0", + Target: []types.HostScsiTopologyTarget{ + { + Key: "key-vim.host.ScsiTopology.Target-vmhba0:0:0", + Target: 0, + Lun: []types.HostScsiTopologyLun{ + { + Key: "key-vim.host.ScsiTopology.Lun-0000000000766d686261303a303a30", + Lun: 0, + ScsiLun: "key-vim.host.ScsiDisk-0000000000766d686261303a303a30", + }, + }, + Transport: &types.HostParallelScsiTargetTransport{}, + }, + }, + }, + { + Key: "key-vim.host.ScsiTopology.Interface-vmhba1", + Adapter: "key-vim.host.BlockHba-vmhba1", + Target: []types.HostScsiTopologyTarget{ + { + Key: "key-vim.host.ScsiTopology.Target-vmhba1:0:0", + Target: 0, + Lun: []types.HostScsiTopologyLun{ + { + Key: "key-vim.host.ScsiTopology.Lun-0005000000766d686261313a303a30", + Lun: 0, + ScsiLun: "key-vim.host.ScsiLun-0005000000766d686261313a303a30", + }, + }, + Transport: &types.HostBlockAdapterTargetTransport{}, + }, + }, + }, + { + Key: "key-vim.host.ScsiTopology.Interface-vmhba64", + Adapter: "key-vim.host.BlockHba-vmhba64", + Target: nil, + }, + }, + }, + MultipathInfo: &types.HostMultipathInfo{ + Lun: []types.HostMultipathInfoLogicalUnit{ + { + Key: "key-vim.host.MultipathInfo.LogicalUnit-0005000000766d686261313a303a30", + Id: "0005000000766d686261313a303a30", + Lun: "key-vim.host.ScsiLun-0005000000766d686261313a303a30", + Path: []types.HostMultipathInfoPath{ + { + Key: "key-vim.host.MultipathInfo.Path-vmhba1:C0:T0:L0", + Name: "vmhba1:C0:T0:L0", + PathState: "active", + State: "active", + IsWorkingPath: types.NewBool(true), + Adapter: "key-vim.host.BlockHba-vmhba1", + Lun: "key-vim.host.MultipathInfo.LogicalUnit-0005000000766d686261313a303a30", + Transport: &types.HostBlockAdapterTargetTransport{}, + }, + }, + Policy: &types.HostMultipathInfoFixedLogicalUnitPolicy{ + HostMultipathInfoLogicalUnitPolicy: types.HostMultipathInfoLogicalUnitPolicy{ + Policy: "VMW_PSP_FIXED", + }, + Prefer: "vmhba1:C0:T0:L0", + }, + StorageArrayTypePolicy: 
&types.HostMultipathInfoLogicalUnitStorageArrayTypePolicy{ + Policy: "VMW_SATP_LOCAL", + }, + }, + { + Key: "key-vim.host.MultipathInfo.LogicalUnit-0000000000766d686261303a303a30", + Id: "0000000000766d686261303a303a30", + Lun: "key-vim.host.ScsiDisk-0000000000766d686261303a303a30", + Path: []types.HostMultipathInfoPath{ + { + Key: "key-vim.host.MultipathInfo.Path-vmhba0:C0:T0:L0", + Name: "vmhba0:C0:T0:L0", + PathState: "active", + State: "active", + IsWorkingPath: types.NewBool(true), + Adapter: "key-vim.host.ParallelScsiHba-vmhba0", + Lun: "key-vim.host.MultipathInfo.LogicalUnit-0000000000766d686261303a303a30", + Transport: &types.HostParallelScsiTargetTransport{}, + }, + }, + Policy: &types.HostMultipathInfoFixedLogicalUnitPolicy{ + HostMultipathInfoLogicalUnitPolicy: types.HostMultipathInfoLogicalUnitPolicy{ + Policy: "VMW_PSP_FIXED", + }, + Prefer: "vmhba0:C0:T0:L0", + }, + StorageArrayTypePolicy: &types.HostMultipathInfoLogicalUnitStorageArrayTypePolicy{ + Policy: "VMW_SATP_LOCAL", + }, + }, + }, + }, + PlugStoreTopology: &types.HostPlugStoreTopology{ + Adapter: []types.HostPlugStoreTopologyAdapter{ + { + Key: "key-vim.host.PlugStoreTopology.Adapter-vmhba0", + Adapter: "key-vim.host.ParallelScsiHba-vmhba0", + Path: []string{"key-vim.host.PlugStoreTopology.Path-vmhba0:C0:T0:L0"}, + }, + { + Key: "key-vim.host.PlugStoreTopology.Adapter-vmhba1", + Adapter: "key-vim.host.BlockHba-vmhba1", + Path: []string{"key-vim.host.PlugStoreTopology.Path-vmhba1:C0:T0:L0"}, + }, + { + Key: "key-vim.host.PlugStoreTopology.Adapter-vmhba64", + Adapter: "key-vim.host.BlockHba-vmhba64", + Path: nil, + }, + }, + Path: []types.HostPlugStoreTopologyPath{ + { + Key: "key-vim.host.PlugStoreTopology.Path-vmhba0:C0:T0:L0", + Name: "vmhba0:C0:T0:L0", + ChannelNumber: 0, + TargetNumber: 0, + LunNumber: 0, + Adapter: "key-vim.host.PlugStoreTopology.Adapter-vmhba0", + Target: "key-vim.host.PlugStoreTopology.Target-pscsi.0:0", + Device: "key-vim.host.PlugStoreTopology.Device-0000000000766d686261303a303a30", + }, + { + Key: "key-vim.host.PlugStoreTopology.Path-vmhba1:C0:T0:L0", + Name: "vmhba1:C0:T0:L0", + ChannelNumber: 0, + TargetNumber: 0, + LunNumber: 0, + Adapter: "key-vim.host.PlugStoreTopology.Adapter-vmhba1", + Target: "key-vim.host.PlugStoreTopology.Target-ide.0:0", + Device: "key-vim.host.PlugStoreTopology.Device-0005000000766d686261313a303a30", + }, + }, + Target: []types.HostPlugStoreTopologyTarget{ + { + Key: "key-vim.host.PlugStoreTopology.Target-pscsi.0:0", + Transport: &types.HostParallelScsiTargetTransport{}, + }, + { + Key: "key-vim.host.PlugStoreTopology.Target-ide.0:0", + Transport: &types.HostBlockAdapterTargetTransport{}, + }, + }, + Device: []types.HostPlugStoreTopologyDevice{ + { + Key: "key-vim.host.PlugStoreTopology.Device-0005000000766d686261313a303a30", + Lun: "key-vim.host.ScsiLun-0005000000766d686261313a303a30", + Path: []string{"key-vim.host.PlugStoreTopology.Path-vmhba1:C0:T0:L0"}, + }, + { + Key: "key-vim.host.PlugStoreTopology.Device-0000000000766d686261303a303a30", + Lun: "key-vim.host.ScsiDisk-0000000000766d686261303a303a30", + Path: []string{"key-vim.host.PlugStoreTopology.Path-vmhba0:C0:T0:L0"}, + }, + }, + Plugin: []types.HostPlugStoreTopologyPlugin{ + { + Key: "key-vim.host.PlugStoreTopology.Plugin-NMP", + Name: "NMP", + Device: []string{"key-vim.host.PlugStoreTopology.Device-0005000000766d686261313a303a30", "key-vim.host.PlugStoreTopology.Device-0000000000766d686261303a303a30"}, + ClaimedPath: []string{"key-vim.host.PlugStoreTopology.Path-vmhba0:C0:T0:L0", 
"key-vim.host.PlugStoreTopology.Path-vmhba1:C0:T0:L0"}, + }, + }, + }, + SoftwareInternetScsiEnabled: false, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_system.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_system.go new file mode 100644 index 00000000000..13aa8b17a3e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_system.go @@ -0,0 +1,1791 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import ( + "time" + + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// HostSystem is the default template for HostSystem properties. +// Capture method: +// govc host.info -dump +var HostSystem = mo.HostSystem{ + ManagedEntity: mo.ManagedEntity{ + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{Type: "HostSystem", Value: "ha-host"}, + Value: nil, + AvailableField: nil, + }, + Parent: &types.ManagedObjectReference{Type: "ComputeResource", Value: "ha-compute-res"}, + CustomValue: nil, + OverallStatus: "", + ConfigStatus: "", + ConfigIssue: nil, + EffectiveRole: nil, + Permission: nil, + Name: "", + DisabledMethod: nil, + RecentTask: nil, + DeclaredAlarmState: nil, + TriggeredAlarmState: nil, + AlarmActionsEnabled: (*bool)(nil), + Tag: nil, + }, + Runtime: types.HostRuntimeInfo{ + DynamicData: types.DynamicData{}, + ConnectionState: "connected", + PowerState: "poweredOn", + StandbyMode: "", + InMaintenanceMode: false, + BootTime: (*time.Time)(nil), + HealthSystemRuntime: &types.HealthSystemRuntime{ + DynamicData: types.DynamicData{}, + SystemHealthInfo: &types.HostSystemHealthInfo{ + DynamicData: types.DynamicData{}, + NumericSensorInfo: []types.HostNumericSensorInfo{ + { + DynamicData: types.DynamicData{}, + Name: "VMware Rollup Health State", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "system", + }, + { + DynamicData: types.DynamicData{}, + Name: "CPU socket #0 Level-1 Cache is 16384 B", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Processors", + }, + { + DynamicData: types.DynamicData{}, + Name: "CPU socket #0 Level-2 Cache is 0 B", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Processors", + }, + { + DynamicData: types.DynamicData{}, + Name: "CPU 
socket #1 Level-1 Cache is 16384 B", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Processors", + }, + { + DynamicData: types.DynamicData{}, + Name: "CPU socket #1 Level-2 Cache is 0 B", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Processors", + }, + { + DynamicData: types.DynamicData{}, + Name: "Phoenix Technologies LTD System BIOS 6.00 2014-05-20 00:00:00.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware, Inc. VMware ESXi 6.0.0 build-3634798 2016-03-07 00:00:00.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware sata-ata-piix 2.12-10vmw.600.2.34.3634798 2016-03-08 07:38:41.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsu-lsi-mptsas-plugin 1.0.0-1vmw.600.2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-mlx4-core 1.9.7.0-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsu-lsi-mpt2sas-plugin 1.0.0-4vmw.600.2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware 
scsi-aacraid 1.1.5.1-9vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-via 0.3.3-2vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-qla4xxx 5.01.03.2-7vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware sata-sata-promise 2.12-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-megaraid-mbox 2.20.5.1-6vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware vsan 6.0.0-2.34.3563498 2016-02-17 17:18:19.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-e1000 8.0.3.1-5vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-serverworks 0.4.3-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: 
"Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-mptspi 4.23.01.00-9vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-nx-nic 5.0.621-5vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware block-cciss 3.6.14-10vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-bnx2x 1.78.80.v60.12-1vmw.600.2.34.3634798 2016-03-08 07:38:41.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ipmi-ipmi-devintf 39.1-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-mptsas 4.23.01.00-9vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-megaraid2 2.00.4-9vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware nvme 1.0e.0.35-1vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, 
+ CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware esx-xserver 6.0.0-2.34.3634798 2016-03-08 07:39:27.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware nmlx4-en 3.0.0.0-1vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsu-hp-hpsa-plugin 1.0.0-1vmw.600.2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-megaraid-sas 6.603.55.00-2vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-enic 2.1.2.38-2vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsi-msgpt3 06.255.12.00-8vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware sata-ahci 3.0-22vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-forcedeth 0.61-2vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + 
Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-atiixp 0.4.6-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware elxnet 10.2.309.6v-1vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware esx-dvfilter-generic-fastpath 6.0.0-2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware uhci-usb-uhci 1.0-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-amd 0.3.10-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware sata-sata-sil24 1.1-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ohci-usb-ohci 1.0-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-igb 5.0.5.1.1-5vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + 
Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-pdc2027x 1.0-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ehci-ehci-hcd 1.0-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsu-lsi-lsi-mr3-plugin 1.0.0-2vmw.600.2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-ixgbe 3.7.13.7.14iov-20vmw.600.2.34.3634798 2016-03-08 07:38:41.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware vsanhealth 6.0.0-3000000.3.0.2.34.3544323 2016-02-12 06:45:30.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-cnic 1.78.76.v60.13-2vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware sata-sata-svw 2.3-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware 
ipmi-ipmi-msghandler 39.1-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware emulex-esx-elxnetcli 10.2.309.6v-2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-aic79xx 3.1-5vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware qlnativefc 2.0.12.0-5vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsu-lsi-lsi-msgpt3-plugin 1.0.0-1vmw.600.2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ima-qla4xxx 2.02.18-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-mlx4-en 1.9.7.0-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-e1000e 3.2.2.1-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + 
SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-tg3 3.131d.v60.4-2vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-hpsa 6.0.0.44-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-bnx2fc 1.78.78.v60.8-1vmw.600.2.34.3634798 2016-03-08 07:38:41.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware cpu-microcode 6.0.0-2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-fnic 1.5.0.45-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware nmlx4-rdma 3.0.0.0-1vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-vmxnet3 1.1.3.0-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lpfc 10.2.309.8-2vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + 
CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware esx-ui 1.0.0-3617585 2016-03-03 04:52:43.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-cmd64x 0.2.5-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsi-mr3 6.605.08.00-7vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-hpt3x2n 0.3.4-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware sata-sata-nv 3.5-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware misc-cnic-register 1.78.75.v60.7-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware lsu-lsi-megaraid-sas-plugin 1.0.0-2vmw.600.2.34.3634798 2016-03-08 07:39:28.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ata-pata-sil680 0.4.8-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: 
"Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware esx-tboot 6.0.0-2.34.3634798 2016-03-08 07:39:27.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware xhci-xhci 1.0-3vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-ips 7.12.05-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-adp94xx 1.0.8.12-6vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware rste 2.0.2.0088-4vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware ipmi-ipmi-si-drv 39.1-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMWARE mtip32xx-native 3.8.5-1vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-mpt2sas 19.00.00.00-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: 
types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware misc-drivers 6.0.0-2.34.3634798 2016-03-08 07:38:41.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware nmlx4-core 3.0.0.0-1vmw.600.2.34.3634798 2016-03-08 07:38:46.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware sata-sata-sil 2.3-4vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware esx-base 6.0.0-2.34.3634798 2016-03-08 07:39:18.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware scsi-bnx2i 2.78.76.v60.8-1vmw.600.2.34.3634798 2016-03-08 07:38:41.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "VMware net-bnx2 2.2.4f.v60.10-1vmw.600.2.34.3634798 2016-03-08 07:38:45.000", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "e1000 driver 8.0.3.1-NAPI", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + { + DynamicData: types.DynamicData{}, + Name: "e1000 device firmware N/A", + HealthState: &types.ElementDescription{ + Description: types.Description{ + DynamicData: 
types.DynamicData{}, + Label: "Green", + Summary: "Sensor is operating under normal conditions", + }, + Key: "green", + }, + CurrentReading: 0, + UnitModifier: 0, + BaseUnits: "", + RateUnits: "", + SensorType: "Software Components", + }, + }, + }, + HardwareStatusInfo: &types.HostHardwareStatusInfo{ + DynamicData: types.DynamicData{}, + MemoryStatusInfo: nil, + CpuStatusInfo: []types.BaseHostHardwareElementInfo{ + &types.HostHardwareElementInfo{ + DynamicData: types.DynamicData{}, + Name: "CPU socket #0", + Status: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Physical element is functioning as expected", + }, + Key: "Green", + }, + }, + &types.HostHardwareElementInfo{ + DynamicData: types.DynamicData{}, + Name: "CPU socket #1", + Status: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Green", + Summary: "Physical element is functioning as expected", + }, + Key: "Green", + }, + }, + }, + StorageStatusInfo: nil, + }, + }, + DasHostState: (*types.ClusterDasFdmHostState)(nil), + TpmPcrValues: nil, + VsanRuntimeInfo: &types.VsanHostRuntimeInfo{}, + NetworkRuntimeInfo: &types.HostRuntimeInfoNetworkRuntimeInfo{ + DynamicData: types.DynamicData{}, + NetStackInstanceRuntimeInfo: []types.HostRuntimeInfoNetStackInstanceRuntimeInfo{ + { + DynamicData: types.DynamicData{}, + NetStackInstanceKey: "defaultTcpipStack", + State: "active", + VmknicKeys: []string{"vmk0"}, + MaxNumberOfConnections: 11000, + CurrentIpV6Enabled: types.NewBool(true), + }, + }, + NetworkResourceRuntime: (*types.HostNetworkResourceRuntime)(nil), + }, + VFlashResourceRuntimeInfo: (*types.HostVFlashManagerVFlashResourceRunTimeInfo)(nil), + HostMaxVirtualDiskCapacity: 68169720922112, + }, + Summary: types.HostListSummary{ + DynamicData: types.DynamicData{}, + Host: &types.ManagedObjectReference{Type: "HostSystem", Value: "ha-host"}, + Hardware: &types.HostHardwareSummary{ + DynamicData: types.DynamicData{}, + Vendor: "VMware, Inc.", + Model: "VMware Virtual Platform", + Uuid: "564d2f12-8041-639b-5018-05a835b72eaf", + OtherIdentifyingInfo: []types.HostSystemIdentificationInfo{ + { + DynamicData: types.DynamicData{}, + IdentifierValue: " No Asset Tag", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Asset Tag", + Summary: "Asset tag of the system", + }, + Key: "AssetTag", + }, + }, + { + DynamicData: types.DynamicData{}, + IdentifierValue: "[MS_VM_CERT/SHA1/27d66596a61c48dd3dc7216fd715126e33f59ae7]", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "OEM specific string", + Summary: "OEM specific string", + }, + Key: "OemSpecificString", + }, + }, + { + DynamicData: types.DynamicData{}, + IdentifierValue: "Welcome to the Virtual Machine", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "OEM specific string", + Summary: "OEM specific string", + }, + Key: "OemSpecificString", + }, + }, + { + DynamicData: types.DynamicData{}, + IdentifierValue: "VMware-56 4d 2f 12 80 41 63 9b-50 18 05 a8 35 b7 2e af", + IdentifierType: &types.ElementDescription{ + Description: types.Description{ + DynamicData: types.DynamicData{}, + Label: "Service tag", + Summary: "Service tag of the system", + }, + Key: "ServiceTag", + }, + }, + }, + MemorySize: 4294430720, + CpuModel: "Intel(R) Core(TM) 
i7-3615QM CPU @ 2.30GHz", + CpuMhz: 2294, + NumCpuPkgs: 2, + NumCpuCores: 2, + NumCpuThreads: 2, + NumNics: 1, + NumHBAs: 3, + }, + Runtime: (*types.HostRuntimeInfo)(nil), + Config: types.HostConfigSummary{ + DynamicData: types.DynamicData{}, + Name: "localhost.localdomain", + Port: 902, + SslThumbprint: "", + Product: &HostConfigInfo.Product, + VmotionEnabled: false, + FaultToleranceEnabled: types.NewBool(true), + FeatureVersion: nil, + AgentVmDatastore: (*types.ManagedObjectReference)(nil), + AgentVmNetwork: (*types.ManagedObjectReference)(nil), + }, + QuickStats: types.HostListSummaryQuickStats{ + DynamicData: types.DynamicData{}, + OverallCpuUsage: 67, + OverallMemoryUsage: 1404, + DistributedCpuFairness: 0, + DistributedMemoryFairness: 0, + Uptime: 77229, + }, + OverallStatus: "gray", + RebootRequired: false, + CustomValue: nil, + ManagementServerIp: "", + MaxEVCModeKey: "", + CurrentEVCModeKey: "", + Gateway: (*types.HostListSummaryGatewaySummary)(nil), + }, + Hardware: (*types.HostHardwareInfo)(nil), + Capability: (*types.HostCapability)(nil), + LicensableResource: types.HostLicensableResourceInfo{}, + ConfigManager: types.HostConfigManager{ + DynamicData: types.DynamicData{}, + CpuScheduler: &types.ManagedObjectReference{Type: "HostCpuSchedulerSystem", Value: "cpuSchedulerSystem"}, + DatastoreSystem: &types.ManagedObjectReference{Type: "HostDatastoreSystem", Value: "ha-datastoresystem"}, + MemoryManager: &types.ManagedObjectReference{Type: "HostMemorySystem", Value: "memoryManagerSystem"}, + StorageSystem: &types.ManagedObjectReference{Type: "HostStorageSystem", Value: "storageSystem"}, + NetworkSystem: &types.ManagedObjectReference{Type: "HostNetworkSystem", Value: "networkSystem"}, + VmotionSystem: &types.ManagedObjectReference{Type: "HostVMotionSystem", Value: "ha-vmotion-system"}, + VirtualNicManager: &types.ManagedObjectReference{Type: "HostVirtualNicManager", Value: "ha-vnic-mgr"}, + ServiceSystem: &types.ManagedObjectReference{Type: "HostServiceSystem", Value: "serviceSystem"}, + FirewallSystem: &types.ManagedObjectReference{Type: "HostFirewallSystem", Value: "firewallSystem"}, + AdvancedOption: &types.ManagedObjectReference{Type: "OptionManager", Value: "ha-adv-options"}, + DiagnosticSystem: &types.ManagedObjectReference{Type: "HostDiagnosticSystem", Value: "diagnosticsystem"}, + AutoStartManager: &types.ManagedObjectReference{Type: "HostAutoStartManager", Value: "ha-autostart-mgr"}, + SnmpSystem: &types.ManagedObjectReference{Type: "HostSnmpSystem", Value: "ha-snmp-agent"}, + DateTimeSystem: &types.ManagedObjectReference{Type: "HostDateTimeSystem", Value: "dateTimeSystem"}, + PatchManager: &types.ManagedObjectReference{Type: "HostPatchManager", Value: "ha-host-patch-manager"}, + ImageConfigManager: &types.ManagedObjectReference{Type: "HostImageConfigManager", Value: "ha-image-config-manager"}, + BootDeviceSystem: (*types.ManagedObjectReference)(nil), + FirmwareSystem: &types.ManagedObjectReference{Type: "HostFirmwareSystem", Value: "ha-firmwareSystem"}, + HealthStatusSystem: &types.ManagedObjectReference{Type: "HostHealthStatusSystem", Value: "healthStatusSystem"}, + PciPassthruSystem: &types.ManagedObjectReference{Type: "HostPciPassthruSystem", Value: "ha-pcipassthrusystem"}, + LicenseManager: &types.ManagedObjectReference{Type: "LicenseManager", Value: "ha-license-manager"}, + KernelModuleSystem: &types.ManagedObjectReference{Type: "HostKernelModuleSystem", Value: "kernelModuleSystem"}, + AuthenticationManager: &types.ManagedObjectReference{Type: 
"HostAuthenticationManager", Value: "ha-auth-manager"}, + PowerSystem: &types.ManagedObjectReference{Type: "HostPowerSystem", Value: "ha-power-system"}, + CacheConfigurationManager: &types.ManagedObjectReference{Type: "HostCacheConfigurationManager", Value: "ha-cache-configuration-manager"}, + EsxAgentHostManager: (*types.ManagedObjectReference)(nil), + IscsiManager: &types.ManagedObjectReference{Type: "IscsiManager", Value: "iscsiManager"}, + VFlashManager: &types.ManagedObjectReference{Type: "HostVFlashManager", Value: "ha-vflash-manager"}, + VsanSystem: &types.ManagedObjectReference{Type: "HostVsanSystem", Value: "vsanSystem"}, + MessageBusProxy: &types.ManagedObjectReference{Type: "MessageBusProxy", Value: "messageBusProxy"}, + UserDirectory: &types.ManagedObjectReference{Type: "UserDirectory", Value: "ha-user-directory"}, + AccountManager: &types.ManagedObjectReference{Type: "HostLocalAccountManager", Value: "ha-localacctmgr"}, + HostAccessManager: &types.ManagedObjectReference{Type: "HostAccessManager", Value: "ha-host-access-manager"}, + GraphicsManager: &types.ManagedObjectReference{Type: "HostGraphicsManager", Value: "ha-graphics-manager"}, + VsanInternalSystem: &types.ManagedObjectReference{Type: "HostVsanInternalSystem", Value: "ha-vsan-internal-system"}, + CertificateManager: &types.ManagedObjectReference{Type: "HostCertificateManager", Value: "ha-certificate-manager"}, + }, + Config: &HostConfigInfo, + Vm: nil, + Datastore: nil, + Network: nil, + DatastoreBrowser: types.ManagedObjectReference{Type: "HostDatastoreBrowser", Value: "ha-host-datastorebrowser"}, + SystemResources: (*types.HostSystemResourceInfo)(nil), +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go b/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go new file mode 100644 index 00000000000..584e3cad373 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go @@ -0,0 +1,9885 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// PerfCounter is the default template for the PerformanceManager perfCounter property. 
+// Capture method: +// govc object.collect -s -dump PerformanceManager:ha-perfmgr perfCounter +var PerfCounter = []types.PerfCounterInfo{ + { + Key: 0, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "CPU usage as a percentage during the interval", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "none", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{1, 2, 3}, + }, + { + Key: 1, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "CPU usage as a percentage during the interval", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 2, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "CPU usage as a percentage during the interval", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "maximum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 3, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "CPU usage as a percentage during the interval", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "minimum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 4, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage in MHz", + Summary: "CPU usage in megahertz during the interval", + }, + Key: "usagemhz", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "none", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{5, 6, 7}, + }, + { + Key: 5, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage in MHz", + Summary: "CPU usage in megahertz during the interval", + }, + Key: "usagemhz", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "average", + StatsType: 
"rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 6, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage in MHz", + Summary: "CPU usage in megahertz during the interval", + }, + Key: "usagemhz", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "maximum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 7, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage in MHz", + Summary: "CPU usage in megahertz during the interval", + }, + Key: "usagemhz", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "minimum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 8, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Reserved capacity", + Summary: "Total CPU capacity reserved by virtual machines", + }, + Key: "reservedCapacity", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 9, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "Amount of time spent on system processes on each virtual CPU in the virtual machine", + }, + Key: "system", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 10, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Wait", + Summary: "Total CPU time spent in wait state", + }, + Key: "wait", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 11, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Ready", + Summary: "Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement interval", + }, + Key: "ready", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: 
"Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 12, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Used", + Summary: "Total CPU usage", + }, + Key: "used", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 13, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Idle", + Summary: "Total time that the CPU spent in an idle state", + }, + Key: "idle", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 14, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap wait", + Summary: "CPU time spent waiting for swap-in", + }, + Key: "swapwait", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 15, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "none", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{16, 17, 18}, + }, + { + Key: 16, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 17, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management 
technologies or hyper-threading)", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "maximum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 18, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "minimum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 19, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Core Utilization", + Summary: "CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)", + }, + Key: "coreUtilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "none", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{20, 21, 22}, + }, + { + Key: 20, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Core Utilization", + Summary: "CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)", + }, + Key: "coreUtilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 21, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Core Utilization", + Summary: "CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)", + }, + Key: "coreUtilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "maximum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 22, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Core Utilization", + Summary: "CPU utilization of the corresponding core (if hyper-threading 
is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)", + }, + Key: "coreUtilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "minimum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 23, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Total capacity", + Summary: "Total CPU capacity reserved by and available for virtual machines", + }, + Key: "totalCapacity", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 24, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Latency", + Summary: "Percent of time the virtual machine is unable to run because it is contending for access to the physical CPU(s)", + }, + Key: "latency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 25, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Entitlement", + Summary: "CPU resources devoted by the ESX scheduler", + }, + Key: "entitlement", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 26, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Demand", + Summary: "The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limit", + }, + Key: "demand", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 27, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Co-stop", + Summary: "Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraints", + }, + Key: "costop", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: 
"millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 28, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Max limited", + Summary: "Time the virtual machine is ready to run, but is not run due to maxing out its CPU limit setting", + }, + Key: "maxlimited", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 29, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Overlap", + Summary: "Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machines", + }, + Key: "overlap", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 30, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Run", + Summary: "Time the virtual machine is scheduled to run", + }, + Key: "run", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 31, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Demand-to-entitlement ratio", + Summary: "CPU resource entitlement to CPU demand ratio (in percents)", + }, + Key: "demandEntitlementRatio", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 32, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Readiness", + Summary: "Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU", + }, + Key: "readiness", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU", + Summary: "CPU", + }, + Key: "cpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65536, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host consumed %", + Summary: "Percentage of host physical memory that has been consumed", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ 
+ Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65537, 65538, 65539}, + }, + { + Key: 65537, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host consumed %", + Summary: "Percentage of host physical memory that has been consumed", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65538, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host consumed %", + Summary: "Percentage of host physical memory that has been consumed", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65539, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host consumed %", + Summary: "Percentage of host physical memory that has been consumed", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65540, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Granted", + Summary: "Amount of host physical memory or physical memory that is mapped for a virtual machine or a host", + }, + Key: "granted", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65541, 65542, 65543}, + }, + { + Key: 65541, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Granted", + Summary: "Amount of host physical memory or physical memory that is mapped for a virtual machine or a host", + }, + Key: "granted", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65542, + NameInfo: 
&types.ElementDescription{ + Description: types.Description{ + Label: "Granted", + Summary: "Amount of host physical memory or physical memory that is mapped for a virtual machine or a host", + }, + Key: "granted", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65543, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Granted", + Summary: "Amount of host physical memory or physical memory that is mapped for a virtual machine or a host", + }, + Key: "granted", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65544, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active", + Summary: "Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi", + }, + Key: "active", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65545, 65546, 65547}, + }, + { + Key: 65545, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active", + Summary: "Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi", + }, + Key: "active", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65546, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active", + Summary: "Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi", + }, + Key: "active", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65547, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active", + Summary: "Amount of guest physical memory that is being actively read or written by guest. 
Activeness is estimated by ESXi", + }, + Key: "active", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65548, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared", + Summary: "Amount of guest physical memory that is shared within a single virtual machine or across virtual machines", + }, + Key: "shared", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65549, 65550, 65551}, + }, + { + Key: 65549, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared", + Summary: "Amount of guest physical memory that is shared within a single virtual machine or across virtual machines", + }, + Key: "shared", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65550, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared", + Summary: "Amount of guest physical memory that is shared within a single virtual machine or across virtual machines", + }, + Key: "shared", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65551, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared", + Summary: "Amount of guest physical memory that is shared within a single virtual machine or across virtual machines", + }, + Key: "shared", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65552, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Zero pages", + Summary: "Guest physical memory pages whose content is 0x00", + }, + Key: "zero", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: 
"kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65553, 65554, 65555}, + }, + { + Key: 65553, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Zero pages", + Summary: "Guest physical memory pages whose content is 0x00", + }, + Key: "zero", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65554, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Zero pages", + Summary: "Guest physical memory pages whose content is 0x00", + }, + Key: "zero", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65555, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Zero pages", + Summary: "Guest physical memory pages whose content is 0x00", + }, + Key: "zero", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65556, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Reservation available", + Summary: "Amount by which reservation can be raised", + }, + Key: "unreserved", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65557, 65558, 65559}, + }, + { + Key: 65557, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Reservation available", + Summary: "Amount by which reservation can be raised", + }, + Key: "unreserved", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65558, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Reservation available", + Summary: "Amount by which reservation can be raised", + }, + Key: "unreserved", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: 
&types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65559, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Reservation available", + Summary: "Amount by which reservation can be raised", + }, + Key: "unreserved", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65560, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap consumed", + Summary: "Swap storage space consumed", + }, + Key: "swapused", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65561, 65562, 65563}, + }, + { + Key: 65561, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap consumed", + Summary: "Swap storage space consumed", + }, + Key: "swapused", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65562, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap consumed", + Summary: "Swap storage space consumed", + }, + Key: "swapused", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65563, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap consumed", + Summary: "Swap storage space consumed", + }, + Key: "swapused", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65568, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared common", + Summary: "Amount of host physical memory that backs shared guest physical memory (Shared)", + }, + Key: "sharedcommon", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: 
"Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65569, 65570, 65571}, + }, + { + Key: 65569, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared common", + Summary: "Amount of host physical memory that backs shared guest physical memory (Shared)", + }, + Key: "sharedcommon", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65570, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared common", + Summary: "Amount of host physical memory that backs shared guest physical memory (Shared)", + }, + Key: "sharedcommon", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65571, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Shared common", + Summary: "Amount of host physical memory that backs shared guest physical memory (Shared)", + }, + Key: "sharedcommon", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65572, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heap", + Summary: "Virtual address space of ESXi that is dedicated to its heap", + }, + Key: "heap", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65573, 65574, 65575}, + }, + { + Key: 65573, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heap", + Summary: "Virtual address space of ESXi that is dedicated to its heap", + }, + Key: "heap", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65574, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + 
Label: "Heap", + Summary: "Virtual address space of ESXi that is dedicated to its heap", + }, + Key: "heap", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65575, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heap", + Summary: "Virtual address space of ESXi that is dedicated to its heap", + }, + Key: "heap", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65576, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heap free", + Summary: "Free address space in the heap of ESXi. This is less than or equal to Heap", + }, + Key: "heapfree", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65577, 65578, 65579}, + }, + { + Key: 65577, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heap free", + Summary: "Free address space in the heap of ESXi. This is less than or equal to Heap", + }, + Key: "heapfree", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65578, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heap free", + Summary: "Free address space in the heap of ESXi. This is less than or equal to Heap", + }, + Key: "heapfree", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65579, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heap free", + Summary: "Free address space in the heap of ESXi. 
This is less than or equal to Heap", + }, + Key: "heapfree", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65580, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Free state", + Summary: "Current memory availability state of ESXi. Possible values are high, clear, soft, hard, low. The state value determines the techniques used for memory reclamation from virtual machines", + }, + Key: "state", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65581, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Ballooned memory", + Summary: "Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest", + }, + Key: "vmmemctl", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65582, 65583, 65584}, + }, + { + Key: 65582, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Ballooned memory", + Summary: "Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest", + }, + Key: "vmmemctl", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65583, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Ballooned memory", + Summary: "Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest", + }, + Key: "vmmemctl", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65584, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Ballooned memory", + Summary: "Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest", + }, + Key: "vmmemctl", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + 
Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65585, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Overhead consumed", + Summary: "Host physical memory consumed by ESXi data structures for running the virtual machines", + }, + Key: "overhead", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65586, 65587, 65588}, + }, + { + Key: 65586, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Overhead consumed", + Summary: "Host physical memory consumed by ESXi data structures for running the virtual machines", + }, + Key: "overhead", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65587, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Overhead consumed", + Summary: "Host physical memory consumed by ESXi data structures for running the virtual machines", + }, + Key: "overhead", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65588, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Overhead consumed", + Summary: "Host physical memory consumed by ESXi data structures for running the virtual machines", + }, + Key: "overhead", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65589, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Reservation consumed", + Summary: "Memory reservation consumed by powered-on virtual machines", + }, + Key: "reservedCapacity", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MB", + Summary: "Megabyte", + }, + Key: "megaBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65590, + 
NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swapped", + Summary: "Amount of guest physical memory that is swapped out to the swap space", + }, + Key: "swapped", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65591, 65592, 65593}, + }, + { + Key: 65591, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swapped", + Summary: "Amount of guest physical memory that is swapped out to the swap space", + }, + Key: "swapped", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65592, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swapped", + Summary: "Amount of guest physical memory that is swapped out to the swap space", + }, + Key: "swapped", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65593, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swapped", + Summary: "Amount of guest physical memory that is swapped out to the swap space", + }, + Key: "swapped", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65594, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap target", + Summary: "Amount of memory that ESXi needs to reclaim by swapping", + }, + Key: "swaptarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65595, 65596, 65597}, + }, + { + Key: 65595, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap target", + Summary: "Amount of memory that ESXi needs to reclaim by swapping", + }, + Key: "swaptarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + 
Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65596, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap target", + Summary: "Amount of memory that ESXi needs to reclaim by swapping", + }, + Key: "swaptarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65597, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap target", + Summary: "Amount of memory that ESXi needs to reclaim by swapping", + }, + Key: "swaptarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65598, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap in", + Summary: "Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter", + }, + Key: "swapin", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65599, 65600, 65601}, + }, + { + Key: 65599, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap in", + Summary: "Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter", + }, + Key: "swapin", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65600, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap in", + Summary: "Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. 
This value is less than or equal to the 'Swap out' counter", + }, + Key: "swapin", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65601, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap in", + Summary: "Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter", + }, + Key: "swapin", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65602, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap out", + Summary: "Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.", + }, + Key: "swapout", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65603, 65604, 65605}, + }, + { + Key: 65603, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap out", + Summary: "Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.", + }, + Key: "swapout", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65604, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap out", + Summary: "Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.", + }, + Key: "swapout", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65605, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap out", + Summary: "Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.", + }, + Key: "swapout", + }, + GroupInfo: 
&types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65606, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Balloon target", + Summary: "Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi", + }, + Key: "vmmemctltarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65607, 65608, 65609}, + }, + { + Key: 65607, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Balloon target", + Summary: "Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi", + }, + Key: "vmmemctltarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65608, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Balloon target", + Summary: "Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi", + }, + Key: "vmmemctltarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65609, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Balloon target", + Summary: "Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi", + }, + Key: "vmmemctltarget", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65610, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Consumed", + Summary: "Amount of host physical memory consumed for backing up guest physical memory pages", + }, + Key: "consumed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: 
"kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65611, 65612, 65613}, + }, + { + Key: 65611, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Consumed", + Summary: "Amount of host physical memory consumed for backing up guest physical memory pages", + }, + Key: "consumed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65612, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Consumed", + Summary: "Amount of host physical memory consumed for backing up guest physical memory pages", + }, + Key: "consumed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65613, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Consumed", + Summary: "Amount of host physical memory consumed for backing up guest physical memory pages", + }, + Key: "consumed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65614, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMkernel consumed", + Summary: "Amount of host physical memory consumed by VMkernel", + }, + Key: "sysUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65615, 65616, 65617}, + }, + { + Key: 65615, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMkernel consumed", + Summary: "Amount of host physical memory consumed by VMkernel", + }, + Key: "sysUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65616, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMkernel consumed", + Summary: "Amount of host physical memory consumed by VMkernel", + }, + Key: "sysUsage", + }, + GroupInfo: &types.ElementDescription{ 
+ Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65617, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMkernel consumed", + Summary: "Amount of host physical memory consumed by VMkernel", + }, + Key: "sysUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65618, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap in rate", + Summary: "Rate at which guest physical memory is swapped in from the swap space", + }, + Key: "swapinRate", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65619, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Swap out rate", + Summary: "Rate at which guest physical memory is swapped out to the swap space", + }, + Key: "swapoutRate", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65620, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active write", + Summary: "Amount of guest physical memory that is being actively written by guest. 
Activeness is estimated by ESXi", + }, + Key: "activewrite", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65621, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Compressed", + Summary: "Guest physical memory pages that have undergone memory compression", + }, + Key: "compressed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65622, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Compression rate", + Summary: "Rate of guest physical memory page compression by ESXi", + }, + Key: "compressionRate", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65623, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Decompression rate", + Summary: "Rate of guest physical memory decompression", + }, + Key: "decompressionRate", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65624, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Overhead reserved", + Summary: "Host physical memory reserved by ESXi, for its data structures, for running the virtual machine", + }, + Key: "overheadMax", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65625, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Total reservation", + Summary: "Total reservation, available and consumed, for powered-on virtual machines", + }, + Key: "totalCapacity", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MB", + Summary: "Megabyte", + }, + Key: "megaBytes", + }, + RollupType: 
"average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65626, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Compressed", + Summary: "Amount of guest physical memory pages compressed by ESXi", + }, + Key: "zipped", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65627, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Compression saved", + Summary: "Host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memory", + }, + Key: "zipSaved", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65628, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Page-fault latency", + Summary: "Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memory", + }, + Key: "latency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65629, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Entitlement", + Summary: "Amount of host physical memory the virtual machine deserves, as determined by ESXi", + }, + Key: "entitlement", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65630, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Reclamation threshold", + Summary: "Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooning", + }, + Key: "lowfreethreshold", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65631, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache consumed", + 
Summary: "Storage space consumed on the host swap cache for storing swapped guest physical memory pages", + }, + Key: "llSwapUsed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65635, 65636, 65637}, + }, + { + Key: 65632, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap in rate", + Summary: "Rate at which guest physical memory is swapped in from the host swap cache", + }, + Key: "llSwapInRate", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65633, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap out rate", + Summary: "Rate at which guest physical memory is swapped out to the host swap cache", + }, + Key: "llSwapOutRate", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65634, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Overhead active", + Summary: "Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXi", + }, + Key: "overheadTouched", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65635, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache consumed", + Summary: "Storage space consumed on the host swap cache for storing swapped guest physical memory pages", + }, + Key: "llSwapUsed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65636, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache consumed", + Summary: "Storage space consumed on the host swap cache for storing swapped guest physical memory pages", + }, + Key: "llSwapUsed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: 
"Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65637, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache consumed", + Summary: "Storage space consumed on the host swap cache for storing swapped guest physical memory pages", + }, + Key: "llSwapUsed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65638, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap in", + Summary: "Amount of guest physical memory swapped in from host cache", + }, + Key: "llSwapIn", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65639, 65640, 65641}, + }, + { + Key: 65639, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap in", + Summary: "Amount of guest physical memory swapped in from host cache", + }, + Key: "llSwapIn", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65640, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap in", + Summary: "Amount of guest physical memory swapped in from host cache", + }, + Key: "llSwapIn", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65641, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap in", + Summary: "Amount of guest physical memory swapped in from host cache", + }, + Key: "llSwapIn", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65642, + NameInfo: &types.ElementDescription{ + Description: 
types.Description{ + Label: "Host cache swap out", + Summary: "Amount of guest physical memory swapped out to the host swap cache", + }, + Key: "llSwapOut", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{65643, 65644, 65645}, + }, + { + Key: 65643, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap out", + Summary: "Amount of guest physical memory swapped out to the host swap cache", + }, + Key: "llSwapOut", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65644, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap out", + Summary: "Amount of guest physical memory swapped out to the host swap cache", + }, + Key: "llSwapOut", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65645, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Host cache swap out", + Summary: "Amount of guest physical memory swapped out to the host swap cache", + }, + Key: "llSwapOut", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65646, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMFS PB Cache Size", + Summary: "Space used for holding VMFS Pointer Blocks in memory", + }, + Key: "vmfs.pbc.size", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MB", + Summary: "Megabyte", + }, + Key: "megaBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65647, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Maximum VMFS PB Cache Size", + Summary: "Maximum size the VMFS Pointer Block Cache can grow to", + }, + Key: "vmfs.pbc.sizeMax", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MB", + Summary: 
"Megabyte", + }, + Key: "megaBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65648, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMFS Working Set", + Summary: "Amount of file blocks whose addresses are cached in the VMFS PB Cache", + }, + Key: "vmfs.pbc.workingSet", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "TB", + Summary: "Terabyte", + }, + Key: "teraBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65649, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Maximum VMFS Working Set", + Summary: "Maximum amount of file blocks whose addresses are cached in the VMFS PB Cache", + }, + Key: "vmfs.pbc.workingSetMax", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "TB", + Summary: "Terabyte", + }, + Key: "teraBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65650, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMFS PB Cache Overhead", + Summary: "Amount of VMFS heap used by the VMFS PB Cache", + }, + Key: "vmfs.pbc.overhead", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 65651, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VMFS PB Cache Capacity Miss Ratio", + Summary: "Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cache", + }, + Key: "vmfs.pbc.capMissRatio", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory", + Summary: "Memory", + }, + Key: "mem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131072, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Aggregated disk I/O rate. 
For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "none", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{131073, 131074, 131075}, + }, + { + Key: 131073, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131074, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "maximum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131075, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Aggregated disk I/O rate. 
For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "minimum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131076, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read requests", + Summary: "Number of disk reads during the collection interval", + }, + Key: "numberRead", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131077, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write requests", + Summary: "Number of disk writes during the collection interval", + }, + Key: "numberWrite", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131078, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read rate", + Summary: "Average number of kilobytes read from the disk each second during the collection interval", + }, + Key: "read", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131079, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write rate", + Summary: "Average number of kilobytes written to disk each second during the collection interval", + }, + Key: "write", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131080, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Commands issued", + Summary: "Number of SCSI commands issued during the collection interval", + }, + Key: "commands", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: 
"number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131081, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Commands aborted", + Summary: "Number of SCSI commands aborted during the collection interval", + }, + Key: "commandsAborted", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131082, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Bus resets", + Summary: "Number of SCSI-bus reset commands issued during the collection interval", + }, + Key: "busResets", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131083, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Physical device read latency", + Summary: "Average amount of time, in milliseconds, to read from the physical device", + }, + Key: "deviceReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131084, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Kernel read latency", + Summary: "Average amount of time, in milliseconds, spent by VMkernel to process each SCSI read command", + }, + Key: "kernelReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131085, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read latency", + Summary: "Average amount of time taken during the collection interval to process a SCSI read command issued from the guest OS to the virtual machine", + }, + Key: "totalReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131086, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Queue read latency", + Summary: "Average amount of time 
spent in the VMkernel queue, per SCSI read command, during the collection interval", + }, + Key: "queueReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131087, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Physical device write latency", + Summary: "Average amount of time, in milliseconds, to write to the physical device", + }, + Key: "deviceWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131088, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Kernel write latency", + Summary: "Average amount of time, in milliseconds, spent by VMkernel to process each SCSI write command", + }, + Key: "kernelWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131089, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write latency", + Summary: "Average amount of time taken during the collection interval to process a SCSI write command issued by the guest OS to the virtual machine", + }, + Key: "totalWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131090, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Queue write latency", + Summary: "Average amount of time spent in the VMkernel queue, per SCSI write command, during the collection interval", + }, + Key: "queueWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131091, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Physical device command latency", + Summary: "Average amount of time, in milliseconds, to complete a SCSI command from the physical device", + }, + Key: "deviceLatency", + }, + GroupInfo: &types.ElementDescription{ + 
Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131092, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Kernel command latency", + Summary: "Average amount of time, in milliseconds, spent by VMkernel to process each SCSI command", + }, + Key: "kernelLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131093, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Command latency", + Summary: "Average amount of time taken during the collection interval to process a SCSI command issued by the guest OS to the virtual machine", + }, + Key: "totalLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131094, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Queue command latency", + Summary: "Average amount of time spent in the VMkernel queue, per SCSI command, during the collection interval", + }, + Key: "queueLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131095, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Highest latency", + Summary: "Highest latency value across all disks used by the host", + }, + Key: "maxTotalLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131096, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Maximum queue depth", + Summary: "Maximum queue depth", + }, + Key: "maxQueueDepth", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + 
AssociatedCounterId: nil, + }, + { + Key: 131097, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average read requests per second", + Summary: "Average number of disk reads per second during the collection interval", + }, + Key: "numberReadAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131098, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average write requests per second", + Summary: "Average number of disk writes per second during the collection interval", + }, + Key: "numberWriteAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 131099, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average commands issued per second", + Summary: "Average number of SCSI commands issued per second during the collection interval", + }, + Key: "commandsAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk", + Summary: "Disk", + }, + Key: "disk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196608, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Network utilization (combined transmit-rates and receive-rates) during the interval", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "none", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{196609, 196610, 196611}, + }, + { + Key: 196609, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Network utilization (combined transmit-rates and receive-rates) during the interval", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196610, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Network utilization (combined transmit-rates and receive-rates) during the interval", + }, + Key: "usage", + }, + GroupInfo: 
&types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "maximum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196611, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Network utilization (combined transmit-rates and receive-rates) during the interval", + }, + Key: "usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "minimum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196612, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Packets received", + Summary: "Number of packets received during the interval", + }, + Key: "packetsRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196613, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Packets transmitted", + Summary: "Number of packets transmitted during the interval", + }, + Key: "packetsTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196614, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Data receive rate", + Summary: "Average rate at which data was received during the interval", + }, + Key: "received", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196615, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Data transmit rate", + Summary: "Average rate at which data was transmitted during the interval", + }, + Key: "transmitted", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + 
}, + { + Key: 196616, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Receive packets dropped", + Summary: "Number of receives dropped", + }, + Key: "droppedRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196617, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Transmit packets dropped", + Summary: "Number of transmits dropped", + }, + Key: "droppedTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196618, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Data receive rate", + Summary: "Average amount of data received per second", + }, + Key: "bytesRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196619, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Data transmit rate", + Summary: "Average amount of data transmitted per second", + }, + Key: "bytesTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196620, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Broadcast receives", + Summary: "Number of broadcast packets received during the sampling interval", + }, + Key: "broadcastRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196621, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Broadcast transmits", + Summary: "Number of broadcast packets transmitted during the sampling interval", + }, + Key: "broadcastTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + 
}, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196622, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Multicast receives", + Summary: "Number of multicast packets received during the sampling interval", + }, + Key: "multicastRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196623, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Multicast transmits", + Summary: "Number of multicast packets transmitted during the sampling interval", + }, + Key: "multicastTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196624, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Packet receive errors", + Summary: "Number of packets with errors received during the sampling interval", + }, + Key: "errorsRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196625, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Packet transmit errors", + Summary: "Number of packets with errors transmitted during the sampling interval", + }, + Key: "errorsTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196626, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Unknown protocol frames", + Summary: "Number of frames with unknown protocol received during the sampling interval", + }, + Key: "unknownProtos", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196627, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "pnicBytesRx", + Summary: "pnicBytesRx", + }, + Key: "pnicBytesRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + 
Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 196628, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "pnicBytesTx", + Summary: "pnicBytesTx", + }, + Key: "pnicBytesTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Network", + Summary: "Network", + }, + Key: "net", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262144, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Uptime", + Summary: "Total time elapsed, in seconds, since last system startup", + }, + Key: "uptime", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "s", + Summary: "Second", + }, + Key: "second", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262145, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Heartbeat", + Summary: "Number of heartbeats issued per virtual machine during the interval", + }, + Key: "heartbeat", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262146, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Disk usage", + Summary: "Amount of disk space usage for each mount point", + }, + Key: "diskUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262147, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU usage (None)", + Summary: "Amount of CPU used by the Service Console and other applications during the interval", + }, + Key: "resourceCpuUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "none", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{262148, 262149, 262150}, + }, + { + Key: 262148, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU usage 
(Average)", + Summary: "Amount of CPU used by the Service Console and other applications during the interval", + }, + Key: "resourceCpuUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262149, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU usage (Maximum)", + Summary: "Amount of CPU used by the Service Console and other applications during the interval", + }, + Key: "resourceCpuUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "maximum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262150, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU usage (Minimum)", + Summary: "Amount of CPU used by the Service Console and other applications during the interval", + }, + Key: "resourceCpuUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "minimum", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262151, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory touched", + Summary: "Memory touched by the system resource group", + }, + Key: "resourceMemTouched", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262152, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory mapped", + Summary: "Memory mapped by the system resource group", + }, + Key: "resourceMemMapped", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262153, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory share saved", + Summary: "Memory saved due to sharing by the system resource group", + }, + Key: "resourceMemShared", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + 
Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262154, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory swapped", + Summary: "Memory swapped out by the system resource group", + }, + Key: "resourceMemSwapped", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262155, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory overhead", + Summary: "Overhead memory consumed by the system resource group", + }, + Key: "resourceMemOverhead", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262156, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory shared", + Summary: "Memory shared by the system resource group", + }, + Key: "resourceMemCow", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262157, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory zero", + Summary: "Zero filled memory used by the system resource group", + }, + Key: "resourceMemZero", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262158, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU running (1 min. 
average)", + Summary: "CPU running average over 1 minute of the system resource group", + }, + Key: "resourceCpuRun1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262159, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU active (1 min average)", + Summary: "CPU active average over 1 minute of the system resource group", + }, + Key: "resourceCpuAct1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262160, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU maximum limited (1 min)", + Summary: "CPU maximum limited over 1 minute of the system resource group", + }, + Key: "resourceCpuMaxLimited1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262161, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU running (5 min average)", + Summary: "CPU running average over 5 minutes of the system resource group", + }, + Key: "resourceCpuRun5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262162, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU active (5 min average)", + Summary: "CPU active average over 5 minutes of the system resource group", + }, + Key: "resourceCpuAct5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262163, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU maximum limited (5 min)", + Summary: "CPU maximum limited over 5 minutes of the system resource group", + }, + Key: "resourceCpuMaxLimited5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + 
Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262164, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU allocation minimum (in MHz)", + Summary: "CPU allocation reservation (in MHz) of the system resource group", + }, + Key: "resourceCpuAllocMin", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262165, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU allocation maximum (in MHz)", + Summary: "CPU allocation limit (in MHz) of the system resource group", + }, + Key: "resourceCpuAllocMax", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262166, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource CPU allocation shares", + Summary: "CPU allocation shares of the system resource group", + }, + Key: "resourceCpuAllocShares", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262167, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory allocation minimum (in KB)", + Summary: "Memory allocation reservation (in KB) of the system resource group", + }, + Key: "resourceMemAllocMin", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262168, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory allocation maximum (in KB)", + Summary: "Memory allocation limit (in KB) of the system resource group", + }, + Key: "resourceMemAllocMax", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262169, + NameInfo: &types.ElementDescription{ + Description: 
types.Description{ + Label: "Resource memory allocation shares", + Summary: "Memory allocation shares of the system resource group", + }, + Key: "resourceMemAllocShares", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262170, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "OS Uptime", + Summary: "Total time elapsed, in seconds, since last operating system boot-up", + }, + Key: "osUptime", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "s", + Summary: "Second", + }, + Key: "second", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262171, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource memory consumed", + Summary: "Memory consumed by the system resource group", + }, + Key: "resourceMemConsumed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 262172, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "File descriptors used", + Summary: "Number of file descriptors used by the system resource group", + }, + Key: "resourceFdUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "System", + Summary: "System", + }, + Key: "sys", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327680, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active (1 min average)", + Summary: "CPU active average over 1 minute", + }, + Key: "actav1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327681, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active (1 min peak)", + Summary: "CPU active peak over 1 minute", + }, + Key: "actpk1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + 
RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327682, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Running (1 min average)", + Summary: "CPU running average over 1 minute", + }, + Key: "runav1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327683, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active (5 min average)", + Summary: "CPU active average over 5 minutes", + }, + Key: "actav5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327684, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active (5 min peak)", + Summary: "CPU active peak over 5 minutes", + }, + Key: "actpk5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327685, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Running (5 min average)", + Summary: "CPU running average over 5 minutes", + }, + Key: "runav5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327686, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active (15 min average)", + Summary: "CPU active average over 15 minutes", + }, + Key: "actav15", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327687, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Active (15 min peak)", + Summary: "CPU active peak over 15 minutes", + }, + Key: "actpk15", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group 
CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327688, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Running (15 min average)", + Summary: "CPU running average over 15 minutes", + }, + Key: "runav15", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327689, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Running (1 min peak)", + Summary: "CPU running peak over 1 minute", + }, + Key: "runpk1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327690, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Throttled (1 min average)", + Summary: "Amount of CPU resources over the limit that were refused, average over 1 minute", + }, + Key: "maxLimited1", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327691, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Running (5 min peak)", + Summary: "CPU running peak over 5 minutes", + }, + Key: "runpk5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327692, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Throttled (5 min average)", + Summary: "Amount of CPU resources over the limit that were refused, average over 5 minutes", + }, + Key: "maxLimited5", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327693, + NameInfo: 
&types.ElementDescription{ + Description: types.Description{ + Label: "Running (15 min peak)", + Summary: "CPU running peak over 15 minutes", + }, + Key: "runpk15", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327694, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Throttled (15 min average)", + Summary: "Amount of CPU resources over the limit that were refused, average over 15 minutes", + }, + Key: "maxLimited15", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327695, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Group CPU sample count", + Summary: "Group CPU sample count", + }, + Key: "sampleCount", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 327696, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Group CPU sample period", + Summary: "Group CPU sample period", + }, + Key: "samplePeriod", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Resource group CPU", + Summary: "Resource group CPU", + }, + Key: "rescpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 393216, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory used", + Summary: "Amount of total configured memory that is available for use", + }, + Key: "memUsed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Management agent", + Summary: "Management agent", + }, + Key: "managementAgent", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 393217, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory swap used", + Summary: "Sum of the memory swapped by all powered-on virtual machines on the host", + }, + Key: "swapUsed", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Management agent", + Summary: "Management agent", + }, + Key: "managementAgent", 
+ }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 393218, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory swap in", + Summary: "Amount of memory that is swapped in for the Service Console", + }, + Key: "swapIn", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Management agent", + Summary: "Management agent", + }, + Key: "managementAgent", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 393219, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory swap out", + Summary: "Amount of memory that is swapped out for the Service Console", + }, + Key: "swapOut", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Management agent", + Summary: "Management agent", + }, + Key: "managementAgent", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 393220, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "CPU usage", + Summary: "Amount of Service Console CPU usage", + }, + Key: "cpuUsage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Management agent", + Summary: "Management agent", + }, + Key: "managementAgent", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MHz", + Summary: "Megahertz", + }, + Key: "megaHertz", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458752, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average commands issued per second", + Summary: "Average number of commands issued per second by the storage adapter during the collection interval", + }, + Key: "commandsAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458753, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average read requests per second", + Summary: "Average number of read commands issued per second by the storage adapter during the collection interval", + }, + Key: "numberReadAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + 
StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458754, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average write requests per second", + Summary: "Average number of write commands issued per second by the storage adapter during the collection interval", + }, + Key: "numberWriteAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458755, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read rate", + Summary: "Rate of reading data by the storage adapter", + }, + Key: "read", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458756, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write rate", + Summary: "Rate of writing data by the storage adapter", + }, + Key: "write", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458757, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read latency", + Summary: "The average time a read by the storage adapter takes", + }, + Key: "totalReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458758, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write latency", + Summary: "The average time a write by the storage adapter takes", + }, + Key: "totalWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 458759, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Highest latency", + Summary: "Highest latency value across all 
storage adapters used by the host", + }, + Key: "maxTotalLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage adapter", + Summary: "Storage adapter", + }, + Key: "storageAdapter", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524288, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average commands issued per second", + Summary: "Average number of commands issued per second on the storage path during the collection interval", + }, + Key: "commandsAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: "storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524289, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average read requests per second", + Summary: "Average number of read commands issued per second on the storage path during the collection interval", + }, + Key: "numberReadAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: "storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524290, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average write requests per second", + Summary: "Average number of write commands issued per second on the storage path during the collection interval", + }, + Key: "numberWriteAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: "storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524291, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read rate", + Summary: "Rate of reading data on the storage path", + }, + Key: "read", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: "storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524292, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write rate", + Summary: "Rate of writing data on the storage path", + }, + Key: "write", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: 
"storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524293, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read latency", + Summary: "The average time a read issued on the storage path takes", + }, + Key: "totalReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: "storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524294, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write latency", + Summary: "The average time a write issued on the storage path takes", + }, + Key: "totalWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: "storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 524295, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Highest latency", + Summary: "Highest latency value across all storage paths used by the host", + }, + Key: "maxTotalLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage path", + Summary: "Storage path", + }, + Key: "storagePath", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589824, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average read requests per second", + Summary: "Average number of read commands issued per second to the virtual disk during the collection interval", + }, + Key: "numberReadAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589825, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average write requests per second", + Summary: "Average number of write commands issued per second to the virtual disk during the collection interval", + }, + Key: "numberWriteAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", 
+ StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589826, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read rate", + Summary: "Rate of reading data from the virtual disk", + }, + Key: "read", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589827, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write rate", + Summary: "Rate of writing data to the virtual disk", + }, + Key: "write", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589828, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read latency", + Summary: "The average time a read from the virtual disk takes", + }, + Key: "totalReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589829, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write latency", + Summary: "The average time a write to the virtual disk takes", + }, + Key: "totalWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589830, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average number of outstanding read requests", + Summary: "Average number of outstanding read requests to the virtual disk during the collection interval", + }, + Key: "readOIO", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589831, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average number of outstanding write requests", + Summary: "Average number of outstanding write requests to the virtual disk 
during the collection interval", + }, + Key: "writeOIO", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589832, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read workload metric", + Summary: "Storage DRS virtual disk metric for the read workload model", + }, + Key: "readLoadMetric", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589833, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write workload metric", + Summary: "Storage DRS virtual disk metric for the write workload model", + }, + Key: "writeLoadMetric", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589834, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read request size", + Summary: "Average read request size in bytes", + }, + Key: "readIOSize", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589835, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write request size", + Summary: "Average write request size in bytes", + }, + Key: "writeIOSize", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589836, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Number of small seeks", + Summary: "Number of seeks during the interval that were less than 64 LBNs apart", + }, + Key: "smallSeeks", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + 
StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589837, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Number of medium seeks", + Summary: "Number of seeks during the interval that were between 64 and 8192 LBNs apart", + }, + Key: "mediumSeeks", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589838, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Number of large seeks", + Summary: "Number of seeks during the interval that were greater than 8192 LBNs apart", + }, + Key: "largeSeeks", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589839, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read Latency (us)", + Summary: "Read latency in microseconds", + }, + Key: "readLatencyUS", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "µs", + Summary: "Microsecond", + }, + Key: "microsecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589840, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write Latency (us)", + Summary: "Write latency in microseconds", + }, + Key: "writeLatencyUS", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "µs", + Summary: "Microsecond", + }, + Key: "microsecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589841, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual Flash Read Cache I/Os per second for the virtual disk", + Summary: "The average virtual Flash Read Cache I/Os per second value for the virtual disk", + }, + Key: "vFlashCacheIops", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589842, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual Flash Read Cache latency for the virtual disk", + Summary: "The average virtual Flash Read 
Cache latency value for the virtual disk", + }, + Key: "vFlashCacheLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "µs", + Summary: "Microsecond", + }, + Key: "microsecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 589843, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual Flash Read Cache throughput for virtual disk", + Summary: "The average virtual Flash Read Cache throughput value for the virtual disk", + }, + Key: "vFlashCacheThroughput", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual disk", + Summary: "Virtual disk", + }, + Key: "virtualDisk", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655360, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average read requests per second", + Summary: "Average number of read commands issued per second to the datastore during the collection interval", + }, + Key: "numberReadAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655361, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average write requests per second", + Summary: "Average number of write commands issued per second to the datastore during the collection interval", + }, + Key: "numberWriteAveraged", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655362, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read rate", + Summary: "Rate of reading data from the datastore", + }, + Key: "read", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655363, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write rate", + Summary: "Rate of writing data to the datastore", + }, + Key: "write", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: 
&types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655364, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read latency", + Summary: "The average time a read from the datastore takes", + }, + Key: "totalReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655365, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write latency", + Summary: "The average time a write to the datastore takes", + }, + Key: "totalWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655366, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage I/O Control normalized latency", + Summary: "Storage I/O Control size-normalized I/O latency", + }, + Key: "sizeNormalizedDatastoreLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "µs", + Summary: "Microsecond", + }, + Key: "microsecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655367, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage I/O Control aggregated IOPS", + Summary: "Storage I/O Control aggregated IOPS", + }, + Key: "datastoreIops", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655368, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore bytes read", + Summary: "Storage DRS datastore bytes read", + }, + Key: "datastoreReadBytes", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655369, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage 
DRS datastore bytes written", + Summary: "Storage DRS datastore bytes written", + }, + Key: "datastoreWriteBytes", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655370, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore read I/O rate", + Summary: "Storage DRS datastore read I/O rate", + }, + Key: "datastoreReadIops", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655371, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore write I/O rate", + Summary: "Storage DRS datastore write I/O rate", + }, + Key: "datastoreWriteIops", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655372, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore normalized read latency", + Summary: "Storage DRS datastore normalized read latency", + }, + Key: "datastoreNormalReadLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655373, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore normalized write latency", + Summary: "Storage DRS datastore normalized write latency", + }, + Key: "datastoreNormalWriteLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655374, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore outstanding read requests", + Summary: "Storage DRS datastore outstanding read requests", + }, + Key: "datastoreReadOIO", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + 
Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655375, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore outstanding write requests", + Summary: "Storage DRS datastore outstanding write requests", + }, + Key: "datastoreWriteOIO", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655376, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage I/O Control datastore maximum queue depth", + Summary: "Storage I/O Control datastore maximum queue depth", + }, + Key: "datastoreMaxQueueDepth", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655377, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore read workload metric", + Summary: "Storage DRS datastore metric for read workload model", + }, + Key: "datastoreReadLoadMetric", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655378, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Storage DRS datastore write workload metric", + Summary: "Storage DRS datastore metric for write workload model", + }, + Key: "datastoreWriteLoadMetric", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655379, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Highest latency", + Summary: "Highest latency value across all datastores used by the host", + }, + Key: "maxTotalLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655380, + NameInfo: &types.ElementDescription{ + 
Description: types.Description{ + Label: "Storage I/O Control active time percentage", + Summary: "Percentage of time Storage I/O Control actively controlled datastore latency", + }, + Key: "siocActiveTimePercentage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 655381, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore latency observed by VMs", + Summary: "The average datastore latency as seen by virtual machines", + }, + Key: "datastoreVMObservedLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Datastore", + Summary: "Datastore", + }, + Key: "datastore", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "µs", + Summary: "Microsecond", + }, + Key: "microsecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 720896, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Usage", + Summary: "Current power usage", + }, + Key: "power", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Power", + Summary: "Power", + }, + Key: "power", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "W", + Summary: "Watt", + }, + Key: "watt", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 720897, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Cap", + Summary: "Maximum allowed power usage", + }, + Key: "powerCap", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Power", + Summary: "Power", + }, + Key: "power", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "W", + Summary: "Watt", + }, + Key: "watt", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 720898, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Energy usage", + Summary: "Total energy used since last stats reset", + }, + Key: "energy", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Power", + Summary: "Power", + }, + Key: "power", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "J", + Summary: "Joule", + }, + Key: "joule", + }, + RollupType: "summation", + StatsType: "delta", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 786432, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "vSphere Replication VM Count", + Summary: "Current number of replicated virtual machines", + }, + Key: "hbrNumVms", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "vSphere Replication", + Summary: "vSphere Replication", + }, + Key: "hbr", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + 
StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 786433, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Replication Data Receive Rate", + Summary: "Average amount of data received per second", + }, + Key: "hbrNetRx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "vSphere Replication", + Summary: "vSphere Replication", + }, + Key: "hbr", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 786434, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Replication Data Transmit Rate", + Summary: "Average amount of data transmitted per second", + }, + Key: "hbrNetTx", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "vSphere Replication", + Summary: "vSphere Replication", + }, + Key: "hbr", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 851968, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Number of caches controlled by the virtual flash module", + Summary: "Number of caches controlled by the virtual flash module", + }, + Key: "numActiveVMDKs", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Virtual flash", + Summary: "Virtual flash module related statistical values", + }, + Key: "vflashModule", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245184, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read IOPS", + Summary: "Read IOPS", + }, + Key: "readIops", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245185, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read throughput", + Summary: "Read throughput in kBps", + }, + Key: "readThroughput", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245186, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average read latency", + Summary: "Average read latency 
in ms", + }, + Key: "readAvgLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245187, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Max read latency", + Summary: "Max read latency in ms", + }, + Key: "readMaxLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245188, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Cache hit rate", + Summary: "Cache hit rate percentage", + }, + Key: "readCacheHitRate", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245189, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Read congestion per sampling interval", + Summary: "Read congestion", + }, + Key: "readCongestion", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245190, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write IOPS", + Summary: "Write IOPS", + }, + Key: "writeIops", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245191, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write throughput", + Summary: "Write throughput in kBps", + }, + Key: "writeThroughput", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per 
second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245192, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Average write latency", + Summary: "Average write latency in ms", + }, + Key: "writeAvgLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245193, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Max write latency", + Summary: "Max write latency in ms", + }, + Key: "writeMaxLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245194, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Write congestion per sampling interval", + Summary: "Write congestion", + }, + Key: "writeCongestion", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245195, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Recovery write IOPS", + Summary: "Recovery write IOPS", + }, + Key: "recoveryWriteIops", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245196, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Recovery write through-put", + Summary: "Recovery write through-put in kBps", + }, + Key: "recoveryWriteThroughput", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KBps", + Summary: "Kilobytes per second", + }, + Key: "kiloBytesPerSecond", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245197, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: 
"Average recovery write latency", + Summary: "Average recovery write latency in ms", + }, + Key: "recoveryWriteAvgLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245198, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Max recovery write latency", + Summary: "Max recovery write latency in ms", + }, + Key: "recoveryWriteMaxLatency", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "ms", + Summary: "Millisecond", + }, + Key: "millisecond", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1245199, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Recovery write congestion per sampling interval", + Summary: "Recovery write congestion", + }, + Key: "recoveryWriteCongestion", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "VSAN DOM Objects", + Summary: "VSAN DOM object related statistical values", + }, + Key: "vsanDomObj", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "num", + Summary: "Number", + }, + Key: "number", + }, + RollupType: "average", + StatsType: "rate", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310720, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "The utilization of a GPU in percentages", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{1310721, 1310722, 1310723}, + }, + { + Key: 1310721, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "The utilization of a GPU in percentages", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310722, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "The utilization of a GPU in percentages", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + 
Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310723, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Utilization", + Summary: "The utilization of a GPU in percentages", + }, + Key: "utilization", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310724, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory used", + Summary: "The amount of GPU memory used in kilobytes", + }, + Key: "mem.used", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{1310725, 1310726, 1310727}, + }, + { + Key: 1310725, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory used", + Summary: "The amount of GPU memory used in kilobytes", + }, + Key: "mem.used", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310726, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory used", + Summary: "The amount of GPU memory used in kilobytes", + }, + Key: "mem.used", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310727, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory used", + Summary: "The amount of GPU memory used in kilobytes", + }, + Key: "mem.used", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "KB", + Summary: "Kilobyte", + }, + Key: "kiloBytes", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310728, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory usage", + Summary: "The amount of GPU memory used in percentages of the total available", + }, + Key: "mem.usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: 
&types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "none", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: []int32{1310729, 1310730, 1310731}, + }, + { + Key: 1310729, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory usage", + Summary: "The amount of GPU memory used in percentages of the total available", + }, + Key: "mem.usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310730, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory usage", + Summary: "The amount of GPU memory used in percentages of the total available", + }, + Key: "mem.usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "maximum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310731, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Memory usage", + Summary: "The amount of GPU memory used in percentages of the total available", + }, + Key: "mem.usage", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "%", + Summary: "Percentage", + }, + Key: "percent", + }, + RollupType: "minimum", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1310732, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Temperature", + Summary: "The temperature of a GPU in degrees celsius", + }, + Key: "temperature", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "GPU", + Summary: "GPU", + }, + Key: "gpu", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "℃", + Summary: "Temperature in degrees Celsius", + }, + Key: "celsius", + }, + RollupType: "average", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, + { + Key: 1376256, + NameInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "Persistent memory available reservation", + Summary: "Persistent memory available reservation on a host.", + }, + Key: "available.reservation", + }, + GroupInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "PMEM", + Summary: "PMEM", + }, + Key: "pmem", + }, + UnitInfo: &types.ElementDescription{ + Description: types.Description{ + Label: "MB", + Summary: "Megabyte", + }, + Key: "megaBytes", + }, + RollupType: "latest", + StatsType: "absolute", + Level: 0, + PerDeviceLevel: 0, + AssociatedCounterId: nil, + }, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go b/vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go new file mode 100644 
index 00000000000..b8cb91d8d4b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go @@ -0,0 +1,165 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import ( + "time" + + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// ResourcePool is the default template for ResourcePool properties. +// Capture method: +// govc pool.info "*" -dump +var ResourcePool = mo.ResourcePool{ + ManagedEntity: mo.ManagedEntity{ + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{Type: "ResourcePool", Value: "ha-root-pool"}, + Value: nil, + AvailableField: nil, + }, + Parent: &types.ManagedObjectReference{Type: "ComputeResource", Value: "ha-compute-res"}, + CustomValue: nil, + OverallStatus: "green", + ConfigStatus: "green", + ConfigIssue: nil, + EffectiveRole: []int32{-1}, + Permission: nil, + Name: "Resources", + DisabledMethod: []string{"CreateVApp", "CreateChildVM_Task"}, + RecentTask: nil, + DeclaredAlarmState: nil, + TriggeredAlarmState: nil, + AlarmActionsEnabled: (*bool)(nil), + Tag: nil, + }, + Summary: &types.ResourcePoolSummary{ + DynamicData: types.DynamicData{}, + Name: "Resources", + Config: types.ResourceConfigSpec{ + DynamicData: types.DynamicData{}, + Entity: &types.ManagedObjectReference{Type: "ResourcePool", Value: "ha-root-pool"}, + ChangeVersion: "", + LastModified: (*time.Time)(nil), + CpuAllocation: types.ResourceAllocationInfo{ + DynamicData: types.DynamicData{}, + Reservation: types.NewInt64(4121), + ExpandableReservation: types.NewBool(false), + Limit: types.NewInt64(4121), + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 9000, + Level: "custom", + }, + OverheadLimit: nil, + }, + MemoryAllocation: types.ResourceAllocationInfo{ + DynamicData: types.DynamicData{}, + Reservation: types.NewInt64(961), + ExpandableReservation: types.NewBool(false), + Limit: types.NewInt64(961), + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 9000, + Level: "custom", + }, + OverheadLimit: nil, + }, + }, + Runtime: types.ResourcePoolRuntimeInfo{ + DynamicData: types.DynamicData{}, + Memory: types.ResourcePoolResourceUsage{ + DynamicData: types.DynamicData{}, + ReservationUsed: 0, + ReservationUsedForVm: 0, + UnreservedForPool: 1007681536, + UnreservedForVm: 1007681536, + OverallUsage: 0, + MaxUsage: 1007681536, + }, + Cpu: types.ResourcePoolResourceUsage{ + DynamicData: types.DynamicData{}, + ReservationUsed: 0, + ReservationUsedForVm: 0, + UnreservedForPool: 4121, + UnreservedForVm: 4121, + OverallUsage: 0, + MaxUsage: 4121, + }, + OverallStatus: "green", + }, + QuickStats: (*types.ResourcePoolQuickStats)(nil), + ConfiguredMemoryMB: 0, + }, + Runtime: types.ResourcePoolRuntimeInfo{ + DynamicData: types.DynamicData{}, + Memory: types.ResourcePoolResourceUsage{ + DynamicData: types.DynamicData{}, + ReservationUsed: 0, + ReservationUsedForVm: 0, + UnreservedForPool: 1007681536, + UnreservedForVm: 
1007681536, + OverallUsage: 0, + MaxUsage: 1007681536, + }, + Cpu: types.ResourcePoolResourceUsage{ + DynamicData: types.DynamicData{}, + ReservationUsed: 0, + ReservationUsedForVm: 0, + UnreservedForPool: 4121, + UnreservedForVm: 4121, + OverallUsage: 0, + MaxUsage: 4121, + }, + OverallStatus: "green", + }, + Owner: types.ManagedObjectReference{Type: "ComputeResource", Value: "ha-compute-res"}, + ResourcePool: nil, + Vm: nil, + Config: types.ResourceConfigSpec{ + DynamicData: types.DynamicData{}, + Entity: &types.ManagedObjectReference{Type: "ResourcePool", Value: "ha-root-pool"}, + ChangeVersion: "", + LastModified: (*time.Time)(nil), + CpuAllocation: types.ResourceAllocationInfo{ + DynamicData: types.DynamicData{}, + Reservation: types.NewInt64(4121), + ExpandableReservation: types.NewBool(false), + Limit: types.NewInt64(4121), + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 9000, + Level: "custom", + }, + OverheadLimit: nil, + }, + MemoryAllocation: types.ResourceAllocationInfo{ + DynamicData: types.DynamicData{}, + Reservation: types.NewInt64(961), + ExpandableReservation: types.NewBool(false), + Limit: types.NewInt64(961), + Shares: &types.SharesInfo{ + DynamicData: types.DynamicData{}, + Shares: 9000, + Level: "custom", + }, + OverheadLimit: nil, + }, + }, + ChildConfiguration: nil, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go b/vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go new file mode 100644 index 00000000000..3aefd1d812d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go @@ -0,0 +1,76 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import ( + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// RootFolder is the default template for the ServiceContent rootFolder property. 
+// Capture method: +// govc folder.info -dump / +var RootFolder = mo.Folder{ + ManagedEntity: mo.ManagedEntity{ + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-root"}, + Value: nil, + AvailableField: nil, + }, + Parent: (*types.ManagedObjectReference)(nil), + CustomValue: nil, + OverallStatus: "green", + ConfigStatus: "green", + ConfigIssue: nil, + EffectiveRole: []int32{-1}, + Permission: []types.Permission{ + { + DynamicData: types.DynamicData{}, + Entity: &types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-root"}, + Principal: "vpxuser", + Group: false, + RoleId: -1, + Propagate: true, + }, + { + DynamicData: types.DynamicData{}, + Entity: &types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-root"}, + Principal: "dcui", + Group: false, + RoleId: -1, + Propagate: true, + }, + { + DynamicData: types.DynamicData{}, + Entity: &types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-root"}, + Principal: "root", + Group: false, + RoleId: -1, + Propagate: true, + }, + }, + Name: "ha-folder-root", + DisabledMethod: nil, + RecentTask: nil, + DeclaredAlarmState: nil, + TriggeredAlarmState: nil, + AlarmActionsEnabled: (*bool)(nil), + Tag: nil, + }, + ChildType: []string{"Datacenter"}, + ChildEntity: nil, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/service_content.go b/vendor/github.com/vmware/govmomi/simulator/esx/service_content.go new file mode 100644 index 00000000000..cc8938f8782 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/service_content.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// ServiceContent is the default template for the ServiceInstance content property. 
+// Capture method: +// govc object.collect -s -dump - content +var ServiceContent = types.ServiceContent{ + RootFolder: types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-root"}, + PropertyCollector: types.ManagedObjectReference{Type: "PropertyCollector", Value: "ha-property-collector"}, + ViewManager: &types.ManagedObjectReference{Type: "ViewManager", Value: "ViewManager"}, + About: types.AboutInfo{ + Name: "VMware ESXi", + FullName: "VMware ESXi 6.5.0 build-5969303", + Vendor: "VMware, Inc.", + Version: "6.5.0", + Build: "5969303", + LocaleVersion: "INTL", + LocaleBuild: "000", + OsType: "vmnix-x86", + ProductLineId: "embeddedEsx", + ApiType: "HostAgent", + ApiVersion: "6.5", + InstanceUuid: "", + LicenseProductName: "VMware ESX Server", + LicenseProductVersion: "6.0", + }, + Setting: &types.ManagedObjectReference{Type: "OptionManager", Value: "HostAgentSettings"}, + UserDirectory: &types.ManagedObjectReference{Type: "UserDirectory", Value: "ha-user-directory"}, + SessionManager: &types.ManagedObjectReference{Type: "SessionManager", Value: "ha-sessionmgr"}, + AuthorizationManager: &types.ManagedObjectReference{Type: "AuthorizationManager", Value: "ha-authmgr"}, + ServiceManager: &types.ManagedObjectReference{Type: "ServiceManager", Value: "ha-servicemanager"}, + PerfManager: &types.ManagedObjectReference{Type: "PerformanceManager", Value: "ha-perfmgr"}, + ScheduledTaskManager: (*types.ManagedObjectReference)(nil), + AlarmManager: (*types.ManagedObjectReference)(nil), + EventManager: &types.ManagedObjectReference{Type: "EventManager", Value: "ha-eventmgr"}, + TaskManager: &types.ManagedObjectReference{Type: "TaskManager", Value: "ha-taskmgr"}, + ExtensionManager: (*types.ManagedObjectReference)(nil), + CustomizationSpecManager: (*types.ManagedObjectReference)(nil), + CustomFieldsManager: (*types.ManagedObjectReference)(nil), + AccountManager: &types.ManagedObjectReference{Type: "HostLocalAccountManager", Value: "ha-localacctmgr"}, + DiagnosticManager: &types.ManagedObjectReference{Type: "DiagnosticManager", Value: "ha-diagnosticmgr"}, + LicenseManager: &types.ManagedObjectReference{Type: "LicenseManager", Value: "ha-license-manager"}, + SearchIndex: &types.ManagedObjectReference{Type: "SearchIndex", Value: "ha-searchindex"}, + FileManager: &types.ManagedObjectReference{Type: "FileManager", Value: "ha-nfc-file-manager"}, + DatastoreNamespaceManager: &types.ManagedObjectReference{Type: "DatastoreNamespaceManager", Value: "ha-datastore-namespace-manager"}, + VirtualDiskManager: &types.ManagedObjectReference{Type: "VirtualDiskManager", Value: "ha-vdiskmanager"}, + VirtualizationManager: (*types.ManagedObjectReference)(nil), + SnmpSystem: (*types.ManagedObjectReference)(nil), + VmProvisioningChecker: (*types.ManagedObjectReference)(nil), + VmCompatibilityChecker: (*types.ManagedObjectReference)(nil), + OvfManager: &types.ManagedObjectReference{Type: "OvfManager", Value: "ha-ovf-manager"}, + IpPoolManager: (*types.ManagedObjectReference)(nil), + DvSwitchManager: &types.ManagedObjectReference{Type: "DistributedVirtualSwitchManager", Value: "ha-dvsmanager"}, + HostProfileManager: (*types.ManagedObjectReference)(nil), + ClusterProfileManager: (*types.ManagedObjectReference)(nil), + ComplianceManager: (*types.ManagedObjectReference)(nil), + LocalizationManager: &types.ManagedObjectReference{Type: "LocalizationManager", Value: "ha-l10n-manager"}, + StorageResourceManager: &types.ManagedObjectReference{Type: "StorageResourceManager", Value: "ha-storage-resource-manager"}, + 
GuestOperationsManager: &types.ManagedObjectReference{Type: "GuestOperationsManager", Value: "ha-guest-operations-manager"}, + OverheadMemoryManager: (*types.ManagedObjectReference)(nil), + CertificateManager: (*types.ManagedObjectReference)(nil), + IoFilterManager: (*types.ManagedObjectReference)(nil), + VStorageObjectManager: &types.ManagedObjectReference{Type: "HostVStorageObjectManager", Value: "ha-vstorage-object-manager"}, + HostSpecManager: (*types.ManagedObjectReference)(nil), + CryptoManager: &types.ManagedObjectReference{Type: "CryptoManager", Value: "ha-crypto-manager"}, + HealthUpdateManager: (*types.ManagedObjectReference)(nil), + FailoverClusterConfigurator: (*types.ManagedObjectReference)(nil), + FailoverClusterManager: (*types.ManagedObjectReference)(nil), +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/setting.go b/vendor/github.com/vmware/govmomi/simulator/esx/setting.go new file mode 100644 index 00000000000..933eaa475da --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/setting.go @@ -0,0 +1,30 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// Setting is captured from ESX's HostSystem.configManager.advancedOption +// Capture method: +// govc object.collect -s -dump $(govc object.collect -s HostSystem:ha-host configManager.advancedOption) setting +var Setting = []types.BaseOptionValue{ + // This list is currently pruned to include a single option for testing + &types.OptionValue{ + Key: "Config.HostAgent.log.level", + Value: "info", + }, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go b/vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go new file mode 100644 index 00000000000..234113b4537 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go @@ -0,0 +1,242 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +// VirtualDevice is the default set of VirtualDevice types created for a VirtualMachine +// Capture method: +// govc vm.create foo +// govc object.collect -s -dump vm/foo config.hardware.device +var VirtualDevice = []types.BaseVirtualDevice{ + &types.VirtualIDEController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 200, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "IDE 0", + Summary: "IDE 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: (*int32)(nil), + }, + BusNumber: 0, + Device: nil, + }, + }, + &types.VirtualIDEController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 201, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "IDE 1", + Summary: "IDE 1", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: (*int32)(nil), + }, + BusNumber: 1, + Device: nil, + }, + }, + &types.VirtualPS2Controller{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 300, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "PS2 controller 0", + Summary: "PS2 controller 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: (*int32)(nil), + }, + BusNumber: 0, + Device: []int32{600, 700}, + }, + }, + &types.VirtualPCIController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 100, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "PCI controller 0", + Summary: "PCI controller 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: (*int32)(nil), + }, + BusNumber: 0, + Device: []int32{500, 12000}, + }, + }, + &types.VirtualSIOController{ + VirtualController: types.VirtualController{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 400, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "SIO controller 0", + Summary: "SIO controller 0", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 0, + UnitNumber: (*int32)(nil), + }, + BusNumber: 0, + Device: nil, + }, + }, + &types.VirtualKeyboard{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 600, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Keyboard ", + Summary: "Keyboard", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 300, + UnitNumber: types.NewInt32(0), + }, + }, + &types.VirtualPointingDevice{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 700, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Pointing device", + Summary: "Pointing device; Device", + }, + Backing: &types.VirtualPointingDeviceDeviceBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ + VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{}, + DeviceName: 
"", + UseAutoDetect: types.NewBool(false), + }, + HostPointingDevice: "autodetect", + }, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 300, + UnitNumber: types.NewInt32(1), + }, + }, + &types.VirtualMachineVideoCard{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 500, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "Video card ", + Summary: "Video card", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 100, + UnitNumber: types.NewInt32(0), + }, + VideoRamSizeInKB: 4096, + NumDisplays: 1, + UseAutoDetect: types.NewBool(false), + Enable3DSupport: types.NewBool(false), + Use3dRenderer: "automatic", + GraphicsMemorySizeInKB: 262144, + }, + &types.VirtualMachineVMCIDevice{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 12000, + DeviceInfo: &types.Description{ + DynamicData: types.DynamicData{}, + Label: "VMCI device", + Summary: "Device on the virtual machine PCI bus that provides support for the virtual machine communication interface", + }, + Backing: nil, + Connectable: (*types.VirtualDeviceConnectInfo)(nil), + SlotInfo: nil, + ControllerKey: 100, + UnitNumber: types.NewInt32(17), + }, + Id: -1, + AllowUnrestrictedCommunication: types.NewBool(false), + FilterEnable: types.NewBool(true), + FilterInfo: (*types.VirtualMachineVMCIDeviceFilterInfo)(nil), + }, +} + +// EthernetCard template for types.VirtualEthernetCard +var EthernetCard = types.VirtualE1000{ + VirtualEthernetCard: types.VirtualEthernetCard{ + VirtualDevice: types.VirtualDevice{ + DynamicData: types.DynamicData{}, + Key: 4000, + Backing: &types.VirtualEthernetCardNetworkBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ + VirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{}, + DeviceName: "VM Network", + UseAutoDetect: types.NewBool(false), + }, + Network: (*types.ManagedObjectReference)(nil), + InPassthroughMode: types.NewBool(false), + }, + Connectable: &types.VirtualDeviceConnectInfo{ + DynamicData: types.DynamicData{}, + StartConnected: true, + AllowGuestControl: true, + Connected: false, + Status: "untried", + }, + SlotInfo: &types.VirtualDevicePciBusSlotInfo{ + VirtualDeviceBusSlotInfo: types.VirtualDeviceBusSlotInfo{}, + PciSlotNumber: 32, + }, + ControllerKey: 100, + UnitNumber: types.NewInt32(7), + }, + AddressType: "generated", + MacAddress: "", + WakeOnLanEnabled: types.NewBool(true), + }, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/file_manager.go b/vendor/github.com/vmware/govmomi/simulator/file_manager.go new file mode 100644 index 00000000000..6e76b5fd02e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/file_manager.go @@ -0,0 +1,251 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "io" + "os" + "path" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type FileManager struct { + mo.FileManager +} + +func NewFileManager(ref types.ManagedObjectReference) object.Reference { + m := &FileManager{} + m.Self = ref + return m +} + +func (f *FileManager) findDatastore(ref mo.Reference, name string) (*Datastore, types.BaseMethodFault) { + var refs []types.ManagedObjectReference + + switch obj := ref.(type) { + case *Folder: + refs = obj.ChildEntity + case *StoragePod: + refs = obj.ChildEntity + } + + for _, ref := range refs { + switch obj := Map.Get(ref).(type) { + case *Datastore: + if obj.Name == name { + return obj, nil + } + case *Folder, *StoragePod: + ds, _ := f.findDatastore(obj, name) + if ds != nil { + return ds, nil + } + } + } + + return nil, &types.InvalidDatastore{Name: name} +} + +func (f *FileManager) resolve(dc *types.ManagedObjectReference, name string) (string, types.BaseMethodFault) { + p, fault := parseDatastorePath(name) + if fault != nil { + return "", fault + } + + if dc == nil { + if Map.IsESX() { + dc = &esx.Datacenter.Self + } else { + return "", &types.InvalidArgument{InvalidProperty: "dc"} + } + } + + folder := Map.Get(*dc).(*mo.Datacenter).DatastoreFolder + + ds, fault := f.findDatastore(Map.Get(folder), p.Datastore) + if fault != nil { + return "", fault + } + + dir := ds.Info.GetDatastoreInfo().Url + + return path.Join(dir, p.Path), nil +} + +func (f *FileManager) fault(name string, err error, fault types.BaseFileFault) types.BaseMethodFault { + switch { + case os.IsNotExist(err): + fault = new(types.FileNotFound) + case os.IsExist(err): + fault = new(types.FileAlreadyExists) + } + + fault.GetFileFault().File = name + + return fault.(types.BaseMethodFault) +} + +func (f *FileManager) deleteDatastoreFile(req *types.DeleteDatastoreFile_Task) types.BaseMethodFault { + file, fault := f.resolve(req.Datacenter, req.Name) + if fault != nil { + return fault + } + + _, err := os.Stat(file) + if err != nil { + if os.IsNotExist(err) { + return f.fault(file, err, new(types.CannotDeleteFile)) + } + } + + err = os.RemoveAll(file) + if err != nil { + return f.fault(file, err, new(types.CannotDeleteFile)) + } + + return nil +} + +func (f *FileManager) DeleteDatastoreFileTask(req *types.DeleteDatastoreFile_Task) soap.HasFault { + task := CreateTask(f, "deleteDatastoreFile", func(*Task) (types.AnyType, types.BaseMethodFault) { + return nil, f.deleteDatastoreFile(req) + }) + + return &methods.DeleteDatastoreFile_TaskBody{ + Res: &types.DeleteDatastoreFile_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (f *FileManager) MakeDirectory(req *types.MakeDirectory) soap.HasFault { + body := &methods.MakeDirectoryBody{} + + name, fault := f.resolve(req.Datacenter, req.Name) + if fault != nil { + body.Fault_ = Fault("", fault) + return body + } + + mkdir := os.Mkdir + + if isTrue(req.CreateParentDirectories) { + mkdir = os.MkdirAll + } + + err := mkdir(name, 0700) + if err != nil { + fault = f.fault(req.Name, err, new(types.CannotCreateFile)) + body.Fault_ = Fault(err.Error(), fault) + return body + } + + return body +} + +func (f *FileManager) moveDatastoreFile(req *types.MoveDatastoreFile_Task) types.BaseMethodFault { + src, fault := f.resolve(req.SourceDatacenter, req.SourceName) + if fault != nil { + return fault + } + 
+ dst, fault := f.resolve(req.DestinationDatacenter, req.DestinationName) + if fault != nil { + return fault + } + + if !isTrue(req.Force) { + _, err := os.Stat(dst) + if err == nil { + return f.fault(dst, nil, new(types.FileAlreadyExistsFault)) + } + } + + err := os.Rename(src, dst) + if err != nil { + return f.fault(src, err, new(types.CannotAccessFile)) + } + + return nil +} + +func (f *FileManager) MoveDatastoreFileTask(req *types.MoveDatastoreFile_Task) soap.HasFault { + task := CreateTask(f, "moveDatastoreFile", func(*Task) (types.AnyType, types.BaseMethodFault) { + return nil, f.moveDatastoreFile(req) + }) + + return &methods.MoveDatastoreFile_TaskBody{ + Res: &types.MoveDatastoreFile_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (f *FileManager) copyDatastoreFile(req *types.CopyDatastoreFile_Task) types.BaseMethodFault { + src, fault := f.resolve(req.SourceDatacenter, req.SourceName) + if fault != nil { + return fault + } + + dst, fault := f.resolve(req.DestinationDatacenter, req.DestinationName) + if fault != nil { + return fault + } + + if !isTrue(req.Force) { + _, err := os.Stat(dst) + if err == nil { + return f.fault(dst, nil, new(types.FileAlreadyExistsFault)) + } + } + + r, err := os.Open(src) + if err != nil { + return f.fault(dst, err, new(types.CannotAccessFile)) + } + defer r.Close() + + w, err := os.Create(dst) + if err != nil { + return f.fault(dst, err, new(types.CannotCreateFile)) + } + defer w.Close() + + if _, err = io.Copy(w, r); err != nil { + return f.fault(dst, err, new(types.CannotCreateFile)) + } + + return nil +} + +func (f *FileManager) CopyDatastoreFileTask(req *types.CopyDatastoreFile_Task) soap.HasFault { + task := CreateTask(f, "copyDatastoreFile", func(*Task) (types.AnyType, types.BaseMethodFault) { + return nil, f.copyDatastoreFile(req) + }) + + return &methods.CopyDatastoreFile_TaskBody{ + Res: &types.CopyDatastoreFile_TaskResponse{ + Returnval: task.Run(), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/folder.go b/vendor/github.com/vmware/govmomi/simulator/folder.go new file mode 100644 index 00000000000..f5dea3291e5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/folder.go @@ -0,0 +1,471 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "fmt" + "math/rand" + "path" + "sync" + + "github.com/google/uuid" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type Folder struct { + mo.Folder + + m sync.Mutex +} + +// update references when objects are added/removed from a Folder +func (f *Folder) update(o mo.Reference, u func(types.ManagedObjectReference, []types.ManagedObjectReference) []types.ManagedObjectReference) { + ref := o.Reference() + + if f.Parent == nil { + return // this is the root folder + } + + switch ref.Type { + case "Datacenter", "Folder": + return // nothing to update + } + + dc := Map.getEntityDatacenter(f) + + switch ref.Type { + case "Network", "DistributedVirtualSwitch", "DistributedVirtualPortgroup": + dc.Network = u(ref, dc.Network) + case "Datastore": + dc.Datastore = u(ref, dc.Datastore) + } +} + +func networkSummary(n *mo.Network) *types.NetworkSummary { + return &types.NetworkSummary{ + Network: &n.Self, + Name: n.Name, + Accessible: true, + } +} + +func (f *Folder) putChild(o mo.Entity) { + Map.PutEntity(f, o) + + f.m.Lock() + defer f.m.Unlock() + + f.ChildEntity = AddReference(o.Reference(), f.ChildEntity) + + f.update(o, AddReference) + + switch e := o.(type) { + case *mo.Network: + e.Summary = networkSummary(e) + case *mo.OpaqueNetwork: + e.Summary = networkSummary(&e.Network) + case *DistributedVirtualPortgroup: + e.Summary = networkSummary(&e.Network) + } +} + +func (f *Folder) removeChild(o mo.Reference) { + Map.Remove(o.Reference()) + + f.m.Lock() + defer f.m.Unlock() + + f.ChildEntity = RemoveReference(o.Reference(), f.ChildEntity) + + f.update(o, RemoveReference) +} + +func (f *Folder) hasChildType(kind string) bool { + for _, t := range f.ChildType { + if t == kind { + return true + } + } + return false +} + +func (f *Folder) typeNotSupported() *soap.Fault { + return Fault(fmt.Sprintf("%s supports types: %#v", f.Self, f.ChildType), &types.NotSupported{}) +} + +type addStandaloneHost struct { + *Folder + + req *types.AddStandaloneHost_Task +} + +func (add *addStandaloneHost) Run(task *Task) (types.AnyType, types.BaseMethodFault) { + host, err := CreateStandaloneHost(add.Folder, add.req.Spec) + if err != nil { + return nil, err + } + + if add.req.AddConnected { + host.Runtime.ConnectionState = types.HostSystemConnectionStateConnected + } + + return host.Reference(), nil +} + +func (f *Folder) AddStandaloneHostTask(a *types.AddStandaloneHost_Task) soap.HasFault { + r := &methods.AddStandaloneHost_TaskBody{} + + if f.hasChildType("ComputeResource") && f.hasChildType("Folder") { + r.Res = &types.AddStandaloneHost_TaskResponse{ + Returnval: NewTask(&addStandaloneHost{f, a}).Run(), + } + } else { + r.Fault_ = f.typeNotSupported() + } + + return r +} + +func (f *Folder) CreateFolder(c *types.CreateFolder) soap.HasFault { + r := &methods.CreateFolderBody{} + + if f.hasChildType("Folder") { + folder := &Folder{} + + folder.Name = c.Name + folder.ChildType = f.ChildType + + f.putChild(folder) + + r.Res = &types.CreateFolderResponse{ + Returnval: folder.Self, + } + } else { + r.Fault_ = f.typeNotSupported() + } + + return r +} + +// StoragePod aka "Datastore Cluster" +type StoragePod struct { + mo.StoragePod +} + +func (f *Folder) CreateStoragePod(c *types.CreateStoragePod) soap.HasFault { + r := &methods.CreateStoragePodBody{} + + if f.hasChildType("StoragePod") { + pod := &StoragePod{} + + pod.Name = c.Name + pod.ChildType = 
[]string{"Datastore"} + + f.putChild(pod) + + r.Res = &types.CreateStoragePodResponse{ + Returnval: pod.Self, + } + } else { + r.Fault_ = f.typeNotSupported() + } + + return r +} + +func (p *StoragePod) MoveIntoFolderTask(c *types.MoveIntoFolder_Task) soap.HasFault { + return (&Folder{Folder: p.Folder}).MoveIntoFolderTask(c) +} + +func (f *Folder) CreateDatacenter(c *types.CreateDatacenter) soap.HasFault { + r := &methods.CreateDatacenterBody{} + + if f.hasChildType("Datacenter") && f.hasChildType("Folder") { + dc := &mo.Datacenter{} + + dc.Name = c.Name + + f.putChild(dc) + + createDatacenterFolders(dc, true) + + r.Res = &types.CreateDatacenterResponse{ + Returnval: dc.Self, + } + } else { + r.Fault_ = f.typeNotSupported() + } + + return r +} + +func (f *Folder) CreateClusterEx(c *types.CreateClusterEx) soap.HasFault { + r := &methods.CreateClusterExBody{} + + if f.hasChildType("ComputeResource") && f.hasChildType("Folder") { + cluster, err := CreateClusterComputeResource(f, c.Name, c.Spec) + if err != nil { + r.Fault_ = Fault("", err) + return r + } + + r.Res = &types.CreateClusterExResponse{ + Returnval: cluster.Self, + } + } else { + r.Fault_ = f.typeNotSupported() + } + + return r +} + +type createVM struct { + *Folder + + req *types.CreateVM_Task + + register bool +} + +func (c *createVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) { + vm, err := NewVirtualMachine(c.Folder.Self, &c.req.Config) + if err != nil { + return nil, err + } + + vm.ResourcePool = &c.req.Pool + + if c.req.Host == nil { + var hosts []types.ManagedObjectReference + + pool := Map.Get(c.req.Pool).(mo.Entity) + + switch cr := Map.getEntityComputeResource(pool).(type) { + case *mo.ComputeResource: + hosts = cr.Host + case *ClusterComputeResource: + hosts = cr.Host + } + + // Assuming for now that all hosts have access to the datastore + host := hosts[rand.Intn(len(hosts))] + vm.Runtime.Host = &host + } else { + vm.Runtime.Host = c.req.Host + } + + vm.Guest = &types.GuestInfo{ + ToolsStatus: types.VirtualMachineToolsStatusToolsNotInstalled, + ToolsVersion: "0", + } + + vm.Summary.Guest = &types.VirtualMachineGuestSummary{ + ToolsStatus: vm.Guest.ToolsStatus, + } + vm.Summary.Config.VmPathName = vm.Config.Files.VmPathName + vm.Summary.Runtime.Host = vm.Runtime.Host + + err = vm.create(&c.req.Config, c.register) + if err != nil { + return nil, err + } + + c.Folder.putChild(vm) + + host := Map.Get(*vm.Runtime.Host).(*HostSystem) + host.Vm = append(host.Vm, vm.Self) + + for i := range vm.Datastore { + ds := Map.Get(vm.Datastore[i]).(*Datastore) + ds.Vm = append(ds.Vm, vm.Self) + } + + switch rp := Map.Get(*vm.ResourcePool).(type) { + case *ResourcePool: + rp.Vm = append(rp.Vm, vm.Self) + case *VirtualApp: + rp.Vm = append(rp.Vm, vm.Self) + } + + return vm.Reference(), nil +} + +func (f *Folder) CreateVMTask(c *types.CreateVM_Task) soap.HasFault { + return &methods.CreateVM_TaskBody{ + Res: &types.CreateVM_TaskResponse{ + Returnval: NewTask(&createVM{f, c, false}).Run(), + }, + } +} + +type registerVM struct { + *Folder + + req *types.RegisterVM_Task +} + +func (c *registerVM) Run(task *Task) (types.AnyType, types.BaseMethodFault) { + host := c.req.Host + pool := c.req.Pool + + if c.req.AsTemplate { + if host == nil { + return nil, &types.InvalidArgument{InvalidProperty: "host"} + } else if pool != nil { + return nil, &types.InvalidArgument{InvalidProperty: "pool"} + } + + pool = hostParent(&Map.Get(*host).(*HostSystem).HostSystem).ResourcePool + } else { + if pool == nil { + return nil, 
&types.InvalidArgument{InvalidProperty: "pool"} + } + } + + if c.req.Path == "" { + return nil, &types.InvalidArgument{InvalidProperty: "path"} + } + + s := Map.SearchIndex() + r := s.FindByDatastorePath(&types.FindByDatastorePath{ + This: s.Reference(), + Path: c.req.Path, + Datacenter: Map.getEntityDatacenter(c.Folder).Reference(), + }) + + if ref := r.(*methods.FindByDatastorePathBody).Res.Returnval; ref != nil { + return nil, &types.AlreadyExists{Name: ref.Value} + } + + if c.req.Name == "" { + p, err := parseDatastorePath(c.req.Path) + if err != nil { + return nil, err + } + + c.req.Name = path.Dir(p.Path) + } + + create := NewTask(&createVM{ + Folder: c.Folder, + register: true, + req: &types.CreateVM_Task{ + This: c.Folder.Reference(), + Config: types.VirtualMachineConfigSpec{ + Name: c.req.Name, + Files: &types.VirtualMachineFileInfo{ + VmPathName: c.req.Path, + }, + }, + Pool: *pool, + Host: host, + }, + }) + + create.Run() + + if create.Info.Error != nil { + return nil, create.Info.Error.Fault + } + + return create.Info.Result, nil +} + +func (f *Folder) RegisterVMTask(c *types.RegisterVM_Task) soap.HasFault { + return &methods.RegisterVM_TaskBody{ + Res: &types.RegisterVM_TaskResponse{ + Returnval: NewTask(®isterVM{f, c}).Run(), + }, + } +} + +func (f *Folder) MoveIntoFolderTask(c *types.MoveIntoFolder_Task) soap.HasFault { + task := CreateTask(f, "moveIntoFolder", func(t *Task) (types.AnyType, types.BaseMethodFault) { + for _, ref := range c.List { + obj := Map.Get(ref).(mo.Entity) + + parent, ok := Map.Get(*(obj.Entity()).Parent).(*Folder) + + if !ok || !f.hasChildType(ref.Type) { + return nil, &types.NotSupported{} + } + + parent.removeChild(ref) + f.putChild(obj) + } + + return nil, nil + }) + + return &methods.MoveIntoFolder_TaskBody{ + Res: &types.MoveIntoFolder_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (f *Folder) CreateDVSTask(req *types.CreateDVS_Task) soap.HasFault { + task := CreateTask(f, "createDVS", func(t *Task) (types.AnyType, types.BaseMethodFault) { + spec := req.Spec.ConfigSpec.GetDVSConfigSpec() + dvs := &DistributedVirtualSwitch{} + dvs.Name = spec.Name + dvs.Entity().Name = dvs.Name + + if Map.FindByName(dvs.Name, f.ChildEntity) != nil { + return nil, &types.InvalidArgument{InvalidProperty: "name"} + } + + dvs.Uuid = uuid.New().String() + + f.putChild(dvs) + + dvs.Summary = types.DVSSummary{ + Name: dvs.Name, + Uuid: dvs.Uuid, + NumPorts: spec.NumStandalonePorts, + ProductInfo: req.Spec.ProductInfo, + Description: spec.Description, + } + + if dvs.Summary.ProductInfo == nil { + product := Map.content().About + dvs.Summary.ProductInfo = &types.DistributedVirtualSwitchProductSpec{ + Name: "DVS", + Vendor: product.Vendor, + Version: product.Version, + Build: product.Build, + ForwardingClass: "etherswitch", + } + } + + return dvs.Reference(), nil + }) + + return &methods.CreateDVS_TaskBody{ + Res: &types.CreateDVS_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (f *Folder) RenameTask(r *types.Rename_Task) soap.HasFault { + return RenameTask(f, r) +} diff --git a/vendor/github.com/vmware/govmomi/simulator/guest_id.go b/vendor/github.com/vmware/govmomi/simulator/guest_id.go new file mode 100644 index 00000000000..87cf4aaf835 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/guest_id.go @@ -0,0 +1,171 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import "github.com/vmware/govmomi/vim25/types" + +// GuestID is the list of valid types.VirtualMachineGuestOsIdentifier +var GuestID = []types.VirtualMachineGuestOsIdentifier{ + types.VirtualMachineGuestOsIdentifierDosGuest, + types.VirtualMachineGuestOsIdentifierWin31Guest, + types.VirtualMachineGuestOsIdentifierWin95Guest, + types.VirtualMachineGuestOsIdentifierWin98Guest, + types.VirtualMachineGuestOsIdentifierWinMeGuest, + types.VirtualMachineGuestOsIdentifierWinNTGuest, + types.VirtualMachineGuestOsIdentifierWin2000ProGuest, + types.VirtualMachineGuestOsIdentifierWin2000ServGuest, + types.VirtualMachineGuestOsIdentifierWin2000AdvServGuest, + types.VirtualMachineGuestOsIdentifierWinXPHomeGuest, + types.VirtualMachineGuestOsIdentifierWinXPProGuest, + types.VirtualMachineGuestOsIdentifierWinXPPro64Guest, + types.VirtualMachineGuestOsIdentifierWinNetWebGuest, + types.VirtualMachineGuestOsIdentifierWinNetStandardGuest, + types.VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest, + types.VirtualMachineGuestOsIdentifierWinNetDatacenterGuest, + types.VirtualMachineGuestOsIdentifierWinNetBusinessGuest, + types.VirtualMachineGuestOsIdentifierWinNetStandard64Guest, + types.VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest, + types.VirtualMachineGuestOsIdentifierWinLonghornGuest, + types.VirtualMachineGuestOsIdentifierWinLonghorn64Guest, + types.VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest, + types.VirtualMachineGuestOsIdentifierWinVistaGuest, + types.VirtualMachineGuestOsIdentifierWinVista64Guest, + types.VirtualMachineGuestOsIdentifierWindows7Guest, + types.VirtualMachineGuestOsIdentifierWindows7_64Guest, + types.VirtualMachineGuestOsIdentifierWindows7Server64Guest, + types.VirtualMachineGuestOsIdentifierWindows8Guest, + types.VirtualMachineGuestOsIdentifierWindows8_64Guest, + types.VirtualMachineGuestOsIdentifierWindows8Server64Guest, + types.VirtualMachineGuestOsIdentifierWindows9Guest, + types.VirtualMachineGuestOsIdentifierWindows9_64Guest, + types.VirtualMachineGuestOsIdentifierWindows9Server64Guest, + types.VirtualMachineGuestOsIdentifierWindowsHyperVGuest, + types.VirtualMachineGuestOsIdentifierFreebsdGuest, + types.VirtualMachineGuestOsIdentifierFreebsd64Guest, + types.VirtualMachineGuestOsIdentifierRedhatGuest, + types.VirtualMachineGuestOsIdentifierRhel2Guest, + types.VirtualMachineGuestOsIdentifierRhel3Guest, + types.VirtualMachineGuestOsIdentifierRhel3_64Guest, + types.VirtualMachineGuestOsIdentifierRhel4Guest, + types.VirtualMachineGuestOsIdentifierRhel4_64Guest, + types.VirtualMachineGuestOsIdentifierRhel5Guest, + types.VirtualMachineGuestOsIdentifierRhel5_64Guest, + types.VirtualMachineGuestOsIdentifierRhel6Guest, + types.VirtualMachineGuestOsIdentifierRhel6_64Guest, + types.VirtualMachineGuestOsIdentifierRhel7Guest, + types.VirtualMachineGuestOsIdentifierRhel7_64Guest, + types.VirtualMachineGuestOsIdentifierCentosGuest, + types.VirtualMachineGuestOsIdentifierCentos64Guest, + types.VirtualMachineGuestOsIdentifierCentos6Guest, + types.VirtualMachineGuestOsIdentifierCentos6_64Guest, + types.VirtualMachineGuestOsIdentifierCentos7Guest, + 
types.VirtualMachineGuestOsIdentifierCentos7_64Guest, + types.VirtualMachineGuestOsIdentifierOracleLinuxGuest, + types.VirtualMachineGuestOsIdentifierOracleLinux64Guest, + types.VirtualMachineGuestOsIdentifierOracleLinux6Guest, + types.VirtualMachineGuestOsIdentifierOracleLinux6_64Guest, + types.VirtualMachineGuestOsIdentifierOracleLinux7Guest, + types.VirtualMachineGuestOsIdentifierOracleLinux7_64Guest, + types.VirtualMachineGuestOsIdentifierSuseGuest, + types.VirtualMachineGuestOsIdentifierSuse64Guest, + types.VirtualMachineGuestOsIdentifierSlesGuest, + types.VirtualMachineGuestOsIdentifierSles64Guest, + types.VirtualMachineGuestOsIdentifierSles10Guest, + types.VirtualMachineGuestOsIdentifierSles10_64Guest, + types.VirtualMachineGuestOsIdentifierSles11Guest, + types.VirtualMachineGuestOsIdentifierSles11_64Guest, + types.VirtualMachineGuestOsIdentifierSles12Guest, + types.VirtualMachineGuestOsIdentifierSles12_64Guest, + types.VirtualMachineGuestOsIdentifierNld9Guest, + types.VirtualMachineGuestOsIdentifierOesGuest, + types.VirtualMachineGuestOsIdentifierSjdsGuest, + types.VirtualMachineGuestOsIdentifierMandrakeGuest, + types.VirtualMachineGuestOsIdentifierMandrivaGuest, + types.VirtualMachineGuestOsIdentifierMandriva64Guest, + types.VirtualMachineGuestOsIdentifierTurboLinuxGuest, + types.VirtualMachineGuestOsIdentifierTurboLinux64Guest, + types.VirtualMachineGuestOsIdentifierUbuntuGuest, + types.VirtualMachineGuestOsIdentifierUbuntu64Guest, + types.VirtualMachineGuestOsIdentifierDebian4Guest, + types.VirtualMachineGuestOsIdentifierDebian4_64Guest, + types.VirtualMachineGuestOsIdentifierDebian5Guest, + types.VirtualMachineGuestOsIdentifierDebian5_64Guest, + types.VirtualMachineGuestOsIdentifierDebian6Guest, + types.VirtualMachineGuestOsIdentifierDebian6_64Guest, + types.VirtualMachineGuestOsIdentifierDebian7Guest, + types.VirtualMachineGuestOsIdentifierDebian7_64Guest, + types.VirtualMachineGuestOsIdentifierDebian8Guest, + types.VirtualMachineGuestOsIdentifierDebian8_64Guest, + types.VirtualMachineGuestOsIdentifierDebian9Guest, + types.VirtualMachineGuestOsIdentifierDebian9_64Guest, + types.VirtualMachineGuestOsIdentifierDebian10Guest, + types.VirtualMachineGuestOsIdentifierDebian10_64Guest, + types.VirtualMachineGuestOsIdentifierAsianux3Guest, + types.VirtualMachineGuestOsIdentifierAsianux3_64Guest, + types.VirtualMachineGuestOsIdentifierAsianux4Guest, + types.VirtualMachineGuestOsIdentifierAsianux4_64Guest, + types.VirtualMachineGuestOsIdentifierAsianux5_64Guest, + types.VirtualMachineGuestOsIdentifierAsianux7_64Guest, + types.VirtualMachineGuestOsIdentifierOpensuseGuest, + types.VirtualMachineGuestOsIdentifierOpensuse64Guest, + types.VirtualMachineGuestOsIdentifierFedoraGuest, + types.VirtualMachineGuestOsIdentifierFedora64Guest, + types.VirtualMachineGuestOsIdentifierCoreos64Guest, + types.VirtualMachineGuestOsIdentifierVmwarePhoton64Guest, + types.VirtualMachineGuestOsIdentifierOther24xLinuxGuest, + types.VirtualMachineGuestOsIdentifierOther26xLinuxGuest, + types.VirtualMachineGuestOsIdentifierOtherLinuxGuest, + types.VirtualMachineGuestOsIdentifierOther3xLinuxGuest, + types.VirtualMachineGuestOsIdentifierGenericLinuxGuest, + types.VirtualMachineGuestOsIdentifierOther24xLinux64Guest, + types.VirtualMachineGuestOsIdentifierOther26xLinux64Guest, + types.VirtualMachineGuestOsIdentifierOther3xLinux64Guest, + types.VirtualMachineGuestOsIdentifierOtherLinux64Guest, + types.VirtualMachineGuestOsIdentifierSolaris6Guest, + types.VirtualMachineGuestOsIdentifierSolaris7Guest, + 
types.VirtualMachineGuestOsIdentifierSolaris8Guest, + types.VirtualMachineGuestOsIdentifierSolaris9Guest, + types.VirtualMachineGuestOsIdentifierSolaris10Guest, + types.VirtualMachineGuestOsIdentifierSolaris10_64Guest, + types.VirtualMachineGuestOsIdentifierSolaris11_64Guest, + types.VirtualMachineGuestOsIdentifierOs2Guest, + types.VirtualMachineGuestOsIdentifierEComStationGuest, + types.VirtualMachineGuestOsIdentifierEComStation2Guest, + types.VirtualMachineGuestOsIdentifierNetware4Guest, + types.VirtualMachineGuestOsIdentifierNetware5Guest, + types.VirtualMachineGuestOsIdentifierNetware6Guest, + types.VirtualMachineGuestOsIdentifierOpenServer5Guest, + types.VirtualMachineGuestOsIdentifierOpenServer6Guest, + types.VirtualMachineGuestOsIdentifierUnixWare7Guest, + types.VirtualMachineGuestOsIdentifierDarwinGuest, + types.VirtualMachineGuestOsIdentifierDarwin64Guest, + types.VirtualMachineGuestOsIdentifierDarwin10Guest, + types.VirtualMachineGuestOsIdentifierDarwin10_64Guest, + types.VirtualMachineGuestOsIdentifierDarwin11Guest, + types.VirtualMachineGuestOsIdentifierDarwin11_64Guest, + types.VirtualMachineGuestOsIdentifierDarwin12_64Guest, + types.VirtualMachineGuestOsIdentifierDarwin13_64Guest, + types.VirtualMachineGuestOsIdentifierDarwin14_64Guest, + types.VirtualMachineGuestOsIdentifierDarwin15_64Guest, + types.VirtualMachineGuestOsIdentifierDarwin16_64Guest, + types.VirtualMachineGuestOsIdentifierVmkernelGuest, + types.VirtualMachineGuestOsIdentifierVmkernel5Guest, + types.VirtualMachineGuestOsIdentifierVmkernel6Guest, + types.VirtualMachineGuestOsIdentifierVmkernel65Guest, + types.VirtualMachineGuestOsIdentifierOtherGuest, + types.VirtualMachineGuestOsIdentifierOtherGuest64, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/guest_id.sh b/vendor/github.com/vmware/govmomi/simulator/guest_id.sh new file mode 100755 index 00000000000..e7981633758 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/guest_id.sh @@ -0,0 +1,35 @@ +#!/bin/bash -e + +pushd "$(dirname "$0")" >/dev/null + +{ + cat < guest_id.go + +goimports -w guest_id.go diff --git a/vendor/github.com/vmware/govmomi/simulator/host_datastore_browser.go b/vendor/github.com/vmware/govmomi/simulator/host_datastore_browser.go new file mode 100644 index 00000000000..0f1ea8f4475 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/host_datastore_browser.go @@ -0,0 +1,254 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "io/ioutil" + "log" + "os" + "path" + "strings" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type HostDatastoreBrowser struct { + mo.HostDatastoreBrowser +} + +type searchDatastore struct { + *HostDatastoreBrowser + + DatastorePath string + SearchSpec *types.HostDatastoreBrowserSearchSpec + + res []types.HostDatastoreBrowserSearchResults + + recurse bool +} + +func (s *searchDatastore) addFile(file os.FileInfo, res *types.HostDatastoreBrowserSearchResults) { + details := s.SearchSpec.Details + if details == nil { + details = new(types.FileQueryFlags) + } + + name := file.Name() + + info := types.FileInfo{ + Path: name, + } + + var finfo types.BaseFileInfo = &info + + if details.FileSize { + info.FileSize = file.Size() + } + + if details.Modification { + mtime := file.ModTime() + info.Modification = &mtime + } + + if isTrue(details.FileOwner) { + // Assume for now this process created all files in the datastore + user := os.Getenv("USER") + + info.Owner = user + } + + if file.IsDir() { + finfo = &types.FolderFileInfo{FileInfo: info} + } else if details.FileType { + switch path.Ext(name) { + case ".img": + finfo = &types.FloppyImageFileInfo{FileInfo: info} + case ".iso": + finfo = &types.IsoImageFileInfo{FileInfo: info} + case ".log": + finfo = &types.VmLogFileInfo{FileInfo: info} + case ".nvram": + finfo = &types.VmNvramFileInfo{FileInfo: info} + case ".vmdk": + // TODO: lookup device to set other fields + finfo = &types.VmDiskFileInfo{FileInfo: info} + case ".vmx": + finfo = &types.VmConfigFileInfo{FileInfo: info} + } + } + + res.File = append(res.File, finfo) +} + +func (s *searchDatastore) queryMatch(file os.FileInfo) bool { + if len(s.SearchSpec.Query) == 0 { + return true + } + + name := file.Name() + ext := path.Ext(name) + + for _, q := range s.SearchSpec.Query { + switch q.(type) { + case *types.FileQuery: + return true + case *types.FolderFileQuery: + if file.IsDir() { + return true + } + case *types.FloppyImageFileQuery: + if ext == ".img" { + return true + } + case *types.IsoImageFileQuery: + if ext == ".iso" { + return true + } + case *types.VmConfigFileQuery: + if ext == ".vmx" { + // TODO: check Filter and Details fields + return true + } + case *types.VmDiskFileQuery: + if ext == ".vmdk" { + if strings.HasSuffix(name, "-flat.vmdk") { + // only matches the descriptor, not the backing file(s) + return false + } + // TODO: check Filter and Details fields + return true + } + case *types.VmLogFileQuery: + if ext == ".log" { + return strings.HasPrefix(name, "vmware") + } + case *types.VmNvramFileQuery: + if ext == ".nvram" { + return true + } + case *types.VmSnapshotFileQuery: + if ext == ".vmsn" { + return true + } + } + } + + return false +} + +func (s *searchDatastore) search(ds *types.ManagedObjectReference, folder string, dir string) error { + files, err := ioutil.ReadDir(dir) + if err != nil { + log.Printf("search %s: %s", dir, err) + return err + } + + res := types.HostDatastoreBrowserSearchResults{ + Datastore: ds, + FolderPath: folder, + } + + for _, file := range files { + name := file.Name() + + if s.queryMatch(file) { + for _, m := range s.SearchSpec.MatchPattern { + if ok, _ := path.Match(m, name); ok { + s.addFile(file, &res) + break + } + } + } + + if s.recurse && file.IsDir() { + _ = s.search(ds, path.Join(folder, name), path.Join(dir, name)) + } + } + + s.res = append(s.res, res) + + return nil +} + 
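For context on the datastore browser added above: queryMatch and addFile dispatch on file extension to choose the FileQuery/FileInfo variants, and MatchPattern is applied with path.Match. Below is a minimal sketch (not part of this patch) of the kind of search spec that logic is written to handle, built only from vim25/types; the "*.vmdk" pattern and the detail flags are illustrative assumptions, not values taken from this change.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Ask for *.vmdk descriptors with size and modification details; this is
	// the VmDiskFileQuery + FileQueryFlags path handled by queryMatch/addFile.
	spec := types.HostDatastoreBrowserSearchSpec{
		MatchPattern: []string{"*.vmdk"}, // hypothetical pattern
		Query:        []types.BaseFileQuery{&types.VmDiskFileQuery{}},
		Details: &types.FileQueryFlags{
			FileType:     true,
			FileSize:     true,
			Modification: true,
		},
	}
	fmt.Printf("%+v\n", spec)
}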
+func (s *searchDatastore) Run(Task *Task) (types.AnyType, types.BaseMethodFault) { + p, fault := parseDatastorePath(s.DatastorePath) + if fault != nil { + return nil, fault + } + + ref := Map.FindByName(p.Datastore, s.Datastore) + if ref == nil { + return nil, &types.InvalidDatastore{Name: p.Datastore} + } + + ds := ref.(*Datastore) + + dir := path.Join(ds.Info.GetDatastoreInfo().Url, p.Path) + + err := s.search(&ds.Self, s.DatastorePath, dir) + if err != nil { + ff := types.FileFault{ + File: p.Path, + } + + if os.IsNotExist(err) { + return nil, &types.FileNotFound{FileFault: ff} + } + + return nil, &types.InvalidArgument{InvalidProperty: p.Path} + } + + if s.recurse { + return types.ArrayOfHostDatastoreBrowserSearchResults{ + HostDatastoreBrowserSearchResults: s.res, + }, nil + } + + return s.res[0], nil +} + +func (b *HostDatastoreBrowser) SearchDatastoreTask(s *types.SearchDatastore_Task) soap.HasFault { + task := NewTask(&searchDatastore{ + HostDatastoreBrowser: b, + DatastorePath: s.DatastorePath, + SearchSpec: s.SearchSpec, + }) + + return &methods.SearchDatastore_TaskBody{ + Res: &types.SearchDatastore_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (b *HostDatastoreBrowser) SearchDatastoreSubFoldersTask(s *types.SearchDatastoreSubFolders_Task) soap.HasFault { + task := NewTask(&searchDatastore{ + HostDatastoreBrowser: b, + DatastorePath: s.DatastorePath, + SearchSpec: s.SearchSpec, + recurse: true, + }) + + return &methods.SearchDatastoreSubFolders_TaskBody{ + Res: &types.SearchDatastoreSubFolders_TaskResponse{ + Returnval: task.Run(), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/host_datastore_system.go b/vendor/github.com/vmware/govmomi/simulator/host_datastore_system.go new file mode 100644 index 00000000000..ff3fce83d4a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/host_datastore_system.go @@ -0,0 +1,161 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "os" + "path" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type HostDatastoreSystem struct { + mo.HostDatastoreSystem + + Host *mo.HostSystem +} + +func (dss *HostDatastoreSystem) add(ds *Datastore) *soap.Fault { + info := ds.Info.GetDatastoreInfo() + + info.Name = ds.Name + + if e := Map.FindByName(ds.Name, dss.Datastore); e != nil { + return Fault(e.Reference().Value, &types.DuplicateName{ + Name: ds.Name, + Object: e.Reference(), + }) + } + + fi, err := os.Stat(info.Url) + if err == nil && !fi.IsDir() { + err = os.ErrInvalid + } + + if err != nil { + switch { + case os.IsNotExist(err): + return Fault(err.Error(), &types.NotFound{}) + default: + return Fault(err.Error(), &types.HostConfigFault{}) + } + } + + folder := Map.getEntityFolder(dss.Host, "datastore") + ds.Self.Type = typeName(ds) + // Datastore is the only type where create methods do not include the parent (Folder in this case), + // but we need the moref to be unique per DC/datastoreFolder, but not per-HostSystem. + ds.Self.Value += "@" + folder.Self.Value + // TODO: name should be made unique in the case of Local ds type + + ds.Summary.Datastore = &ds.Self + ds.Summary.Name = ds.Name + ds.Summary.Url = info.Url + + dss.Datastore = append(dss.Datastore, ds.Self) + dss.Host.Datastore = dss.Datastore + parent := hostParent(dss.Host) + parent.Datastore = AddReference(ds.Self, parent.Datastore) + + browser := &HostDatastoreBrowser{} + browser.Datastore = dss.Datastore + ds.Browser = Map.Put(browser).Reference() + + folder.putChild(ds) + + return nil +} + +func (dss *HostDatastoreSystem) CreateLocalDatastore(c *types.CreateLocalDatastore) soap.HasFault { + r := &methods.CreateLocalDatastoreBody{} + + ds := &Datastore{} + ds.Name = c.Name + ds.Self.Value = c.Path + + ds.Info = &types.LocalDatastoreInfo{ + DatastoreInfo: types.DatastoreInfo{ + Name: c.Name, + Url: c.Path, + }, + Path: c.Path, + } + + ds.Summary.Type = "local" + + if err := dss.add(ds); err != nil { + r.Fault_ = err + return r + } + + ds.Host = append(ds.Host, types.DatastoreHostMount{ + Key: dss.Host.Reference(), + MountInfo: types.HostMountInfo{ + AccessMode: string(types.HostMountModeReadWrite), + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + }, + }) + + _ = ds.RefreshDatastore(&types.RefreshDatastore{This: ds.Self}) + + r.Res = &types.CreateLocalDatastoreResponse{ + Returnval: ds.Self, + } + + return r +} + +func (dss *HostDatastoreSystem) CreateNasDatastore(c *types.CreateNasDatastore) soap.HasFault { + r := &methods.CreateNasDatastoreBody{} + + ds := &Datastore{} + ds.Name = path.Base(c.Spec.LocalPath) + ds.Self.Value = c.Spec.RemoteHost + ":" + c.Spec.RemotePath + + ds.Info = &types.NasDatastoreInfo{ + DatastoreInfo: types.DatastoreInfo{ + Url: c.Spec.LocalPath, + }, + Nas: &types.HostNasVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Name: c.Spec.LocalPath, + Type: c.Spec.Type, + }, + RemoteHost: c.Spec.RemoteHost, + RemotePath: c.Spec.RemotePath, + }, + } + + ds.Summary.Type = c.Spec.Type + + if err := dss.add(ds); err != nil { + r.Fault_ = err + return r + } + + _ = ds.RefreshDatastore(&types.RefreshDatastore{This: ds.Self}) + + r.Res = &types.CreateNasDatastoreResponse{ + Returnval: ds.Self, + } + + return r +} diff --git a/vendor/github.com/vmware/govmomi/simulator/host_firewall_system.go b/vendor/github.com/vmware/govmomi/simulator/host_firewall_system.go 
new file mode 100644 index 00000000000..fd596386aa1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/host_firewall_system.go @@ -0,0 +1,87 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type HostFirewallSystem struct { + mo.HostFirewallSystem +} + +func NewHostFirewallSystem(_ *mo.HostSystem) *HostFirewallSystem { + info := esx.HostFirewallInfo + + return &HostFirewallSystem{ + HostFirewallSystem: mo.HostFirewallSystem{ + FirewallInfo: &info, + }, + } +} + +func DisableRuleset(info *types.HostFirewallInfo, id string) bool { + for i := range info.Ruleset { + if info.Ruleset[i].Key == id { + info.Ruleset[i].Enabled = false + return true + } + } + + return false +} + +func (s *HostFirewallSystem) DisableRuleset(req *types.DisableRuleset) soap.HasFault { + body := &methods.DisableRulesetBody{} + + if DisableRuleset(s.HostFirewallSystem.FirewallInfo, req.Id) { + body.Res = new(types.DisableRulesetResponse) + return body + } + + body.Fault_ = Fault("", &types.NotFound{}) + + return body +} + +func EnableRuleset(info *types.HostFirewallInfo, id string) bool { + for i := range info.Ruleset { + if info.Ruleset[i].Key == id { + info.Ruleset[i].Enabled = true + return true + } + } + + return false +} + +func (s *HostFirewallSystem) EnableRuleset(req *types.EnableRuleset) soap.HasFault { + body := &methods.EnableRulesetBody{} + + if EnableRuleset(s.HostFirewallSystem.FirewallInfo, req.Id) { + body.Res = new(types.EnableRulesetResponse) + return body + } + + body.Fault_ = Fault("", &types.NotFound{}) + + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/host_network_system.go b/vendor/github.com/vmware/govmomi/simulator/host_network_system.go new file mode 100644 index 00000000000..64a2bd759c6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/host_network_system.go @@ -0,0 +1,171 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
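The host_firewall_system.go file above exports package-level DisableRuleset/EnableRuleset helpers that toggle a ruleset in a HostFirewallInfo by key. A small sketch of how those helpers behave, assuming the simulator package import path shown in the diff; the "sshServer" key is a hypothetical example, not something this patch defines.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/simulator"
	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// "sshServer" is a hypothetical ruleset key used purely for illustration.
	info := &types.HostFirewallInfo{
		Ruleset: []types.HostFirewallRuleset{{Key: "sshServer", Enabled: true}},
	}

	fmt.Println(simulator.DisableRuleset(info, "sshServer")) // true: key found, rule disabled
	fmt.Println(info.Ruleset[0].Enabled)                     // false
}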
+*/ + +package simulator + +import ( + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type HostNetworkSystem struct { + mo.HostNetworkSystem + + Host *mo.HostSystem +} + +func NewHostNetworkSystem(host *mo.HostSystem) *HostNetworkSystem { + return &HostNetworkSystem{ + Host: host, + HostNetworkSystem: mo.HostNetworkSystem{ + NetworkInfo: &types.HostNetworkInfo{ + Vswitch: []types.HostVirtualSwitch{ + { + Name: "vSwitch0", + Portgroup: []string{"VM Network"}, + }, + }, + }, + }, + } +} + +func (s *HostNetworkSystem) folder() *Folder { + f := Map.getEntityDatacenter(s.Host).NetworkFolder + return Map.Get(f).(*Folder) +} + +func (s *HostNetworkSystem) AddVirtualSwitch(c *types.AddVirtualSwitch) soap.HasFault { + r := &methods.AddVirtualSwitchBody{} + + for _, vswitch := range s.NetworkInfo.Vswitch { + if vswitch.Name == c.VswitchName { + r.Fault_ = Fault("", &types.AlreadyExists{Name: c.VswitchName}) + return r + } + } + + s.NetworkInfo.Vswitch = append(s.NetworkInfo.Vswitch, types.HostVirtualSwitch{ + Name: c.VswitchName, + }) + + r.Res = &types.AddVirtualSwitchResponse{} + + return r +} + +func (s *HostNetworkSystem) RemoveVirtualSwitch(c *types.RemoveVirtualSwitch) soap.HasFault { + r := &methods.RemoveVirtualSwitchBody{} + + vs := s.NetworkInfo.Vswitch + + for i, v := range vs { + if v.Name == c.VswitchName { + s.NetworkInfo.Vswitch = append(vs[:i], vs[i+1:]...) + r.Res = &types.RemoveVirtualSwitchResponse{} + return r + } + } + + r.Fault_ = Fault("", &types.NotFound{}) + + return r +} + +func (s *HostNetworkSystem) AddPortGroup(c *types.AddPortGroup) soap.HasFault { + var vswitch *types.HostVirtualSwitch + + r := &methods.AddPortGroupBody{} + + if c.Portgrp.Name == "" { + r.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "name"}) + return r + } + + for i := range s.NetworkInfo.Vswitch { + if s.NetworkInfo.Vswitch[i].Name == c.Portgrp.VswitchName { + vswitch = &s.NetworkInfo.Vswitch[i] + break + } + } + + if vswitch == nil { + r.Fault_ = Fault("", &types.NotFound{}) + return r + } + + network := &mo.Network{} + network.Name = c.Portgrp.Name + network.Entity().Name = network.Name + + folder := s.folder() + + if obj := Map.FindByName(c.Portgrp.Name, folder.ChildEntity); obj != nil { + r.Fault_ = Fault("", &types.DuplicateName{ + Name: c.Portgrp.Name, + Object: obj.Reference(), + }) + + return r + } + + folder.putChild(network) + + vswitch.Portgroup = append(vswitch.Portgroup, c.Portgrp.Name) + r.Res = &types.AddPortGroupResponse{} + + return r +} + +func (s *HostNetworkSystem) RemovePortGroup(c *types.RemovePortGroup) soap.HasFault { + var vswitch *types.HostVirtualSwitch + + r := &methods.RemovePortGroupBody{} + + for i, v := range s.NetworkInfo.Vswitch { + for j, pg := range v.Portgroup { + if pg == c.PgName { + vswitch = &s.NetworkInfo.Vswitch[i] + vswitch.Portgroup = append(vswitch.Portgroup[:j], vswitch.Portgroup[j+1:]...) 
+ } + } + } + + if vswitch == nil { + r.Fault_ = Fault("", &types.NotFound{}) + return r + } + + folder := s.folder() + e := Map.FindByName(c.PgName, folder.ChildEntity) + folder.removeChild(e.Reference()) + + r.Res = &types.RemovePortGroupResponse{} + + return r +} + +func (s *HostNetworkSystem) UpdateNetworkConfig(req *types.UpdateNetworkConfig) soap.HasFault { + s.NetworkConfig = &req.Config + + return &methods.UpdateNetworkConfigBody{ + Res: &types.UpdateNetworkConfigResponse{ + Returnval: types.HostNetworkConfigResult{}, + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/host_system.go b/vendor/github.com/vmware/govmomi/simulator/host_system.go new file mode 100644 index 00000000000..600dbcf3885 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/host_system.go @@ -0,0 +1,180 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "time" + + "github.com/google/uuid" + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type HostSystem struct { + mo.HostSystem +} + +func NewHostSystem(host mo.HostSystem) *HostSystem { + now := time.Now() + + hs := &HostSystem{ + HostSystem: host, + } + + hs.Name = hs.Summary.Config.Name + hs.Summary.Runtime = &hs.Runtime + hs.Summary.Runtime.BootTime = &now + + id := uuid.New().String() + + hardware := *host.Summary.Hardware + hs.Summary.Hardware = &hardware + hs.Summary.Hardware.Uuid = id + + info := *esx.HostHardwareInfo + info.SystemInfo.Uuid = id + hs.Hardware = &info + + config := []struct { + ref **types.ManagedObjectReference + obj mo.Reference + }{ + {&hs.ConfigManager.DatastoreSystem, &HostDatastoreSystem{Host: &hs.HostSystem}}, + {&hs.ConfigManager.NetworkSystem, NewHostNetworkSystem(&hs.HostSystem)}, + {&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, esx.Setting)}, + {&hs.ConfigManager.FirewallSystem, NewHostFirewallSystem(&hs.HostSystem)}, + } + + for _, c := range config { + ref := Map.Put(c.obj).Reference() + + *c.ref = &ref + } + + return hs +} + +func hostParent(host *mo.HostSystem) *mo.ComputeResource { + switch parent := Map.Get(*host.Parent).(type) { + case *mo.ComputeResource: + return parent + case *ClusterComputeResource: + return &parent.ComputeResource + default: + return nil + } +} + +func addComputeResource(s *types.ComputeResourceSummary, h *HostSystem) { + s.TotalCpu += h.Summary.Hardware.CpuMhz + s.TotalMemory += h.Summary.Hardware.MemorySize + s.NumCpuCores += h.Summary.Hardware.NumCpuCores + s.NumCpuThreads += h.Summary.Hardware.NumCpuThreads + s.EffectiveCpu += h.Summary.Hardware.CpuMhz + s.EffectiveMemory += h.Summary.Hardware.MemorySize + s.NumHosts++ + s.NumEffectiveHosts++ + s.OverallStatus = types.ManagedEntityStatusGreen +} + +// CreateDefaultESX creates a standalone ESX +// Adds objects of type: Datacenter, Network, ComputeResource, ResourcePool and 
HostSystem +func CreateDefaultESX(f *Folder) { + dc := &esx.Datacenter + f.putChild(dc) + createDatacenterFolders(dc, false) + + host := NewHostSystem(esx.HostSystem) + + summary := new(types.ComputeResourceSummary) + addComputeResource(summary, host) + + cr := &mo.ComputeResource{Summary: summary} + cr.Self = *host.Parent + cr.Name = host.Name + cr.Host = append(cr.Host, host.Reference()) + Map.PutEntity(cr, host) + + pool := NewResourcePool() + cr.ResourcePool = &pool.Self + Map.PutEntity(cr, pool) + pool.Owner = cr.Self + + Map.Get(dc.HostFolder).(*Folder).putChild(cr) +} + +// CreateStandaloneHost uses esx.HostSystem as a template, applying the given spec +// and creating the ComputeResource parent and ResourcePool sibling. +func CreateStandaloneHost(f *Folder, spec types.HostConnectSpec) (*HostSystem, types.BaseMethodFault) { + if spec.HostName == "" { + return nil, &types.NoHost{} + } + + pool := NewResourcePool() + host := NewHostSystem(esx.HostSystem) + + host.Summary.Config.Name = spec.HostName + host.Name = host.Summary.Config.Name + host.Runtime.ConnectionState = types.HostSystemConnectionStateDisconnected + + summary := new(types.ComputeResourceSummary) + addComputeResource(summary, host) + + cr := &mo.ComputeResource{Summary: summary} + + Map.PutEntity(cr, Map.NewEntity(host)) + + Map.PutEntity(cr, Map.NewEntity(pool)) + + cr.Name = host.Name + cr.Host = append(cr.Host, host.Reference()) + cr.ResourcePool = &pool.Self + + f.putChild(cr) + pool.Owner = cr.Self + + return host, nil +} + +func (h *HostSystem) EnterMaintenanceModeTask(spec *types.EnterMaintenanceMode_Task) soap.HasFault { + task := CreateTask(h, "enterMaintenanceMode", func(t *Task) (types.AnyType, types.BaseMethodFault) { + h.Runtime.InMaintenanceMode = true + return nil, nil + }) + + return &methods.EnterMaintenanceMode_TaskBody{ + Res: &types.EnterMaintenanceMode_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (h *HostSystem) ExitMaintenanceModeTask(spec *types.ExitMaintenanceMode_Task) soap.HasFault { + task := CreateTask(h, "exitMaintenanceMode", func(t *Task) (types.AnyType, types.BaseMethodFault) { + h.Runtime.InMaintenanceMode = false + return nil, nil + }) + + return &methods.ExitMaintenanceMode_TaskBody{ + Res: &types.ExitMaintenanceMode_TaskResponse{ + Returnval: task.Run(), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/ip_pool_manager.go b/vendor/github.com/vmware/govmomi/simulator/ip_pool_manager.go new file mode 100644 index 00000000000..af8104a659f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/ip_pool_manager.go @@ -0,0 +1,392 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +var ipPool = MustNewIpPool(&types.IpPool{ + Id: 1, + Name: "ip-pool", + AvailableIpv4Addresses: 250, + AvailableIpv6Addresses: 250, + AllocatedIpv6Addresses: 0, + AllocatedIpv4Addresses: 0, + Ipv4Config: &types.IpPoolIpPoolConfigInfo{ + Netmask: "10.10.10.255", + Gateway: "10.10.10.1", + SubnetAddress: "10.10.10.0", + Range: "10.10.10.2#250", + }, + Ipv6Config: &types.IpPoolIpPoolConfigInfo{ + Netmask: "2001:4860:0:2001::ff", + Gateway: "2001:4860:0:2001::1", + SubnetAddress: "2001:4860:0:2001::0", + Range: "2001:4860:0:2001::2#250", + }, +}) + +// IpPoolManager implements a simple IP Pool manager in which all pools are shared +// across different datacenters. +type IpPoolManager struct { + mo.IpPoolManager + + pools map[int32]*IpPool + nextPoolId int32 +} + +func NewIpPoolManager(ref types.ManagedObjectReference) *IpPoolManager { + m := &IpPoolManager{} + m.Self = ref + + m.pools = map[int32]*IpPool{ + 1: ipPool, + } + m.nextPoolId = 2 + + return m +} + +func (m *IpPoolManager) CreateIpPool(req *types.CreateIpPool) soap.HasFault { + body := &methods.CreateIpPoolBody{} + id := m.nextPoolId + + var err error + m.pools[id], err = NewIpPool(&req.Pool) + if err != nil { + body.Fault_ = Fault("", &types.RuntimeFault{}) + return body + } + + m.nextPoolId++ + + body.Res = &types.CreateIpPoolResponse{ + Returnval: id, + } + + return body +} + +func (m *IpPoolManager) DestroyIpPool(req *types.DestroyIpPool) soap.HasFault { + delete(m.pools, req.Id) + + return &methods.DestroyIpPoolBody{ + Res: &types.DestroyIpPoolResponse{}, + } +} + +func (m *IpPoolManager) QueryIpPools(req *types.QueryIpPools) soap.HasFault { + pools := []types.IpPool{} + + for i := int32(1); i < m.nextPoolId; i++ { + if p, ok := m.pools[i]; ok { + pools = append(pools, *p.config) + } + } + + return &methods.QueryIpPoolsBody{ + Res: &types.QueryIpPoolsResponse{ + Returnval: pools, + }, + } +} + +func (m *IpPoolManager) UpdateIpPool(req *types.UpdateIpPool) soap.HasFault { + body := &methods.UpdateIpPoolBody{} + + var pool *IpPool + var err error + var ok bool + + if pool, ok = m.pools[req.Pool.Id]; !ok { + body.Fault_ = Fault("", &types.NotFoundFault{}) + return body + } + + if pool.config.AllocatedIpv4Addresses+pool.config.AllocatedIpv6Addresses != 0 { + body.Fault_ = Fault("update a pool has been used is not supported", &types.RuntimeFault{}) + return body + } + + m.pools[req.Pool.Id], err = NewIpPool(&req.Pool) + if err != nil { + body.Fault_ = Fault(err.Error(), &types.RuntimeFault{}) + return body + } + + body.Res = &types.UpdateIpPoolResponse{} + + return body +} + +func (m *IpPoolManager) AllocateIpv4Address(req *types.AllocateIpv4Address) soap.HasFault { + body := &methods.AllocateIpv4AddressBody{} + + pool, ok := m.pools[req.PoolId] + if !ok { + body.Fault_ = Fault("", &types.InvalidArgument{}) + return body + } + + ip, err := pool.AllocateIPv4(req.AllocationId) + if err != nil { + body.Fault_ = Fault(err.Error(), &types.RuntimeFault{}) + return body + } + + body.Res = &types.AllocateIpv4AddressResponse{ + Returnval: ip, + } + + return body +} + +func (m *IpPoolManager) AllocateIpv6Address(req *types.AllocateIpv6Address) soap.HasFault { + body := &methods.AllocateIpv6AddressBody{} + + pool, ok := m.pools[req.PoolId] + if !ok { + body.Fault_ = Fault("", 
&types.InvalidArgument{}) + return body + } + + ip, err := pool.AllocateIpv6(req.AllocationId) + if err != nil { + body.Fault_ = Fault(err.Error(), &types.RuntimeFault{}) + return body + } + + body.Res = &types.AllocateIpv6AddressResponse{ + Returnval: ip, + } + + return body +} + +func (m *IpPoolManager) ReleaseIpAllocation(req *types.ReleaseIpAllocation) soap.HasFault { + body := &methods.ReleaseIpAllocationBody{} + + pool, ok := m.pools[req.PoolId] + if !ok { + body.Fault_ = Fault("", &types.InvalidArgument{}) + return body + } + + pool.ReleaseIpv4(req.AllocationId) + pool.ReleaseIpv6(req.AllocationId) + + body.Res = &types.ReleaseIpAllocationResponse{} + + return body +} + +func (m *IpPoolManager) QueryIPAllocations(req *types.QueryIPAllocations) soap.HasFault { + body := &methods.QueryIPAllocationsBody{} + + pool, ok := m.pools[req.PoolId] + if !ok { + body.Fault_ = Fault("", &types.InvalidArgument{}) + return body + } + + body.Res = &types.QueryIPAllocationsResponse{} + + ipv4, ok := pool.ipv4Allocation[req.ExtensionKey] + if ok { + body.Res.Returnval = append(body.Res.Returnval, types.IpPoolManagerIpAllocation{ + IpAddress: ipv4, + AllocationId: req.ExtensionKey, + }) + } + + ipv6, ok := pool.ipv6Allocation[req.ExtensionKey] + if ok { + body.Res.Returnval = append(body.Res.Returnval, types.IpPoolManagerIpAllocation{ + IpAddress: ipv6, + AllocationId: req.ExtensionKey, + }) + } + + return body +} + +var ( + errNoIpAvailable = errors.New("no ip address available") + errInvalidAllocation = errors.New("allocation id not recognized") +) + +type IpPool struct { + config *types.IpPool + ipv4Allocation map[string]string + ipv6Allocation map[string]string + ipv4Pool []string + ipv6Pool []string +} + +func MustNewIpPool(config *types.IpPool) *IpPool { + pool, err := NewIpPool(config) + if err != nil { + panic(err) + } + + return pool +} + +func NewIpPool(config *types.IpPool) (*IpPool, error) { + pool := &IpPool{ + config: config, + ipv4Allocation: make(map[string]string), + ipv6Allocation: make(map[string]string), + } + + return pool, pool.init() +} + +func (p *IpPool) init() error { + // IPv4 range + if p.config.Ipv4Config != nil { + ranges := strings.Split(p.config.Ipv4Config.Range, ",") + for _, r := range ranges { + sp := strings.Split(r, "#") + if len(sp) != 2 { + return fmt.Errorf("format of range should be ip#number; got %q", r) + } + + ip := net.ParseIP(strings.TrimSpace(sp[0])).To4() + if ip == nil { + return fmt.Errorf("bad ip format: %q", sp[0]) + } + + length, err := strconv.Atoi(sp[1]) + if err != nil { + return err + } + + for i := 0; i < length; i++ { + p.ipv4Pool = append(p.ipv4Pool, net.IPv4(ip[0], ip[1], ip[2], ip[3]+byte(i)).String()) + } + } + } + + // IPv6 range + if p.config.Ipv6Config != nil { + ranges := strings.Split(p.config.Ipv6Config.Range, ",") + for _, r := range ranges { + sp := strings.Split(r, "#") + if len(sp) != 2 { + return fmt.Errorf("format of range should be ip#number; got %q", r) + } + + ip := net.ParseIP(strings.TrimSpace(sp[0])).To16() + if ip == nil { + return fmt.Errorf("bad ip format: %q", sp[0]) + } + + length, err := strconv.Atoi(sp[1]) + if err != nil { + return err + } + + for i := 0; i < length; i++ { + var ipv6 [16]byte + copy(ipv6[:], ip) + ipv6[15] += byte(i) + p.ipv6Pool = append(p.ipv6Pool, net.IP(ipv6[:]).String()) + } + } + } + + return nil +} + +func (p *IpPool) AllocateIPv4(allocation string) (string, error) { + if ip, ok := p.ipv4Allocation[allocation]; ok { + return ip, nil + } + + l := len(p.ipv4Pool) + if l == 0 { + return "", 
errNoIpAvailable + } + + ip := p.ipv4Pool[l-1] + + p.config.AvailableIpv4Addresses-- + p.config.AllocatedIpv4Addresses++ + p.ipv4Pool = p.ipv4Pool[:l-1] + p.ipv4Allocation[allocation] = ip + + return ip, nil +} + +func (p *IpPool) ReleaseIpv4(allocation string) error { + ip, ok := p.ipv4Allocation[allocation] + if !ok { + return errInvalidAllocation + } + + delete(p.ipv4Allocation, allocation) + p.config.AvailableIpv4Addresses++ + p.config.AllocatedIpv4Addresses-- + p.ipv4Pool = append(p.ipv4Pool, ip) + + return nil +} + +func (p *IpPool) AllocateIpv6(allocation string) (string, error) { + if ip, ok := p.ipv6Allocation[allocation]; ok { + return ip, nil + } + + l := len(p.ipv6Pool) + if l == 0 { + return "", errNoIpAvailable + } + + ip := p.ipv6Pool[l-1] + + p.config.AvailableIpv6Addresses-- + p.config.AllocatedIpv6Addresses++ + p.ipv6Pool = p.ipv6Pool[:l-1] + p.ipv6Allocation[allocation] = ip + + return ip, nil +} + +func (p *IpPool) ReleaseIpv6(allocation string) error { + ip, ok := p.ipv6Allocation[allocation] + if !ok { + return errInvalidAllocation + } + + delete(p.ipv6Allocation, allocation) + p.config.AvailableIpv6Addresses++ + p.config.AllocatedIpv6Addresses-- + p.ipv6Pool = append(p.ipv6Pool, ip) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/simulator/license_manager.go b/vendor/github.com/vmware/govmomi/simulator/license_manager.go new file mode 100644 index 00000000000..13565d32ef1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/license_manager.go @@ -0,0 +1,156 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Copyright 2017 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
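To illustrate the IP pool allocator just added in ip_pool_manager.go: IpPool.init parses each Ipv4Config/Ipv6Config Range as comma-separated "startIP#count" segments, and AllocateIPv4/ReleaseIpv4 move addresses between the free pool and a per-allocation-ID map. A minimal sketch using the exported NewIpPool and AllocateIPv4 from the code above; the subnet, addresses, and allocation ID are hypothetical.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/simulator"
	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Hypothetical addresses; Range uses the "startIP#count" form parsed by
	// IpPool.init, with multiple ranges separated by commas.
	pool, err := simulator.NewIpPool(&types.IpPool{
		Id:   2,
		Name: "example-pool",
		Ipv4Config: &types.IpPoolIpPoolConfigInfo{
			SubnetAddress: "192.168.10.0",
			Netmask:       "255.255.255.0",
			Gateway:       "192.168.10.1",
			Range:         "192.168.10.10#5,192.168.10.100#5",
		},
	})
	if err != nil {
		panic(err)
	}

	ip, _ := pool.AllocateIPv4("alloc-1") // allocation IDs are arbitrary strings
	fmt.Println(ip)                       // one address drawn from the configured ranges
}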
+ +package simulator + +import ( + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// EvalLicense is the default license +var EvalLicense = types.LicenseManagerLicenseInfo{ + LicenseKey: "00000-00000-00000-00000-00000", + EditionKey: "eval", + Name: "Evaluation Mode", + Properties: []types.KeyAnyValue{ + { + Key: "feature", + Value: types.KeyValue{ + Key: "serialuri:2", + Value: "Remote virtual Serial Port Concentrator", + }, + }, + { + Key: "feature", + Value: types.KeyValue{ + Key: "dvs", + Value: "vSphere Distributed Switch", + }, + }, + }, +} + +type LicenseManager struct { + mo.LicenseManager +} + +func NewLicenseManager(ref types.ManagedObjectReference) object.Reference { + m := &LicenseManager{} + m.Self = ref + m.Licenses = []types.LicenseManagerLicenseInfo{EvalLicense} + + if Map.IsVPX() { + am := Map.Put(&LicenseAssignmentManager{}).Reference() + m.LicenseAssignmentManager = &am + } + + return m +} + +func (m *LicenseManager) AddLicense(req *types.AddLicense) soap.HasFault { + body := &methods.AddLicenseBody{ + Res: &types.AddLicenseResponse{}, + } + + for _, license := range m.Licenses { + if license.LicenseKey == req.LicenseKey { + body.Res.Returnval = licenseInfo(license.LicenseKey, license.Labels) + return body + } + } + + m.Licenses = append(m.Licenses, types.LicenseManagerLicenseInfo{ + LicenseKey: req.LicenseKey, + Labels: req.Labels, + }) + + body.Res.Returnval = licenseInfo(req.LicenseKey, req.Labels) + + return body +} + +func (m *LicenseManager) RemoveLicense(req *types.RemoveLicense) soap.HasFault { + body := &methods.RemoveLicenseBody{ + Res: &types.RemoveLicenseResponse{}, + } + + for i, license := range m.Licenses { + if req.LicenseKey == license.LicenseKey { + m.Licenses = append(m.Licenses[:i], m.Licenses[i+1:]...) + return body + } + } + return body +} + +type LicenseAssignmentManager struct { + mo.LicenseAssignmentManager +} + +func (m *LicenseAssignmentManager) QueryAssignedLicenses(req *types.QueryAssignedLicenses) soap.HasFault { + body := &methods.QueryAssignedLicensesBody{ + Res: &types.QueryAssignedLicensesResponse{}, + } + + // EntityId can be a HostSystem or the vCenter InstanceUuid + if req.EntityId != "" { + if req.EntityId != Map.content().About.InstanceUuid { + id := types.ManagedObjectReference{ + Type: "HostSystem", + Value: req.EntityId, + } + + if Map.Get(id) == nil { + return body + } + } + } + + body.Res.Returnval = []types.LicenseAssignmentManagerLicenseAssignment{ + { + EntityId: req.EntityId, + AssignedLicense: EvalLicense, + }, + } + + return body +} + +func licenseInfo(key string, labels []types.KeyValue) types.LicenseManagerLicenseInfo { + info := EvalLicense + + info.LicenseKey = key + info.Labels = labels + + return info +} diff --git a/vendor/github.com/vmware/govmomi/simulator/model.go b/vendor/github.com/vmware/govmomi/simulator/model.go new file mode 100644 index 00000000000..4bc26f9793f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/model.go @@ -0,0 +1,484 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/simulator/vpx" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// Model is used to populate a Model with an initial set of managed entities. +// This is a simple helper for tests running against a simulator, to populate an inventory +// with commonly used models. +type Model struct { + Service *Service + + ServiceContent types.ServiceContent + RootFolder mo.Folder + + // Autostart will power on Model created VMs when true + Autostart bool + + // Datacenter specifies the number of Datacenter entities to create + Datacenter int + + // Portgroup specifies the number of DistributedVirtualPortgroup entities to create per Datacenter + Portgroup int + + // Host specifies the number of standalone HostSystems entities to create per Datacenter + Host int + + // Cluster specifies the number of ClusterComputeResource entities to create per Datacenter + Cluster int + + // ClusterHost specifies the number of HostSystems entities to create within a Cluster + ClusterHost int + + // Pool specifies the number of ResourcePool entities to create per Cluster + Pool int + + // Datastore specifies the number of Datastore entities to create + // Each Datastore will have temporary local file storage and will be mounted + // on every HostSystem created by the ModelConfig + Datastore int + + // Machine specifies the number of VirtualMachine entities to create per ResourcePool + Machine int + + // Folder specifies the number of Datacenter to place within a Folder. + // This includes a folder for the Datacenter itself and its host, vm, network and datastore folders. + // All resources for the Datacenter are placed within these folders, rather than the top-level folders. 
+ Folder int + + // App specifies the number of VirtualApp to create per Cluster + App int + + // Pod specifies the number of StoragePod to create per Cluster + Pod int + + // total number of inventory objects, set by Count() + total int + + dirs []string +} + +// ESX is the default Model for a standalone ESX instance +func ESX() *Model { + return &Model{ + ServiceContent: esx.ServiceContent, + RootFolder: esx.RootFolder, + Autostart: true, + Datastore: 1, + Machine: 2, + } +} + +// VPX is the default Model for a vCenter instance +func VPX() *Model { + return &Model{ + ServiceContent: vpx.ServiceContent, + RootFolder: vpx.RootFolder, + Autostart: true, + Datacenter: 1, + Portgroup: 1, + Host: 1, + Cluster: 1, + ClusterHost: 3, + Datastore: 1, + Machine: 2, + } +} + +// Count returns a Model with total number of each existing type +func (m *Model) Count() Model { + count := Model{} + + for ref, obj := range Map.objects { + if _, ok := obj.(mo.Entity); !ok { + continue + } + + count.total++ + + switch ref.Type { + case "Datacenter": + count.Datacenter++ + case "DistributedVirtualPortgroup": + count.Portgroup++ + case "ClusterComputeResource": + count.Cluster++ + case "Datastore": + count.Datastore++ + case "HostSystem": + count.Host++ + case "VirtualMachine": + count.Machine++ + case "ResourcePool": + count.Pool++ + case "VirtualApp": + count.App++ + case "Folder": + count.Folder++ + case "StoragePod": + count.Pod++ + } + } + + return count +} + +func (*Model) fmtName(prefix string, num int) string { + return fmt.Sprintf("%s%d", prefix, num) +} + +// Create populates the Model with the given ModelConfig +func (m *Model) Create() error { + m.Service = New(NewServiceInstance(m.ServiceContent, m.RootFolder)) + + ctx := context.Background() + client := m.Service.client + root := object.NewRootFolder(client) + + // After all hosts are created, this var is used to mount the host datastores. + var hosts []*object.HostSystem + // We need to defer VM creation until after the datastores are created. + var vms []func() error + // 1 DVS per DC, added to all hosts + var dvs *object.DistributedVirtualSwitch + // 1 NIC per VM, backed by a DVPG if Model.Portgroup > 0 + vmnet := esx.EthernetCard.Backing + + // addHost adds a cluster host or a stanalone host. + addHost := func(name string, f func(types.HostConnectSpec) (*object.Task, error)) (*object.HostSystem, error) { + spec := types.HostConnectSpec{ + HostName: name, + } + + task, err := f(spec) + if err != nil { + return nil, err + } + + info, err := task.WaitForResult(context.Background(), nil) + if err != nil { + return nil, err + } + + host := object.NewHostSystem(client, info.Result.(types.ManagedObjectReference)) + hosts = append(hosts, host) + + if dvs != nil { + config := &types.DVSConfigSpec{ + Host: []types.DistributedVirtualSwitchHostMemberConfigSpec{{ + Operation: string(types.ConfigSpecOperationAdd), + Host: host.Reference(), + }}, + } + + _, _ = dvs.Reconfigure(ctx, config) + } + + return host, nil + } + + // addMachine returns a func to create a VM. 
+ addMachine := func(prefix string, host *object.HostSystem, pool *object.ResourcePool, folders *object.DatacenterFolders) { + nic := esx.EthernetCard + nic.Backing = vmnet + ds := types.ManagedObjectReference{} + + f := func() error { + for i := 0; i < m.Machine; i++ { + name := m.fmtName(prefix+"_VM", i) + + config := types.VirtualMachineConfigSpec{ + Name: name, + GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest), + Files: &types.VirtualMachineFileInfo{ + VmPathName: "[LocalDS_0]", + }, + } + + if pool == nil { + pool, _ = host.ResourcePool(ctx) + } + + var devices object.VirtualDeviceList + + scsi, _ := devices.CreateSCSIController("pvscsi") + ide, _ := devices.CreateIDEController() + cdrom, _ := devices.CreateCdrom(ide.(*types.VirtualIDEController)) + disk := devices.CreateDisk(scsi.(types.BaseVirtualController), ds, + config.Files.VmPathName+" "+path.Join(name, "disk1.vmdk")) + + devices = append(devices, scsi, cdrom, disk, &nic) + + config.DeviceChange, _ = devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd) + + task, err := folders.VmFolder.CreateVM(ctx, config, pool, host) + if err != nil { + return err + } + + info, err := task.WaitForResult(ctx, nil) + if err != nil { + return err + } + + vm := object.NewVirtualMachine(client, info.Result.(types.ManagedObjectReference)) + + if m.Autostart { + _, _ = vm.PowerOn(ctx) + } + } + + return nil + } + + vms = append(vms, f) + } + + nfolder := 0 + + for ndc := 0; ndc < m.Datacenter; ndc++ { + dcName := m.fmtName("DC", ndc) + folder := root + fName := m.fmtName("F", nfolder) + + // If Datacenter > Folder, don't create folders for the first N DCs. + if nfolder < m.Folder && ndc >= (m.Datacenter-m.Folder) { + f, err := folder.CreateFolder(ctx, fName) + if err != nil { + return err + } + folder = f + } + + dc, err := folder.CreateDatacenter(ctx, dcName) + if err != nil { + return err + } + + folders, err := dc.Folders(ctx) + if err != nil { + return err + } + + if m.Pod > 0 { + for pod := 0; pod < m.Pod; pod++ { + _, _ = folders.DatastoreFolder.CreateStoragePod(ctx, m.fmtName(dcName+"_POD", pod)) + } + } + + if folder != root { + // Create sub-folders and use them to create any resources that follow + subs := []**object.Folder{&folders.DatastoreFolder, &folders.HostFolder, &folders.NetworkFolder, &folders.VmFolder} + + for _, sub := range subs { + f, err := (*sub).CreateFolder(ctx, fName) + if err != nil { + return err + } + + *sub = f + } + + nfolder++ + } + + if m.Portgroup > 0 { + var spec types.DVSCreateSpec + spec.ConfigSpec = &types.VMwareDVSConfigSpec{} + spec.ConfigSpec.GetDVSConfigSpec().Name = m.fmtName("DVS", 0) + + task, err := folders.NetworkFolder.CreateDVS(ctx, spec) + if err != nil { + return err + } + + info, err := task.WaitForResult(ctx, nil) + if err != nil { + return err + } + + dvs = object.NewDistributedVirtualSwitch(client, info.Result.(types.ManagedObjectReference)) + + for npg := 0; npg < m.Portgroup; npg++ { + name := m.fmtName(dcName+"_DVPG", npg) + + task, err = dvs.AddPortgroup(ctx, []types.DVPortgroupConfigSpec{{Name: name}}) + if err != nil { + return err + } + + err = task.Wait(ctx) + if err != nil { + return err + } + + // Use the 1st DVPG for the VMs eth0 backing + if npg == 0 { + // AddPortgroup_Task does not return the moid, so we look it up by name + net := Map.Get(folders.NetworkFolder.Reference()).(*Folder) + pg := Map.FindByName(name, net.ChildEntity) + + vmnet, _ = object.NewDistributedVirtualPortgroup(client, pg.Reference()).EthernetCardBackingInfo(ctx) + } + } + } + + for 
nhost := 0; nhost < m.Host; nhost++ { + name := m.fmtName(dcName+"_H", nhost) + + host, err := addHost(name, func(spec types.HostConnectSpec) (*object.Task, error) { + return folders.HostFolder.AddStandaloneHost(ctx, spec, true, nil, nil) + }) + if err != nil { + return err + } + + addMachine(name, host, nil, folders) + } + + for ncluster := 0; ncluster < m.Cluster; ncluster++ { + clusterName := m.fmtName(dcName+"_C", ncluster) + + cluster, err := folders.HostFolder.CreateCluster(ctx, clusterName, types.ClusterConfigSpecEx{}) + if err != nil { + return err + } + + for nhost := 0; nhost < m.ClusterHost; nhost++ { + name := m.fmtName(clusterName+"_H", nhost) + + _, err = addHost(name, func(spec types.HostConnectSpec) (*object.Task, error) { + return cluster.AddHost(ctx, spec, true, nil, nil) + }) + if err != nil { + return err + } + } + + pool, err := cluster.ResourcePool(ctx) + if err != nil { + return err + } + + prefix := clusterName + "_RP" + + addMachine(prefix+"0", nil, pool, folders) + + for npool := 1; npool <= m.Pool; npool++ { + spec := types.DefaultResourceConfigSpec() + + _, err = pool.Create(ctx, m.fmtName(prefix, npool), spec) + if err != nil { + return err + } + } + + prefix = clusterName + "_APP" + + for napp := 0; napp < m.App; napp++ { + rspec := types.DefaultResourceConfigSpec() + vspec := NewVAppConfigSpec() + name := m.fmtName(prefix, napp) + + vapp, err := pool.CreateVApp(ctx, name, rspec, vspec, nil) + if err != nil { + return err + } + + addMachine(name, nil, vapp.ResourcePool, folders) + } + } + } + + if m.ServiceContent.RootFolder == esx.RootFolder.Reference() { + // ESX model + host := object.NewHostSystem(client, esx.HostSystem.Reference()) + hosts = append(hosts, host) + + dc := object.NewDatacenter(client, esx.Datacenter.Reference()) + folders, err := dc.Folders(ctx) + if err != nil { + return err + } + + addMachine(host.Reference().Value, host, nil, folders) + } + + for i := 0; i < m.Datastore; i++ { + err := m.createLocalDatastore(m.fmtName("LocalDS_", i), hosts) + if err != nil { + return err + } + } + + for _, createVM := range vms { + err := createVM() + if err != nil { + return err + } + } + + return nil +} + +var tempDir = func() (string, error) { + return ioutil.TempDir("", "govcsim-") +} + +func (m *Model) createLocalDatastore(name string, hosts []*object.HostSystem) error { + ctx := context.Background() + dir, err := tempDir() + if err != nil { + return err + } + + m.dirs = append(m.dirs, dir) + + for _, host := range hosts { + dss, err := host.ConfigManager().DatastoreSystem(ctx) + if err != nil { + return err + } + + _, err = dss.CreateLocalDatastore(ctx, name, dir) + if err != nil { + return err + } + } + + return nil +} + +// Remove cleans up items created by the Model, such as local datastore directories +func (m *Model) Remove() { + for _, dir := range m.dirs { + _ = os.RemoveAll(dir) + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/option_manager.go b/vendor/github.com/vmware/govmomi/simulator/option_manager.go new file mode 100644 index 00000000000..4615882c364 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/option_manager.go @@ -0,0 +1,59 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
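The Model type above (with the ESX/VPX presets, Create, and Remove) is the main entry point for tests that want a populated inventory. A minimal sketch of how a test might stand the simulator up and connect a client; it assumes the simulator's Service type exposes NewServer() and that govmomi.NewClient is available, as in upstream govmomi, since neither appears in this hunk.

package main

import (
	"context"
	"log"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/simulator"
)

func main() {
	ctx := context.Background()

	m := simulator.VPX() // defaults above: 1 DC, 1 cluster of 3 hosts, 1 standalone host, 1 DVS/DVPG, 2 VMs per pool
	defer m.Remove()     // removes the temporary local datastore directories

	if err := m.Create(); err != nil {
		log.Fatal(err)
	}

	s := m.Service.NewServer() // assumes Service.NewServer as in upstream govmomi
	defer s.Close()

	c, err := govmomi.NewClient(ctx, s.URL, true)
	if err != nil {
		log.Fatal(err)
	}

	log.Println("connected to", c.ServiceContent.About.ApiType)
}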
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "strings" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type OptionManager struct { + mo.OptionManager +} + +func NewOptionManager(ref *types.ManagedObjectReference, setting []types.BaseOptionValue) object.Reference { + s := &OptionManager{} + if ref != nil { + s.Self = *ref + } + s.Setting = setting + return s +} + +func (m *OptionManager) QueryOptions(req *types.QueryOptions) soap.HasFault { + body := &methods.QueryOptionsBody{} + res := &types.QueryOptionsResponse{} + + for _, opt := range m.Setting { + if strings.HasPrefix(opt.GetOptionValue().Key, req.Name) { + res.Returnval = append(res.Returnval, opt) + } + } + + if len(res.Returnval) == 0 { + body.Fault_ = Fault("", &types.InvalidName{Name: req.Name}) + } else { + body.Res = res + } + + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/os_unix.go b/vendor/github.com/vmware/govmomi/simulator/os_unix.go new file mode 100644 index 00000000000..63c15621ace --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/os_unix.go @@ -0,0 +1,38 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import "syscall" + +func (ds *Datastore) stat() error { + info := ds.Info.GetDatastoreInfo() + var stat syscall.Statfs_t + + err := syscall.Statfs(info.Url, &stat) + if err != nil { + return err + } + + bsize := uint64(stat.Bsize) / 512 + + info.FreeSpace = int64(stat.Bfree*bsize) >> 1 + + ds.Summary.FreeSpace = info.FreeSpace + ds.Summary.Capacity = int64(stat.Blocks*bsize) >> 1 + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/simulator/os_windows.go b/vendor/github.com/vmware/govmomi/simulator/os_windows.go new file mode 100644 index 00000000000..55cf2ab07ef --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/os_windows.go @@ -0,0 +1,26 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import "os" + +func (ds *Datastore) stat() error { + info := ds.Info.GetDatastoreInfo() + + _, err := os.Stat(info.Url) + return err +} diff --git a/vendor/github.com/vmware/govmomi/simulator/performance_manager.go b/vendor/github.com/vmware/govmomi/simulator/performance_manager.go new file mode 100644 index 00000000000..e6a70efc0a7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/performance_manager.go @@ -0,0 +1,35 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type PerformanceManager struct { + mo.PerformanceManager +} + +func NewPerformanceManager(ref types.ManagedObjectReference) object.Reference { + m := &PerformanceManager{} + m.Self = ref + m.PerfCounter = esx.PerfCounter + return m +} diff --git a/vendor/github.com/vmware/govmomi/simulator/portgroup.go b/vendor/github.com/vmware/govmomi/simulator/portgroup.go new file mode 100644 index 00000000000..434f5fd4728 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/portgroup.go @@ -0,0 +1,82 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type DistributedVirtualPortgroup struct { + mo.DistributedVirtualPortgroup +} + +func (s *DistributedVirtualPortgroup) ReconfigureDVPortgroupTask(req *types.ReconfigureDVPortgroup_Task) soap.HasFault { + task := CreateTask(s, "reconfigureDvPortgroup", func(t *Task) (types.AnyType, types.BaseMethodFault) { + s.Config.DefaultPortConfig = req.Spec.DefaultPortConfig + s.Config.NumPorts = req.Spec.NumPorts + s.Config.AutoExpand = req.Spec.AutoExpand + s.Config.Type = req.Spec.Type + s.Config.Description = req.Spec.Description + s.Config.DynamicData = req.Spec.DynamicData + s.Config.Name = req.Spec.Name + s.Config.Policy = req.Spec.Policy + s.Config.PortNameFormat = req.Spec.PortNameFormat + s.Config.VmVnicNetworkResourcePoolKey = req.Spec.VmVnicNetworkResourcePoolKey + + return nil, nil + }) + + return &methods.ReconfigureDVPortgroup_TaskBody{ + Res: &types.ReconfigureDVPortgroup_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (s *DistributedVirtualPortgroup) DestroyTask(req *types.Destroy_Task) soap.HasFault { + task := CreateTask(s, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { + vswitch := Map.Get(*s.Config.DistributedVirtualSwitch).(*DistributedVirtualSwitch) + for i, pg := range vswitch.Portgroup { + if pg.Reference() == s.Reference() { + vswitch.Portgroup = append(vswitch.Portgroup[:i], vswitch.Portgroup[i+1:]...) + break + } + } + + f := Map.getEntityParent(vswitch, "Folder").(*Folder) + f.removeChild(s.Reference()) + + for i, name := range vswitch.Summary.PortgroupName { + if name == s.Name { + vswitch.Summary.PortgroupName = append(vswitch.Summary.PortgroupName[:i], + vswitch.Summary.PortgroupName[i+1:]...) + } + } + + return nil, nil + }) + + return &methods.Destroy_TaskBody{ + Res: &types.Destroy_TaskResponse{ + Returnval: task.Run(), + }, + } + +} diff --git a/vendor/github.com/vmware/govmomi/simulator/property_collector.go b/vendor/github.com/vmware/govmomi/simulator/property_collector.go new file mode 100644 index 00000000000..8ef7898d84d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/property_collector.go @@ -0,0 +1,548 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "errors" + "log" + "path" + "reflect" + "strings" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type PropertyCollector struct { + mo.PropertyCollector +} + +func NewPropertyCollector(ref types.ManagedObjectReference) object.Reference { + s := &PropertyCollector{} + s.Self = ref + return s +} + +var errMissingField = errors.New("missing field") +var errEmptyField = errors.New("empty field") + +func getObject(ref types.ManagedObjectReference) (reflect.Value, bool) { + obj := Map.Get(ref) + if obj == nil { + return reflect.Value{}, false + } + + rval := reflect.ValueOf(obj).Elem() + rtype := rval.Type() + + // PropertyCollector is for Managed Object types only (package mo). + // If the registry object is not in the mo package, assume it is a wrapper + // type where the first field is an embedded mo type. + // We need to dig out the mo type for PropSet.All to work properly and + // for the case where the type has a field of the same name, for example: + // mo.ResourcePool.ResourcePool + for { + if path.Base(rtype.PkgPath()) != "mo" { + if rtype.Kind() != reflect.Struct || rtype.NumField() == 0 { + log.Printf("%#v does not have an embedded mo type", ref) + return reflect.Value{}, false + } + rval = rval.Field(0) + rtype = rval.Type() + } else { + break + } + } + + return rval, true +} + +func fieldValueInterface(f reflect.StructField, rval reflect.Value) interface{} { + if rval.Kind() == reflect.Ptr { + rval = rval.Elem() + } + + pval := rval.Interface() + + if rval.Kind() == reflect.Slice { + // Convert slice to types.ArrayOf* + switch v := pval.(type) { + case []string: + pval = &types.ArrayOfString{ + String: v, + } + case []int32: + pval = &types.ArrayOfInt{ + Int: v, + } + default: + kind := f.Type.Elem().Name() + // Remove govmomi interface prefix name + if strings.HasPrefix(kind, "Base") { + kind = kind[4:] + } + akind, _ := typeFunc("ArrayOf" + kind) + a := reflect.New(akind) + a.Elem().FieldByName(kind).Set(rval) + pval = a.Interface() + } + } + + return pval +} + +func fieldValue(rval reflect.Value, p string) (interface{}, error) { + var value interface{} + + fields := strings.Split(p, ".") + + for i, name := range fields { + kind := rval.Type().Kind() + + if kind == reflect.Interface { + if rval.IsNil() { + continue + } + rval = rval.Elem() + kind = rval.Type().Kind() + } + + if kind == reflect.Ptr { + if rval.IsNil() { + continue + } + rval = rval.Elem() + } + + x := ucFirst(name) + val := rval.FieldByName(x) + if !val.IsValid() { + return nil, errMissingField + } + + if isEmpty(val) { + return nil, errEmptyField + } + + if i == len(fields)-1 { + ftype, _ := rval.Type().FieldByName(x) + value = fieldValueInterface(ftype, val) + break + } + + rval = val + } + + return value, nil +} + +func fieldRefs(f interface{}) []types.ManagedObjectReference { + switch fv := f.(type) { + case types.ManagedObjectReference: + return []types.ManagedObjectReference{fv} + case *types.ArrayOfManagedObjectReference: + return fv.ManagedObjectReference + case nil: + // empty field + } + + return nil +} + +func isEmpty(rval reflect.Value) bool { + switch rval.Kind() { + case reflect.Ptr: + return rval.IsNil() + case reflect.String, reflect.Slice: + return rval.Len() == 0 + } + + return false +} + +func isTrue(v *bool) bool { + return v != nil && *v +} + +func isFalse(v *bool) bool { + return v == nil || *v 
== false +} + +func lcFirst(s string) string { + return strings.ToLower(s[:1]) + s[1:] +} + +func ucFirst(s string) string { + return strings.ToUpper(s[:1]) + s[1:] +} + +type retrieveResult struct { + *types.RetrieveResult + req *types.RetrievePropertiesEx + collected map[types.ManagedObjectReference]bool + specs map[string]*types.TraversalSpec +} + +func (rr *retrieveResult) collectAll(rval reflect.Value, rtype reflect.Type, content *types.ObjectContent) { + for i := 0; i < rval.NumField(); i++ { + val := rval.Field(i) + + f := rtype.Field(i) + + if isEmpty(val) || f.Name == "Self" { + continue + } + + if f.Anonymous { + // recurse into embedded field + rr.collectAll(val, f.Type, content) + continue + } + + content.PropSet = append(content.PropSet, types.DynamicProperty{ + Name: lcFirst(f.Name), + Val: fieldValueInterface(f, val), + }) + } +} + +func (rr *retrieveResult) collectFields(rval reflect.Value, fields []string, content *types.ObjectContent) { + seen := make(map[string]bool) + + for i := range content.PropSet { + seen[content.PropSet[i].Name] = true // mark any already collected via embedded field + } + + for _, name := range fields { + if seen[name] { + // rvc 'ls' includes the "name" property twice, then fails with no error message or stack trace + // in RbVmomi::VIM::ObjectContent.to_hash_uncached when it sees the 2nd "name" property. + continue + } + seen[name] = true + + val, err := fieldValue(rval, name) + if err == nil { + prop := types.DynamicProperty{ + Name: name, + Val: val, + } + + content.PropSet = append(content.PropSet, prop) + continue + } + + switch err { + case errEmptyField: + // ok + case errMissingField: + content.MissingSet = append(content.MissingSet, types.MissingProperty{ + Path: name, + Fault: types.LocalizedMethodFault{Fault: &types.InvalidProperty{ + Name: name, + }}, + }) + } + } +} + +func (rr *retrieveResult) collect(ref types.ManagedObjectReference) { + if rr.collected[ref] { + return + } + + content := types.ObjectContent{ + Obj: ref, + } + + rval, ok := getObject(ref) + if !ok { + // Possible if a test uses Map.Remove instead of Destroy_Task + log.Printf("object %s no longer exists", ref) + return + } + + rtype := rval.Type() + + for _, spec := range rr.req.SpecSet { + for _, p := range spec.PropSet { + if p.Type != ref.Type { + // e.g. 
ManagedEntity, ComputeResource + field, ok := rtype.FieldByName(p.Type) + + if !(ok && field.Anonymous) { + continue + } + } + + if isTrue(p.All) { + rr.collectAll(rval, rtype, &content) + continue + } + + rr.collectFields(rval, p.PathSet, &content) + } + } + + if len(content.PropSet) != 0 || len(content.MissingSet) != 0 { + rr.Objects = append(rr.Objects, content) + } + + rr.collected[ref] = true +} + +func (rr *retrieveResult) selectSet(obj reflect.Value, s []types.BaseSelectionSpec, refs *[]types.ManagedObjectReference) types.BaseMethodFault { + for _, ss := range s { + ts, ok := ss.(*types.TraversalSpec) + + if ok { + if ts.Name != "" { + rr.specs[ts.Name] = ts + } + } + } + + for _, ss := range s { + ts, ok := ss.(*types.TraversalSpec) + if !ok { + ts = rr.specs[ss.GetSelectionSpec().Name] + if ts == nil { + return &types.InvalidArgument{InvalidProperty: "undefined TraversalSpec name"} + } + } + + f, _ := fieldValue(obj, ts.Path) + + for _, ref := range fieldRefs(f) { + if isFalse(ts.Skip) { + *refs = append(*refs, ref) + } + + rval, ok := getObject(ref) + if ok { + if err := rr.selectSet(rval, ts.SelectSet, refs); err != nil { + return err + } + } + } + } + + return nil +} + +func (pc *PropertyCollector) collect(r *types.RetrievePropertiesEx) (*types.RetrieveResult, types.BaseMethodFault) { + var refs []types.ManagedObjectReference + + rr := &retrieveResult{ + RetrieveResult: &types.RetrieveResult{}, + req: r, + collected: make(map[types.ManagedObjectReference]bool), + specs: make(map[string]*types.TraversalSpec), + } + + // Select object references + for _, spec := range r.SpecSet { + for _, o := range spec.ObjectSet { + rval, ok := getObject(o.Obj) + + if !ok { + if isFalse(spec.ReportMissingObjectsInResults) { + return nil, &types.ManagedObjectNotFound{Obj: o.Obj} + } + continue + } + + if o.SelectSet == nil || isFalse(o.Skip) { + refs = append(refs, o.Obj) + } + + if err := rr.selectSet(rval, o.SelectSet, &refs); err != nil { + return nil, err + } + } + } + + for _, ref := range refs { + rr.collect(ref) + } + + return rr.RetrieveResult, nil +} + +func (pc *PropertyCollector) CreateFilter(c *types.CreateFilter) soap.HasFault { + body := &methods.CreateFilterBody{} + + filter := &PropertyFilter{pc: pc} + filter.PartialUpdates = c.PartialUpdates + filter.Spec = c.Spec + + pc.Filter = append(pc.Filter, Map.Put(filter).Reference()) + + body.Res = &types.CreateFilterResponse{ + Returnval: filter.Self, + } + + return body +} + +func (pc *PropertyCollector) CreatePropertyCollector(c *types.CreatePropertyCollector) soap.HasFault { + body := &methods.CreatePropertyCollectorBody{} + + cpc := &PropertyCollector{} + + body.Res = &types.CreatePropertyCollectorResponse{ + Returnval: Map.Put(cpc).Reference(), + } + + return body +} + +func (pc *PropertyCollector) DestroyPropertyCollector(c *types.DestroyPropertyCollector) soap.HasFault { + body := &methods.DestroyPropertyCollectorBody{} + + for _, ref := range pc.Filter { + filter := Map.Get(ref).(*PropertyFilter) + filter.DestroyPropertyFilter(&types.DestroyPropertyFilter{This: ref}) + } + + Map.Remove(c.This) + + body.Res = &types.DestroyPropertyCollectorResponse{} + + return body +} + +func (pc *PropertyCollector) RetrievePropertiesEx(r *types.RetrievePropertiesEx) soap.HasFault { + body := &methods.RetrievePropertiesExBody{} + + res, fault := pc.collect(r) + + if fault != nil { + body.Fault_ = Fault("", fault) + } else { + body.Res = &types.RetrievePropertiesExResponse{ + Returnval: res, + } + } + + return body +} + +// RetrieveProperties is 
deprecated, but govmomi is still using it at the moment. +func (pc *PropertyCollector) RetrieveProperties(r *types.RetrieveProperties) soap.HasFault { + body := &methods.RetrievePropertiesBody{} + + res := pc.RetrievePropertiesEx(&types.RetrievePropertiesEx{ + This: r.This, + SpecSet: r.SpecSet, + }) + + if res.Fault() != nil { + body.Fault_ = res.Fault() + } else { + body.Res = &types.RetrievePropertiesResponse{ + Returnval: res.(*methods.RetrievePropertiesExBody).Res.Returnval.Objects, + } + } + + return body +} + +func (pc *PropertyCollector) CancelWaitForUpdates(r *types.CancelWaitForUpdates) soap.HasFault { + return &methods.CancelWaitForUpdatesBody{Res: new(types.CancelWaitForUpdatesResponse)} +} + +func (pc *PropertyCollector) WaitForUpdatesEx(r *types.WaitForUpdatesEx) soap.HasFault { + body := &methods.WaitForUpdatesExBody{} + + // At the moment we need to support Task completion. Handlers can simply set the Task + // state before returning and the non-incremental update is enough for the client. + // We can wait for incremental updates to simulate timeouts, etc. + if r.Version != "" { + body.Fault_ = Fault("incremental updates not supported yet", &types.NotSupported{}) + return body + } + + update := &types.UpdateSet{ + Version: "-", + } + + for _, ref := range pc.Filter { + filter := Map.Get(ref).(*PropertyFilter) + + r := &types.RetrievePropertiesEx{} + r.SpecSet = append(r.SpecSet, filter.Spec) + + res, fault := pc.collect(r) + if fault != nil { + body.Fault_ = Fault("", fault) + return body + } + + fu := types.PropertyFilterUpdate{ + Filter: ref, + } + + for _, o := range res.Objects { + ou := types.ObjectUpdate{ + Obj: o.Obj, + Kind: types.ObjectUpdateKindEnter, + } + + for _, p := range o.PropSet { + ou.ChangeSet = append(ou.ChangeSet, types.PropertyChange{ + Op: types.PropertyChangeOpAssign, + Name: p.Name, + Val: p.Val, + }) + } + + fu.ObjectSet = append(fu.ObjectSet, ou) + } + + update.FilterSet = append(update.FilterSet, fu) + } + + body.Res = &types.WaitForUpdatesExResponse{ + Returnval: update, + } + + return body +} + +// WaitForUpdates is deprecated, but pyvmomi is still using it at the moment. +func (pc *PropertyCollector) WaitForUpdates(r *types.WaitForUpdates) soap.HasFault { + body := &methods.WaitForUpdatesBody{} + + res := pc.WaitForUpdatesEx(&types.WaitForUpdatesEx{ + This: r.This, + Version: r.Version, + }) + + if res.Fault() != nil { + body.Fault_ = res.Fault() + } else { + body.Res = &types.WaitForUpdatesResponse{ + Returnval: *res.(*methods.WaitForUpdatesExBody).Res.Returnval, + } + } + + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/property_filter.go b/vendor/github.com/vmware/govmomi/simulator/property_filter.go new file mode 100644 index 00000000000..0d7d9a38daf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/property_filter.go @@ -0,0 +1,42 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type PropertyFilter struct { + mo.PropertyFilter + + pc *PropertyCollector +} + +func (f *PropertyFilter) DestroyPropertyFilter(c *types.DestroyPropertyFilter) soap.HasFault { + body := &methods.DestroyPropertyFilterBody{} + + f.pc.Filter = RemoveReference(c.This, f.pc.Filter) + + Map.Remove(c.This) + + body.Res = &types.DestroyPropertyFilterResponse{} + + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/registry.go b/vendor/github.com/vmware/govmomi/simulator/registry.go new file mode 100644 index 00000000000..2fdc55f10ea --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/registry.go @@ -0,0 +1,338 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// This is a map from a reference type name to a reference value name prefix. +// It's a convention that VirtualCenter follows. The map is not complete, but +// it should cover the most popular objects. +var refValueMap = map[string]string{ + "DistributedVirtualPortgroup": "dvportgroup", + "EnvironmentBrowser": "envbrowser", + "HostSystem": "host", + "ResourcePool": "resgroup", + "VirtualMachine": "vm", + "VirtualMachineSnapshot": "snapshot", + "VmwareDistributedVirtualSwitch": "dvs", +} + +// Map is the default Registry instance. +var Map = NewRegistry() + +// RegisterObject interface supports callbacks when objects are added and removed from the Registry +type RegisterObject interface { + mo.Reference + PutObject(mo.Reference) + RemoveObject(types.ManagedObjectReference) +} + +// Registry manages a map of mo.Reference objects +type Registry struct { + m sync.Mutex + objects map[types.ManagedObjectReference]mo.Reference + handlers map[types.ManagedObjectReference]RegisterObject + counter int +} + +// NewRegistry creates a new instances of Registry +func NewRegistry() *Registry { + r := &Registry{ + objects: make(map[types.ManagedObjectReference]mo.Reference), + handlers: make(map[types.ManagedObjectReference]RegisterObject), + } + + return r +} + +// typeName returns the type of the given object. +func typeName(item mo.Reference) string { + return reflect.TypeOf(item).Elem().Name() +} + +// valuePrefix returns the value name prefix of a given object +func valuePrefix(typeName string) string { + if v, ok := refValueMap[typeName]; ok { + return v + } + + return strings.ToLower(typeName) +} + +// newReference returns a new MOR, where Type defaults to type of the given item +// and Value defaults to a unique id for the given type. 
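+//
+// For example, the first HostSystem registered without an explicit value would
+// get a reference such as "HostSystem:host-1" (illustrative only; the counter is
+// shared across all types, so the actual number depends on Put order).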
+func (r *Registry) newReference(item mo.Reference) types.ManagedObjectReference { + ref := item.Reference() + + if ref.Type == "" { + ref.Type = typeName(item) + } + + if ref.Value == "" { + r.counter++ + ref.Value = fmt.Sprintf("%s-%d", valuePrefix(ref.Type), r.counter) + } + + return ref +} + +// AddHandler adds a RegisterObject handler to the Registry. +func (r *Registry) AddHandler(h RegisterObject) { + r.handlers[h.Reference()] = h +} + +// NewEntity sets Entity().Self with a new, unique Value. +// Useful for creating object instances from templates. +func (r *Registry) NewEntity(item mo.Entity) mo.Entity { + e := item.Entity() + e.Self.Value = "" + e.Self = r.newReference(item) + return item +} + +// PutEntity sets item.Parent to that of parent.Self before adding item to the Registry. +func (r *Registry) PutEntity(parent mo.Entity, item mo.Entity) mo.Entity { + e := item.Entity() + + if parent != nil { + e.Parent = &parent.Entity().Self + } + + r.Put(item) + + return item +} + +// Get returns the object for the given reference. +func (r *Registry) Get(ref types.ManagedObjectReference) mo.Reference { + r.m.Lock() + defer r.m.Unlock() + + return r.objects[ref] +} + +// Any returns the first instance of entity type specified by kind. +func (r *Registry) Any(kind string) mo.Entity { + r.m.Lock() + defer r.m.Unlock() + + for ref, val := range r.objects { + if ref.Type == kind { + return val.(mo.Entity) + } + } + + return nil +} + +// Put adds a new object to Registry, generating a ManagedObjectReference if not already set. +func (r *Registry) Put(item mo.Reference) mo.Reference { + r.m.Lock() + defer r.m.Unlock() + + ref := item.Reference() + if ref.Type == "" || ref.Value == "" { + ref = r.newReference(item) + // mo.Reference() returns a value, not a pointer so use reflect to set the Self field + reflect.ValueOf(item).Elem().FieldByName("Self").Set(reflect.ValueOf(ref)) + } + + if me, ok := item.(mo.Entity); ok { + me.Entity().ConfigStatus = types.ManagedEntityStatusGreen + me.Entity().OverallStatus = types.ManagedEntityStatusGreen + me.Entity().EffectiveRole = []int32{-1} // Admin + } + + r.objects[ref] = item + + for _, h := range r.handlers { + h.PutObject(item) + } + + return item +} + +// Remove removes an object from the Registry. +func (r *Registry) Remove(item types.ManagedObjectReference) { + r.m.Lock() + defer r.m.Unlock() + + for _, h := range r.handlers { + h.RemoveObject(item) + } + + delete(r.objects, item) + delete(r.handlers, item) +} + +// getEntityParent traverses up the inventory and returns the first object of type kind. +// If no object of type kind is found, the method will panic when it reaches the +// inventory root Folder where the Parent field is nil. 
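+//
+// For example, resolving kind "Datacenter" for a VirtualMachine typically walks
+// VirtualMachine -> Folder -> Datacenter by following each entity's Parent
+// reference (illustrative; the exact chain depends on the inventory layout).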
+func (r *Registry) getEntityParent(item mo.Entity, kind string) mo.Entity { + for { + parent := item.Entity().Parent + + item = r.Get(*parent).(mo.Entity) + + if item.Reference().Type == kind { + return item + } + } +} + +// getEntityDatacenter returns the Datacenter containing the given item +func (r *Registry) getEntityDatacenter(item mo.Entity) *mo.Datacenter { + return r.getEntityParent(item, "Datacenter").(*mo.Datacenter) +} + +func (r *Registry) getEntityFolder(item mo.Entity, kind string) *Folder { + dc := Map.getEntityDatacenter(item) + + var ref types.ManagedObjectReference + + switch kind { + case "datastore": + ref = dc.DatastoreFolder + } + + folder := r.Get(ref).(*Folder) + + // If Model was created with Folder option, use that Folder; else use top-level folder + for _, child := range folder.ChildEntity { + if child.Type == "Folder" { + folder = Map.Get(child).(*Folder) + break + } + } + + return folder +} + +// getEntityComputeResource returns the ComputeResource parent for the given item. +// A ResourcePool for example may have N Parents of type ResourcePool, but the top +// most Parent pool is always a ComputeResource child. +func (r *Registry) getEntityComputeResource(item mo.Entity) mo.Entity { + for { + parent := item.Entity().Parent + + item = r.Get(*parent).(mo.Entity) + + switch item.Reference().Type { + case "ComputeResource": + return item + case "ClusterComputeResource": + return item + } + } +} + +// FindByName returns the first mo.Entity of the given refs whose Name field is equal to the given name. +// If there is no match, nil is returned. +// This method is useful for cases where objects are required to have a unique name, such as Datastore with +// a HostStorageSystem or HostSystem within a ClusterComputeResource. +func (r *Registry) FindByName(name string, refs []types.ManagedObjectReference) mo.Entity { + for _, ref := range refs { + if e, ok := r.Get(ref).(mo.Entity); ok { + if name == e.Entity().Name { + return e + } + } + } + + return nil +} + +// FindReference returns the 1st match found in refs, or nil if not found. +func FindReference(refs []types.ManagedObjectReference, match ...types.ManagedObjectReference) *types.ManagedObjectReference { + for _, ref := range refs { + for _, m := range match { + if ref == m { + return &ref + } + } + } + + return nil +} + +// RemoveReference returns a slice with ref removed from refs +func RemoveReference(ref types.ManagedObjectReference, refs []types.ManagedObjectReference) []types.ManagedObjectReference { + var result []types.ManagedObjectReference + + for i, r := range refs { + if r == ref { + result = append(result, refs[i+1:]...) + break + } + + result = append(result, r) + } + + return result +} + +// AddReference returns a slice with ref appended if not already in refs. 
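+//
+// Together with FindReference and RemoveReference above, this helper keeps
+// slices of managed object references free of duplicates.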
+func AddReference(ref types.ManagedObjectReference, refs []types.ManagedObjectReference) []types.ManagedObjectReference { + if FindReference(refs, ref) == nil { + return append(refs, ref) + } + + return refs +} + +func (r *Registry) content() types.ServiceContent { + return r.Get(methods.ServiceInstance).(*ServiceInstance).Content +} + +// IsESX returns true if this Registry maps an ESX model +func (r *Registry) IsESX() bool { + return r.content().About.ApiType == "HostAgent" +} + +// IsVPX returns true if this Registry maps a VPX model +func (r *Registry) IsVPX() bool { + return !r.IsESX() +} + +// SearchIndex returns the SearchIndex singleton +func (r *Registry) SearchIndex() *SearchIndex { + return r.Get(r.content().SearchIndex.Reference()).(*SearchIndex) +} + +// FileManager returns the FileManager singleton +func (r *Registry) FileManager() *FileManager { + return r.Get(r.content().FileManager.Reference()).(*FileManager) +} + +// VirtualDiskManager returns the VirtualDiskManager singleton +func (r *Registry) VirtualDiskManager() *VirtualDiskManager { + return r.Get(r.content().VirtualDiskManager.Reference()).(*VirtualDiskManager) +} + +// ViewManager returns the ViewManager singleton +func (r *Registry) ViewManager() *ViewManager { + return r.Get(r.content().ViewManager.Reference()).(*ViewManager) +} diff --git a/vendor/github.com/vmware/govmomi/simulator/resource_pool.go b/vendor/github.com/vmware/govmomi/simulator/resource_pool.go new file mode 100644 index 00000000000..604f7b44dbd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/resource_pool.go @@ -0,0 +1,312 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "fmt" + "strings" + + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type ResourcePool struct { + mo.ResourcePool +} + +func NewResourcePool() *ResourcePool { + pool := &ResourcePool{ + ResourcePool: esx.ResourcePool, + } + + if Map.IsVPX() { + pool.DisabledMethod = nil // Enable VApp methods for VC + } + + return pool +} + +func allResourceFieldsSet(info *types.ResourceAllocationInfo) bool { + return info.Reservation != nil && + info.Limit != nil && + info.ExpandableReservation != nil && + info.Shares != nil +} + +func allResourceFieldsValid(info *types.ResourceAllocationInfo) bool { + if info.Reservation != nil { + if *info.Reservation < 0 { + return false + } + } + + if info.Limit != nil { + if *info.Limit < -1 { + return false + } + } + + if info.Shares != nil { + if info.Shares.Level == types.SharesLevelCustom { + if info.Shares.Shares < 0 { + return false + } + } + } + + if info.OverheadLimit != nil { + return false + } + + return true +} + +func (p *ResourcePool) createChild(name string, spec types.ResourceConfigSpec) (*ResourcePool, *soap.Fault) { + if e := Map.FindByName(name, p.ResourcePool.ResourcePool); e != nil { + return nil, Fault("", &types.DuplicateName{ + Name: e.Entity().Name, + Object: e.Reference(), + }) + } + + if !(allResourceFieldsSet(&spec.CpuAllocation) && allResourceFieldsValid(&spec.CpuAllocation)) { + return nil, Fault("", &types.InvalidArgument{ + InvalidProperty: "spec.cpuAllocation", + }) + } + + if !(allResourceFieldsSet(&spec.MemoryAllocation) && allResourceFieldsValid(&spec.MemoryAllocation)) { + return nil, Fault("", &types.InvalidArgument{ + InvalidProperty: "spec.memoryAllocation", + }) + } + + child := NewResourcePool() + + child.Name = name + child.Owner = p.Owner + child.Summary.GetResourcePoolSummary().Name = name + child.Config.CpuAllocation = spec.CpuAllocation + child.Config.MemoryAllocation = spec.MemoryAllocation + child.Config.Entity = spec.Entity + + return child, nil +} + +func (p *ResourcePool) CreateResourcePool(c *types.CreateResourcePool) soap.HasFault { + body := &methods.CreateResourcePoolBody{} + + child, err := p.createChild(c.Name, c.Spec) + if err != nil { + body.Fault_ = err + return body + } + + Map.PutEntity(p, Map.NewEntity(child)) + + p.ResourcePool.ResourcePool = append(p.ResourcePool.ResourcePool, child.Reference()) + + body.Res = &types.CreateResourcePoolResponse{ + Returnval: child.Reference(), + } + + return body +} + +func updateResourceAllocation(kind string, src, dst *types.ResourceAllocationInfo) types.BaseMethodFault { + if !allResourceFieldsValid(src) { + return &types.InvalidArgument{ + InvalidProperty: fmt.Sprintf("spec.%sAllocation", kind), + } + } + + if src.Reservation != nil { + dst.Reservation = src.Reservation + } + + if src.Limit != nil { + dst.Limit = src.Limit + } + + if src.Shares != nil { + dst.Shares = src.Shares + } + + return nil +} + +func (p *ResourcePool) UpdateConfig(c *types.UpdateConfig) soap.HasFault { + body := &methods.UpdateConfigBody{} + + if c.Name != "" { + if e := Map.FindByName(c.Name, p.ResourcePool.ResourcePool); e != nil { + body.Fault_ = Fault("", &types.DuplicateName{ + Name: e.Entity().Name, + Object: e.Reference(), + }) + return body + } + + p.Name = c.Name + } + + spec := c.Config + + if spec != nil { + if err := updateResourceAllocation("memory", &spec.MemoryAllocation, 
&p.Config.MemoryAllocation); err != nil { + body.Fault_ = Fault("", err) + return body + } + + if err := updateResourceAllocation("cpu", &spec.CpuAllocation, &p.Config.CpuAllocation); err != nil { + body.Fault_ = Fault("", err) + return body + } + } + + body.Res = &types.UpdateConfigResponse{} + + return body +} + +type VirtualApp struct { + mo.VirtualApp +} + +func NewVAppConfigSpec() types.VAppConfigSpec { + spec := types.VAppConfigSpec{ + Annotation: "vcsim", + VmConfigSpec: types.VmConfigSpec{ + Product: []types.VAppProductSpec{ + { + Info: &types.VAppProductInfo{ + Name: "vcsim", + Vendor: "VMware", + VendorUrl: "http://www.vmware.com/", + Version: "0.1", + }, + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationAdd, + }, + }, + }, + }, + } + + return spec +} + +func (p *ResourcePool) CreateVApp(req *types.CreateVApp) soap.HasFault { + body := &methods.CreateVAppBody{} + + pool, err := p.createChild(req.Name, req.ResSpec) + if err != nil { + body.Fault_ = err + return body + } + + child := &VirtualApp{} + child.ResourcePool = pool.ResourcePool + child.Self.Type = "VirtualApp" + child.ParentFolder = req.VmFolder + + if child.ParentFolder == nil { + folder := Map.getEntityDatacenter(p).VmFolder + child.ParentFolder = &folder + } + + child.VAppConfig = &types.VAppConfigInfo{ + VmConfigInfo: types.VmConfigInfo{}, + Annotation: req.ConfigSpec.Annotation, + } + + for _, product := range req.ConfigSpec.Product { + child.VAppConfig.Product = append(child.VAppConfig.Product, *product.Info) + } + + Map.PutEntity(p, Map.NewEntity(child)) + + p.ResourcePool.ResourcePool = append(p.ResourcePool.ResourcePool, child.Reference()) + + body.Res = &types.CreateVAppResponse{ + Returnval: child.Reference(), + } + + return body +} + +func (a *VirtualApp) CreateChildVMTask(req *types.CreateChildVM_Task) soap.HasFault { + body := &methods.CreateChildVM_TaskBody{} + + folder := Map.Get(*a.ParentFolder).(*Folder) + + res := folder.CreateVMTask(&types.CreateVM_Task{ + This: folder.Self, + Config: req.Config, + Host: req.Host, + Pool: req.This, + }) + + body.Res = &types.CreateChildVM_TaskResponse{ + Returnval: res.(*methods.CreateVM_TaskBody).Res.Returnval, + } + + return body +} + +func (a *VirtualApp) DestroyTask(req *types.Destroy_Task) soap.HasFault { + return (&ResourcePool{ResourcePool: a.ResourcePool}).DestroyTask(req) +} + +func (p *ResourcePool) DestroyTask(req *types.Destroy_Task) soap.HasFault { + task := CreateTask(p, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { + if strings.HasSuffix(p.Parent.Type, "ComputeResource") { + // Can't destroy the root pool + return nil, &types.InvalidArgument{} + } + + pp := Map.Get(*p.Parent).(*ResourcePool) + + parent := &pp.ResourcePool + // Remove child reference from rp + parent.ResourcePool = RemoveReference(req.This, parent.ResourcePool) + + // The grandchildren become children of the parent (rp) + parent.ResourcePool = append(parent.ResourcePool, p.ResourcePool.ResourcePool...) + + // And VMs move to the parent + vms := p.ResourcePool.Vm + for _, vm := range vms { + Map.Get(vm).(*VirtualMachine).ResourcePool = &parent.Self + } + + parent.Vm = append(parent.Vm, vms...) 
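+		// At this point the child pools and VMs have been re-homed to the parent
+		// pool; the destroyed pool itself is removed from the registry below.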
+ + Map.Remove(req.This) + + return nil, nil + }) + + return &methods.Destroy_TaskBody{ + Res: &types.Destroy_TaskResponse{ + Returnval: task.Run(), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/search_index.go b/vendor/github.com/vmware/govmomi/simulator/search_index.go new file mode 100644 index 00000000000..c56dff2cba7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/search_index.go @@ -0,0 +1,155 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "strings" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type SearchIndex struct { + mo.SearchIndex +} + +func NewSearchIndex(ref types.ManagedObjectReference) object.Reference { + m := &SearchIndex{} + m.Self = ref + return m +} + +func (s *SearchIndex) FindByDatastorePath(r *types.FindByDatastorePath) soap.HasFault { + res := &methods.FindByDatastorePathBody{Res: new(types.FindByDatastorePathResponse)} + + for ref, obj := range Map.objects { + vm, ok := obj.(*VirtualMachine) + if !ok { + continue + } + + if vm.Config.Files.VmPathName == r.Path { + res.Res.Returnval = &ref + break + } + } + + return res +} + +func (s *SearchIndex) FindByInventoryPath(req *types.FindByInventoryPath) soap.HasFault { + body := &methods.FindByInventoryPathBody{Res: new(types.FindByInventoryPathResponse)} + + path := strings.Split(req.InventoryPath, "/") + if len(path) <= 1 { + return body + } + + root := Map.content().RootFolder + o := &root + + for _, name := range path[1:] { + f := s.FindChild(&types.FindChild{Entity: *o, Name: name}) + + o = f.(*methods.FindChildBody).Res.Returnval + if o == nil { + break + } + } + + body.Res.Returnval = o + + return body +} + +func (s *SearchIndex) FindChild(req *types.FindChild) soap.HasFault { + body := &methods.FindChildBody{} + + obj := Map.Get(req.Entity) + + if obj == nil { + body.Fault_ = Fault("", &types.ManagedObjectNotFound{Obj: req.Entity}) + return body + } + + body.Res = new(types.FindChildResponse) + + var children []types.ManagedObjectReference + + switch e := obj.(type) { + case *mo.Datacenter: + children = []types.ManagedObjectReference{e.VmFolder, e.HostFolder, e.DatastoreFolder, e.NetworkFolder} + case *Folder: + children = e.ChildEntity + case *mo.ComputeResource: + children = e.Host + children = append(children, *e.ResourcePool) + case *ClusterComputeResource: + children = e.Host + children = append(children, *e.ResourcePool) + case *ResourcePool: + children = e.ResourcePool.ResourcePool + children = append(children, e.Vm...) + case *VirtualApp: + children = e.ResourcePool.ResourcePool + children = append(children, e.Vm...) 
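+		// A VirtualApp exposes the same children as a plain ResourcePool above:
+		// child pools plus VMs.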
+ } + + match := Map.FindByName(req.Name, children) + + if match != nil { + ref := match.Reference() + body.Res.Returnval = &ref + } + + return body +} + +func (s *SearchIndex) FindByUuid(req *types.FindByUuid) soap.HasFault { + body := &methods.FindByUuidBody{Res: new(types.FindByUuidResponse)} + + if req.VmSearch { + // Find Virtual Machine using UUID + for ref, obj := range Map.objects { + vm, ok := obj.(*VirtualMachine) + if !ok { + continue + } + if vm.Config.Uuid == req.Uuid { + body.Res.Returnval = &ref + break + } + } + } else { + // Find Host System using UUID + for ref, obj := range Map.objects { + host, ok := obj.(*HostSystem) + if !ok { + continue + } + if host.Summary.Hardware.Uuid == req.Uuid { + body.Res.Returnval = &ref + break + } + } + } + + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/service_instance.go b/vendor/github.com/vmware/govmomi/simulator/service_instance.go new file mode 100644 index 00000000000..4f6a3bbe7db --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/service_instance.go @@ -0,0 +1,99 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "time" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/simulator/vpx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type ServiceInstance struct { + mo.ServiceInstance +} + +func NewServiceInstance(content types.ServiceContent, folder mo.Folder) *ServiceInstance { + Map = NewRegistry() + + s := &ServiceInstance{} + + s.Self = methods.ServiceInstance + s.Content = content + + Map.Put(s) + + f := &Folder{Folder: folder} + Map.Put(f) + + var setting []types.BaseOptionValue + + if content.About.ApiType == "HostAgent" { + CreateDefaultESX(f) + } else { + setting = vpx.Setting + } + + objects := []object.Reference{ + NewSessionManager(*s.Content.SessionManager), + NewAuthorizationManager(*s.Content.AuthorizationManager), + NewPerformanceManager(*s.Content.PerfManager), + NewPropertyCollector(s.Content.PropertyCollector), + NewFileManager(*s.Content.FileManager), + NewVirtualDiskManager(*s.Content.VirtualDiskManager), + NewLicenseManager(*s.Content.LicenseManager), + NewSearchIndex(*s.Content.SearchIndex), + NewViewManager(*s.Content.ViewManager), + NewTaskManager(*s.Content.TaskManager), + NewUserDirectory(*s.Content.UserDirectory), + NewOptionManager(s.Content.Setting, setting), + } + + if s.Content.CustomFieldsManager != nil { + objects = append(objects, NewCustomFieldsManager(*s.Content.CustomFieldsManager)) + } + + if s.Content.IpPoolManager != nil { + objects = append(objects, NewIpPoolManager(*s.Content.IpPoolManager)) + } + + for _, o := range objects { + Map.Put(o) + } + + return s +} + +func (s *ServiceInstance) RetrieveServiceContent(*types.RetrieveServiceContent) soap.HasFault { + return &methods.RetrieveServiceContentBody{ + Res: 
&types.RetrieveServiceContentResponse{ + Returnval: s.Content, + }, + } +} + +func (*ServiceInstance) CurrentTime(*types.CurrentTime) soap.HasFault { + return &methods.CurrentTimeBody{ + Res: &types.CurrentTimeResponse{ + Returnval: time.Now(), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/session_manager.go b/vendor/github.com/vmware/govmomi/simulator/session_manager.go new file mode 100644 index 00000000000..92a8f9b0941 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/session_manager.go @@ -0,0 +1,83 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "time" + + "github.com/google/uuid" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/session" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type SessionManager struct { + mo.SessionManager + + ServiceHostName string +} + +func NewSessionManager(ref types.ManagedObjectReference) object.Reference { + s := &SessionManager{} + s.Self = ref + return s +} + +func (s *SessionManager) Login(login *types.Login) soap.HasFault { + body := &methods.LoginBody{} + + if login.Locale == "" { + login.Locale = session.Locale + } + + if login.UserName == "" || login.Password == "" { + body.Fault_ = Fault("Login failure", &types.InvalidLogin{}) + } else { + body.Res = &types.LoginResponse{ + Returnval: types.UserSession{ + Key: uuid.New().String(), + UserName: login.UserName, + FullName: login.UserName, + LoginTime: time.Now(), + LastActiveTime: time.Now(), + Locale: login.Locale, + MessageLocale: login.Locale, + }, + } + } + + return body +} + +func (s *SessionManager) Logout(*types.Logout) soap.HasFault { + return &methods.LogoutBody{Res: new(types.LogoutResponse)} +} + +func (s *SessionManager) AcquireGenericServiceTicket(ticket *types.AcquireGenericServiceTicket) soap.HasFault { + return &methods.AcquireGenericServiceTicketBody{ + Res: &types.AcquireGenericServiceTicketResponse{ + Returnval: types.SessionManagerGenericServiceTicket{ + Id: uuid.New().String(), + HostName: s.ServiceHostName, + }, + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/simulator.go b/vendor/github.com/vmware/govmomi/simulator/simulator.go new file mode 100644 index 00000000000..9a9cf0194bf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/simulator.go @@ -0,0 +1,551 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/pem" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path" + "reflect" + "sort" + "strings" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" +) + +// Trace when set to true, writes SOAP traffic to stderr +var Trace = false + +// Method encapsulates a decoded SOAP client request +type Method struct { + Name string + This types.ManagedObjectReference + Body types.AnyType +} + +// Service decodes incoming requests and dispatches to a Handler +type Service struct { + client *vim25.Client + + readAll func(io.Reader) ([]byte, error) + + TLS *tls.Config +} + +// Server provides a simulator Service over HTTP +type Server struct { + *httptest.Server + URL *url.URL + + caFile string +} + +// New returns an initialized simulator Service instance +func New(instance *ServiceInstance) *Service { + s := &Service{ + readAll: ioutil.ReadAll, + } + + s.client, _ = vim25.NewClient(context.Background(), s) + + return s +} + +type serverFaultBody struct { + Reason *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *serverFaultBody) Fault() *soap.Fault { return b.Reason } + +func serverFault(msg string) soap.HasFault { + return &serverFaultBody{Reason: Fault(msg, &types.InvalidRequest{})} +} + +// Fault wraps the given message and fault in a soap.Fault +func Fault(msg string, fault types.BaseMethodFault) *soap.Fault { + f := &soap.Fault{ + Code: "ServerFaultCode", + String: msg, + } + + f.Detail.Fault = fault + + return f +} + +func (s *Service) call(method *Method) soap.HasFault { + handler := Map.Get(method.This) + + if handler == nil { + msg := fmt.Sprintf("managed object not found: %s", method.This) + log.Print(msg) + fault := &types.ManagedObjectNotFound{Obj: method.This} + return &serverFaultBody{Reason: Fault(msg, fault)} + } + + name := method.Name + + if strings.HasSuffix(name, vTaskSuffix) { + // Make golint happy renaming "Foo_Task" -> "FooTask" + name = name[:len(name)-len(vTaskSuffix)] + sTaskSuffix + } + + m := reflect.ValueOf(handler).MethodByName(name) + if !m.IsValid() { + msg := fmt.Sprintf("%s does not implement: %s", method.This, method.Name) + log.Print(msg) + fault := &types.MethodNotFound{Receiver: method.This, Method: method.Name} + return &serverFaultBody{Reason: Fault(msg, fault)} + } + + if e, ok := handler.(mo.Entity); ok { + for _, dm := range e.Entity().DisabledMethod { + if name == dm { + msg := fmt.Sprintf("%s method is disabled: %s", method.This, method.Name) + fault := &types.MethodDisabled{} + return &serverFaultBody{Reason: Fault(msg, fault)} + } + } + } + + res := m.Call([]reflect.Value{reflect.ValueOf(method.Body)}) + + return res[0].Interface().(soap.HasFault) +} + +// RoundTrip implements the soap.RoundTripper interface in process. +// Rather than encode/decode SOAP over HTTP, this implementation uses reflection. 
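+//
+// A rough sketch of driving the simulator in process (illustrative only; s is a
+// *Service for which NewServiceInstance has already populated the global Map):
+//
+//	req := &methods.CurrentTimeBody{Req: &types.CurrentTime{This: methods.ServiceInstance}}
+//	res := &methods.CurrentTimeBody{}
+//	if err := s.RoundTrip(context.Background(), req, res); err == nil {
+//		fmt.Println(res.Res.Returnval) // the simulator's current time
+//	}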
+func (s *Service) RoundTrip(ctx context.Context, request, response soap.HasFault) error { + field := func(r soap.HasFault, name string) reflect.Value { + return reflect.ValueOf(r).Elem().FieldByName(name) + } + + // Every struct passed to soap.RoundTrip has "Req" and "Res" fields + req := field(request, "Req") + + // Every request has a "This" field. + this := req.Elem().FieldByName("This") + + method := &Method{ + Name: req.Elem().Type().Name(), + This: this.Interface().(types.ManagedObjectReference), + Body: req.Interface(), + } + + res := s.call(method) + + if err := res.Fault(); err != nil { + return soap.WrapSoapFault(err) + } + + field(response, "Res").Set(field(res, "Res")) + + return nil +} + +// soapEnvelope is a copy of soap.Envelope, with namespace changed to "soapenv", +// and additional namespace attributes required by some client libraries. +// Go still has issues decoding with such a namespace, but encoding is ok. +type soapEnvelope struct { + XMLName xml.Name `xml:"soapenv:Envelope"` + Enc string `xml:"xmlns:soapenc,attr"` + Env string `xml:"xmlns:soapenv,attr"` + XSD string `xml:"xmlns:xsd,attr"` + XSI string `xml:"xmlns:xsi,attr"` + Body interface{} `xml:"soapenv:Body"` +} + +// soapFault is a copy of soap.Fault, with the same changes as soapEnvelope +type soapFault struct { + XMLName xml.Name `xml:"soapenv:Fault"` + Code string `xml:"faultcode"` + String string `xml:"faultstring"` + Detail struct { + Fault types.AnyType `xml:",any,typeattr"` + } `xml:"detail"` +} + +// About generates some info about the simulator. +func (s *Service) About(w http.ResponseWriter, r *http.Request) { + var about struct { + Methods []string + Types []string + } + + seen := make(map[string]bool) + + f := reflect.TypeOf((*soap.HasFault)(nil)).Elem() + + for _, obj := range Map.objects { + kind := obj.Reference().Type + if seen[kind] { + continue + } + seen[kind] = true + + about.Types = append(about.Types, kind) + + t := reflect.TypeOf(obj) + for i := 0; i < t.NumMethod(); i++ { + m := t.Method(i) + if seen[m.Name] { + continue + } + seen[m.Name] = true + + if m.Type.NumIn() != 2 || m.Type.NumOut() != 1 || m.Type.Out(0) != f { + continue + } + + about.Methods = append(about.Methods, strings.Replace(m.Name, "Task", "_Task", 1)) + } + } + + sort.Strings(about.Methods) + sort.Strings(about.Types) + + w.Header().Set("Content-Type", "application/json") + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + _ = enc.Encode(&about) +} + +// ServeSDK implements the http.Handler interface +func (s *Service) ServeSDK(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + body, err := s.readAll(r.Body) + _ = r.Body.Close() + if err != nil { + log.Printf("error reading body: %s", err) + w.WriteHeader(http.StatusBadRequest) + return + } + + if Trace { + fmt.Fprintf(os.Stderr, "Request: %s\n", string(body)) + } + + var res soap.HasFault + var soapBody interface{} + + method, err := UnmarshalBody(body) + if err != nil { + res = serverFault(err.Error()) + } else { + res = s.call(method) + } + + if f := res.Fault(); f != nil { + w.WriteHeader(http.StatusInternalServerError) + + // the generated method/*Body structs use the '*soap.Fault' type, + // so we need our own Body type to use the modified '*soapFault' type. 
+ soapBody = struct { + Fault *soapFault + }{ + &soapFault{ + Code: f.Code, + String: f.String, + Detail: f.Detail, + }, + } + } else { + w.WriteHeader(http.StatusOK) + + soapBody = res + } + + var out bytes.Buffer + + fmt.Fprint(&out, xml.Header) + e := xml.NewEncoder(&out) + err = e.Encode(&soapEnvelope{ + Enc: "http://schemas.xmlsoap.org/soap/encoding/", + Env: "http://schemas.xmlsoap.org/soap/envelope/", + XSD: "http://www.w3.org/2001/XMLSchema", + XSI: "http://www.w3.org/2001/XMLSchema-instance", + Body: soapBody, + }) + if err == nil { + err = e.Flush() + } + + if err != nil { + log.Printf("error encoding %s response: %s", method.Name, err) + return + } + + if Trace { + fmt.Fprintf(os.Stderr, "Response: %s\n", out.String()) + } + + _, _ = w.Write(out.Bytes()) +} + +func (s *Service) findDatastore(query url.Values) (*Datastore, error) { + ctx := context.Background() + + finder := find.NewFinder(s.client, false) + dc, err := finder.DatacenterOrDefault(ctx, query.Get("dcName")) + if err != nil { + return nil, err + } + + finder.SetDatacenter(dc) + + ds, err := finder.DatastoreOrDefault(ctx, query.Get("dsName")) + if err != nil { + return nil, err + } + + return Map.Get(ds.Reference()).(*Datastore), nil +} + +const folderPrefix = "/folder/" + +// ServeDatastore handler for Datastore access via /folder path. +func (s *Service) ServeDatastore(w http.ResponseWriter, r *http.Request) { + ds, ferr := s.findDatastore(r.URL.Query()) + if ferr != nil { + log.Printf("failed to locate datastore with query params: %s", r.URL.RawQuery) + w.WriteHeader(http.StatusNotFound) + return + } + + file := strings.TrimPrefix(r.URL.Path, folderPrefix) + p := path.Join(ds.Info.GetDatastoreInfo().Url, file) + + switch r.Method { + case "GET": + f, err := os.Open(p) + if err != nil { + log.Printf("failed to %s '%s': %s", r.Method, p, err) + w.WriteHeader(http.StatusNotFound) + return + } + defer f.Close() + + _, _ = io.Copy(w, f) + case "POST": + _, err := os.Stat(p) + if err == nil { + // File exists + w.WriteHeader(http.StatusConflict) + return + } + + // File does not exist, fallthrough to create via PUT logic + fallthrough + case "PUT": + f, err := os.Create(p) + if err != nil { + log.Printf("failed to %s '%s': %s", r.Method, p, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + defer f.Close() + + _, _ = io.Copy(f, r.Body) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } +} + +// ServiceVersions handler for the /sdk/vimServiceVersions.xml path. 
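+// The static XML below advertises API version 6.5 for the vim25 namespace, with
+// 6.0 and 5.5 listed as prior versions, so version-negotiating clients can connect.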
+func (*Service) ServiceVersions(w http.ResponseWriter, r *http.Request) {
+	// pyvmomi depends on this
+
+	const versions = xml.Header + `<namespaces version="1.0">
+ <namespace>
+  <name>urn:vim25</name>
+  <version>6.5</version>
+  <priorVersions>
+   <version>6.0</version>
+   <version>5.5</version>
+  </priorVersions>
+ </namespace>
+</namespaces>
+`
+	fmt.Fprint(w, versions)
+}
+
+// NewServer returns an http Server instance for the given service
+func (s *Service) NewServer() *Server {
+	mux := http.NewServeMux()
+	path := "/sdk"
+
+	mux.HandleFunc(path, s.ServeSDK)
+	mux.HandleFunc(path+"/vimServiceVersions.xml", s.ServiceVersions)
+	mux.HandleFunc(folderPrefix, s.ServeDatastore)
+	mux.HandleFunc("/about", s.About)
+
+	// Using NewUnstartedServer() instead of NewServer(),
+	// for use in main.go, where Start() blocks, we can still set ServiceHostName
+	ts := httptest.NewUnstartedServer(mux)
+
+	u := &url.URL{
+		Scheme: "http",
+		Host:   ts.Listener.Addr().String(),
+		Path:   path,
+		User:   url.UserPassword("user", "pass"),
+	}
+
+	// Redirect clients to this http server, rather than HostSystem.Name
+	Map.Get(*s.client.ServiceContent.SessionManager).(*SessionManager).ServiceHostName = u.Host
+
+	if f := flag.Lookup("httptest.serve"); f != nil {
+		// Avoid the blocking behaviour of httptest.Server.Start() when this flag is set
+		_ = f.Value.Set("")
+	}
+
+	if s.TLS == nil {
+		ts.Start()
+	} else {
+		ts.TLS = s.TLS
+		ts.StartTLS()
+		u.Scheme += "s"
+	}
+
+	return &Server{
+		Server: ts,
+		URL:    u,
+	}
+}
+
+// Certificate returns the TLS certificate for the Server if started with TLS enabled.
+// This method will panic if TLS is not enabled for the server.
+func (s *Server) Certificate() *x509.Certificate {
+	// By default httptest.StartTLS uses http/internal.LocalhostCert, which we can access here:
+	cert, _ := x509.ParseCertificate(s.TLS.Certificates[0].Certificate[0])
+	return cert
+}
+
+// CertificateInfo returns Server.Certificate() as object.HostCertificateInfo
+func (s *Server) CertificateInfo() *object.HostCertificateInfo {
+	info := new(object.HostCertificateInfo)
+	info.FromCertificate(s.Certificate())
+	return info
+}
+
+// CertificateFile returns a file name, where the file contains the PEM encoded Server.Certificate.
+// The temporary file is removed when Server.Close() is called.
+func (s *Server) CertificateFile() (string, error) {
+	if s.caFile != "" {
+		return s.caFile, nil
+	}
+
+	f, err := ioutil.TempFile("", "vcsim-")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	s.caFile = f.Name()
+	cert := s.Certificate()
+	return s.caFile, pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
+}
+
+// Close shuts down the server and blocks until all outstanding
+// requests on this server have completed.
+func (s *Server) Close() { + s.Server.Close() + if s.caFile != "" { + _ = os.Remove(s.caFile) + } +} + +var typeFunc = types.TypeFunc() + +// UnmarshalBody extracts the Body from a soap.Envelope and unmarshals to the corresponding govmomi type +func UnmarshalBody(data []byte) (*Method, error) { + body := struct { + Content string `xml:",innerxml"` + }{} + + req := soap.Envelope{ + Body: &body, + } + + err := xml.Unmarshal(data, &req) + if err != nil { + return nil, fmt.Errorf("xml.Unmarshal: %s", err) + } + + decoder := xml.NewDecoder(bytes.NewReader([]byte(body.Content))) + decoder.TypeFunc = typeFunc // required to decode interface types + + var start *xml.StartElement + + for { + tok, derr := decoder.Token() + if derr != nil { + return nil, fmt.Errorf("decoding body: %s", err) + } + if t, ok := tok.(xml.StartElement); ok { + start = &t + break + } + } + + kind := start.Name.Local + + rtype, ok := typeFunc(kind) + if !ok { + return nil, fmt.Errorf("no vmomi type defined for '%s'", kind) + } + + var val interface{} + if rtype != nil { + val = reflect.New(rtype).Interface() + } + + err = decoder.DecodeElement(val, start) + if err != nil { + return nil, fmt.Errorf("decoding %s: %s", kind, err) + } + + method := &Method{Name: kind, Body: val} + + field := reflect.ValueOf(val).Elem().FieldByName("This") + + method.This = field.Interface().(types.ManagedObjectReference) + + return method, nil +} diff --git a/vendor/github.com/vmware/govmomi/simulator/snapshot.go b/vendor/github.com/vmware/govmomi/simulator/snapshot.go new file mode 100644 index 00000000000..ad98fbf3c42 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/snapshot.go @@ -0,0 +1,68 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type VirtualMachineSnapshot struct { + mo.VirtualMachineSnapshot +} + +func (v *VirtualMachineSnapshot) RemoveSnapshotTask(req *types.RemoveSnapshot_Task) soap.HasFault { + task := CreateTask(v, "removeSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { + Map.Remove(req.This) + + vm := Map.Get(v.Vm).(*VirtualMachine) + + if vm.Snapshot.CurrentSnapshot != nil && *vm.Snapshot.CurrentSnapshot == req.This { + parent := findParentSnapshotInTree(vm.Snapshot.RootSnapshotList, req.This) + vm.Snapshot.CurrentSnapshot = parent + } + + vm.Snapshot.RootSnapshotList = removeSnapshotInTree(vm.Snapshot.RootSnapshotList, req.This, req.RemoveChildren) + + return nil, nil + }) + + return &methods.RemoveSnapshot_TaskBody{ + Res: &types.RemoveSnapshot_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (v *VirtualMachineSnapshot) RevertToSnapshotTask(req *types.RevertToSnapshot_Task) soap.HasFault { + task := CreateTask(v, "revertToSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { + vm := Map.Get(v.Vm).(*VirtualMachine) + + ref := v.Reference() + vm.Snapshot.CurrentSnapshot = &ref + + return nil, nil + }) + + return &methods.RevertToSnapshot_TaskBody{ + Res: &types.RevertToSnapshot_TaskResponse{ + Returnval: task.Run(), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/task.go b/vendor/github.com/vmware/govmomi/simulator/task.go new file mode 100644 index 00000000000..d6dd5bc415f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/task.go @@ -0,0 +1,102 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "fmt" + "reflect" + "strings" + "time" + + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +const vTaskSuffix = "_Task" // vmomi suffix +const sTaskSuffix = "Task" // simulator suffix (avoiding golint warning) + +type Task struct { + mo.Task + + Execute func(*Task) (types.AnyType, types.BaseMethodFault) +} + +func NewTask(runner TaskRunner) *Task { + ref := runner.Reference() + name := reflect.TypeOf(runner).Elem().Name() + name = strings.Replace(name, "VM", "Vm", 1) // "VM" for the type to make go-lint happy, but "Vm" for the vmodl ID + return CreateTask(ref, name, runner.Run) +} + +func CreateTask(e mo.Reference, name string, run func(*Task) (types.AnyType, types.BaseMethodFault)) *Task { + ref := e.Reference() + id := name + + if strings.HasSuffix(id, sTaskSuffix) { + id = id[:len(id)-len(sTaskSuffix)] + name = id + vTaskSuffix + } + + task := &Task{ + Execute: run, + } + + Map.Put(task) + + task.Info.Key = task.Self.Value + task.Info.Task = task.Self + task.Info.Name = ucFirst(name) + task.Info.DescriptionId = fmt.Sprintf("%s.%s", ref.Type, id) + task.Info.Entity = &ref + task.Info.EntityName = ref.Value + + task.Info.QueueTime = time.Now() + task.Info.State = types.TaskInfoStateQueued + + return task +} + +type TaskRunner interface { + mo.Reference + + Run(*Task) (types.AnyType, types.BaseMethodFault) +} + +func (t *Task) Run() types.ManagedObjectReference { + now := time.Now() + t.Info.StartTime = &now + + t.Info.State = types.TaskInfoStateRunning + + res, err := t.Execute(t) + + now = time.Now() + t.Info.CompleteTime = &now + + if err != nil { + t.Info.State = types.TaskInfoStateError + t.Info.Error = &types.LocalizedMethodFault{ + Fault: err, + LocalizedMessage: fmt.Sprintf("%T", err), + } + } else { + t.Info.Result = res + t.Info.State = types.TaskInfoStateSuccess + } + + return t.Self +} diff --git a/vendor/github.com/vmware/govmomi/simulator/task_manager.go b/vendor/github.com/vmware/govmomi/simulator/task_manager.go new file mode 100644 index 00000000000..df271082596 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/task_manager.go @@ -0,0 +1,52 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var recentTaskMax = 200 // the VC limit + +type TaskManager struct { + mo.TaskManager +} + +func NewTaskManager(ref types.ManagedObjectReference) object.Reference { + s := &TaskManager{} + s.Self = ref + Map.AddHandler(s) + return s +} + +func (m *TaskManager) PutObject(obj mo.Reference) { + ref := obj.Reference() + if ref.Type != "Task" { + return + } + + m.RecentTask = append(m.RecentTask, ref) + + if len(m.RecentTask) > recentTaskMax { + m.RecentTask = m.RecentTask[1:] + } +} + +func (m *TaskManager) RemoveObject(_ types.ManagedObjectReference) { +} diff --git a/vendor/github.com/vmware/govmomi/simulator/user_directory.go b/vendor/github.com/vmware/govmomi/simulator/user_directory.go new file mode 100644 index 00000000000..2a2f0a3aabc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/user_directory.go @@ -0,0 +1,78 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "strings" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +var DefaultUserGroup = []*types.UserSearchResult{ + {FullName: "root", Group: true, Principal: "root"}, + {FullName: "root", Group: false, Principal: "root"}, + {FullName: "administrator", Group: false, Principal: "admin"}, +} + +type UserDirectory struct { + mo.UserDirectory + + userGroup []*types.UserSearchResult +} + +func NewUserDirectory(ref types.ManagedObjectReference) object.Reference { + u := &UserDirectory{} + + u.Self = ref + u.userGroup = DefaultUserGroup + + return u +} + +func (u *UserDirectory) RetrieveUserGroups(req *types.RetrieveUserGroups) soap.HasFault { + compare := compareFunc(req.SearchStr, req.ExactMatch) + + var res []types.BaseUserSearchResult + for _, ug := range u.userGroup { + if req.FindUsers && !ug.Group || req.FindGroups && ug.Group { + if compare(ug.Principal) { + res = append(res, ug) + } + } + } + + body := &methods.RetrieveUserGroupsBody{ + Res: &types.RetrieveUserGroupsResponse{ + Returnval: res, + }, + } + + return body +} + +func compareFunc(compared string, exactly bool) func(string) bool { + return func(s string) bool { + if exactly { + return s == compared + } + return strings.Contains(strings.ToLower(s), strings.ToLower(compared)) + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/view_manager.go b/vendor/github.com/vmware/govmomi/simulator/view_manager.go new file mode 100644 index 00000000000..959f22847b9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/view_manager.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "reflect" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type ViewManager struct { + mo.ViewManager + + entities map[string]bool +} + +var entities = []struct { + Type reflect.Type + Container bool +}{ + {reflect.TypeOf((*mo.ManagedEntity)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.Folder)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.StoragePod)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.Datacenter)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.ComputeResource)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.ClusterComputeResource)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.HostSystem)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.ResourcePool)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.VirtualApp)(nil)).Elem(), true}, + {reflect.TypeOf((*mo.VirtualMachine)(nil)).Elem(), false}, + {reflect.TypeOf((*mo.Datastore)(nil)).Elem(), false}, + {reflect.TypeOf((*mo.Network)(nil)).Elem(), false}, + {reflect.TypeOf((*mo.OpaqueNetwork)(nil)).Elem(), false}, + {reflect.TypeOf((*mo.DistributedVirtualPortgroup)(nil)).Elem(), false}, + {reflect.TypeOf((*mo.DistributedVirtualSwitch)(nil)).Elem(), false}, + {reflect.TypeOf((*mo.VmwareDistributedVirtualSwitch)(nil)).Elem(), false}, +} + +func NewViewManager(ref types.ManagedObjectReference) object.Reference { + s := &ViewManager{ + entities: make(map[string]bool), + } + + s.Self = ref + + for _, e := range entities { + s.entities[e.Type.Name()] = e.Container + } + + return s +} + +func destroyView(ref types.ManagedObjectReference) soap.HasFault { + m := Map.ViewManager() + + m.ViewList = RemoveReference(ref, m.ViewList) + + return &methods.DestroyViewBody{ + Res: &types.DestroyViewResponse{}, + } +} + +func (m *ViewManager) CreateContainerView(req *types.CreateContainerView) soap.HasFault { + body := &methods.CreateContainerViewBody{} + + root := Map.Get(req.Container) + if root == nil { + body.Fault_ = Fault("", &types.ManagedObjectNotFound{Obj: req.Container}) + return body + } + + if m.entities[root.Reference().Type] != true { + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "container"}) + return body + } + + container := &ContainerView{ + mo.ContainerView{ + Container: root.Reference(), + Recursive: req.Recursive, + Type: req.Type, + }, + make(map[string]bool), + } + + for _, ctype := range container.Type { + if _, ok := m.entities[ctype]; !ok { + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "type"}) + return body + } + + container.types[ctype] = true + + for _, e := range entities { + // Check for embedded types + if f, ok := e.Type.FieldByName(ctype); ok && f.Anonymous { + container.types[e.Type.Name()] = true + } + } + } + + Map.Put(container) + + m.ViewList = append(m.ViewList, container.Reference()) + + body.Res = &types.CreateContainerViewResponse{ + Returnval: container.Self, + } + + container.add(root) + + return body +} + +type ContainerView struct { + mo.ContainerView + + types map[string]bool +} + +func (v 
*ContainerView) DestroyView(c *types.DestroyView) soap.HasFault { + return destroyView(c.This) +} + +func (v *ContainerView) include(o types.ManagedObjectReference) bool { + if len(v.types) == 0 { + return true + } + + return v.types[o.Type] +} + +func (v *ContainerView) add(root mo.Reference) { + var children []types.ManagedObjectReference + + switch e := root.(type) { + case *mo.Datacenter: + children = []types.ManagedObjectReference{e.VmFolder, e.HostFolder, e.DatastoreFolder, e.NetworkFolder} + case *Folder: + children = e.ChildEntity + case *mo.ComputeResource: + children = e.Host + children = append(children, *e.ResourcePool) + case *ClusterComputeResource: + children = e.Host + children = append(children, *e.ResourcePool) + case *ResourcePool: + children = e.ResourcePool.ResourcePool + children = append(children, e.Vm...) + case *VirtualApp: + children = e.ResourcePool.ResourcePool + children = append(children, e.Vm...) + case *HostSystem: + children = e.Vm + } + + for _, child := range children { + if v.include(child) { + v.View = AddReference(child, v.View) + } + + if v.Recursive { + v.add(Map.Get(child)) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/virtual_disk_manager.go b/vendor/github.com/vmware/govmomi/simulator/virtual_disk_manager.go new file mode 100644 index 00000000000..ba37e42ed6b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/virtual_disk_manager.go @@ -0,0 +1,193 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "os" + "strings" + + "github.com/google/uuid" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type VirtualDiskManager struct { + mo.VirtualDiskManager +} + +func NewVirtualDiskManager(ref types.ManagedObjectReference) object.Reference { + m := &VirtualDiskManager{} + m.Self = ref + return m +} + +func (m *VirtualDiskManager) names(name string) []string { + return []string{ + strings.Replace(name, ".vmdk", "-flat.vmdk", 1), + name, + } +} + +func (m *VirtualDiskManager) createVirtualDisk(req *types.CreateVirtualDisk_Task) types.BaseMethodFault { + fm := Map.FileManager() + + file, fault := fm.resolve(req.Datacenter, req.Name) + if fault != nil { + return fault + } + + for _, name := range m.names(file) { + _, err := os.Stat(name) + if err == nil { + return fm.fault(name, nil, new(types.FileAlreadyExists)) + } + + f, err := os.Create(name) + if err != nil { + return fm.fault(name, err, new(types.CannotCreateFile)) + } + + _ = f.Close() + } + + return nil +} + +func (m *VirtualDiskManager) CreateVirtualDiskTask(req *types.CreateVirtualDisk_Task) soap.HasFault { + task := CreateTask(m, "createVirtualDisk", func(*Task) (types.AnyType, types.BaseMethodFault) { + return nil, m.createVirtualDisk(req) + }) + + return &methods.CreateVirtualDisk_TaskBody{ + Res: &types.CreateVirtualDisk_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (m *VirtualDiskManager) DeleteVirtualDiskTask(req *types.DeleteVirtualDisk_Task) soap.HasFault { + task := CreateTask(m, "deleteVirtualDisk", func(*Task) (types.AnyType, types.BaseMethodFault) { + fm := Map.FileManager() + + for _, name := range m.names(req.Name) { + err := fm.deleteDatastoreFile(&types.DeleteDatastoreFile_Task{ + Name: name, + Datacenter: req.Datacenter, + }) + + if err != nil { + return nil, err + } + } + + return nil, nil + }) + + return &methods.DeleteVirtualDisk_TaskBody{ + Res: &types.DeleteVirtualDisk_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (m *VirtualDiskManager) MoveVirtualDiskTask(req *types.MoveVirtualDisk_Task) soap.HasFault { + task := CreateTask(m, "moveVirtualDisk", func(*Task) (types.AnyType, types.BaseMethodFault) { + fm := Map.FileManager() + + dest := m.names(req.DestName) + + for i, name := range m.names(req.SourceName) { + err := fm.moveDatastoreFile(&types.MoveDatastoreFile_Task{ + SourceName: name, + SourceDatacenter: req.SourceDatacenter, + DestinationName: dest[i], + DestinationDatacenter: req.DestDatacenter, + Force: req.Force, + }) + + if err != nil { + return nil, err + } + } + + return nil, nil + }) + + return &methods.MoveVirtualDisk_TaskBody{ + Res: &types.MoveVirtualDisk_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (m *VirtualDiskManager) CopyVirtualDiskTask(req *types.CopyVirtualDisk_Task) soap.HasFault { + task := CreateTask(m, "copyVirtualDisk", func(*Task) (types.AnyType, types.BaseMethodFault) { + fm := Map.FileManager() + + dest := m.names(req.DestName) + + for i, name := range m.names(req.SourceName) { + err := fm.copyDatastoreFile(&types.CopyDatastoreFile_Task{ + SourceName: name, + SourceDatacenter: req.SourceDatacenter, + DestinationName: dest[i], + DestinationDatacenter: req.DestDatacenter, + Force: req.Force, + }) + + if err != nil { + return nil, err + } + } + + return nil, nil + }) + + return &methods.CopyVirtualDisk_TaskBody{ + Res: &types.CopyVirtualDisk_TaskResponse{ 
+ Returnval: task.Run(), + }, + } +} + +func (m *VirtualDiskManager) QueryVirtualDiskUuid(req *types.QueryVirtualDiskUuid) soap.HasFault { + body := new(methods.QueryVirtualDiskUuidBody) + + fm := Map.FileManager() + + file, fault := fm.resolve(req.Datacenter, req.Name) + if fault != nil { + body.Fault_ = Fault("", fault) + return body + } + + _, err := os.Stat(file) + if err != nil { + fault = fm.fault(file, err, new(types.CannotAccessFile)) + body.Fault_ = Fault("", fault) + return body + } + + body.Res = &types.QueryVirtualDiskUuidResponse{ + Returnval: uuid.NewSHA1(uuid.NameSpaceOID, []byte(file)).String(), + } + + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go b/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go new file mode 100644 index 00000000000..640a280dc58 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go @@ -0,0 +1,885 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "fmt" + "io" + "log" + "net" + "os" + "path" + "strings" + "sync/atomic" + "time" + + "github.com/google/uuid" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type VirtualMachine struct { + mo.VirtualMachine + + log *log.Logger + out io.Closer + sid int32 +} + +func NewVirtualMachine(parent types.ManagedObjectReference, spec *types.VirtualMachineConfigSpec) (*VirtualMachine, types.BaseMethodFault) { + vm := &VirtualMachine{} + vm.Parent = &parent + + if spec.Name == "" { + return nil, &types.InvalidVmConfig{Property: "configSpec.name"} + } + + if spec.Files == nil || spec.Files.VmPathName == "" { + return nil, &types.InvalidVmConfig{Property: "configSpec.files.vmPathName"} + } + + rspec := types.DefaultResourceConfigSpec() + vm.Config = &types.VirtualMachineConfigInfo{ + ExtraConfig: []types.BaseOptionValue{&types.OptionValue{Key: "govcsim", Value: "TRUE"}}, + Tools: &types.ToolsConfigInfo{}, + MemoryAllocation: &rspec.MemoryAllocation, + CpuAllocation: &rspec.CpuAllocation, + } + vm.Summary.Guest = &types.VirtualMachineGuestSummary{} + vm.Summary.Storage = &types.VirtualMachineStorageSummary{} + vm.Summary.Vm = &vm.Self + + // Append VM Name as the directory name if not specified + if strings.HasSuffix(spec.Files.VmPathName, "]") { // e.g. 
"[datastore1]" + spec.Files.VmPathName += " " + spec.Name + } + + if !strings.HasSuffix(spec.Files.VmPathName, ".vmx") { + spec.Files.VmPathName = path.Join(spec.Files.VmPathName, spec.Name+".vmx") + } + + dsPath := path.Dir(spec.Files.VmPathName) + + defaults := types.VirtualMachineConfigSpec{ + NumCPUs: 1, + NumCoresPerSocket: 1, + MemoryMB: 32, + Uuid: uuid.New().String(), + Version: "vmx-11", + Files: &types.VirtualMachineFileInfo{ + SnapshotDirectory: dsPath, + SuspendDirectory: dsPath, + LogDirectory: dsPath, + }, + } + + // Add the default devices + defaults.DeviceChange, _ = object.VirtualDeviceList(esx.VirtualDevice).ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd) + + err := vm.configure(&defaults) + if err != nil { + return nil, err + } + + vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff + vm.Runtime.ConnectionState = types.VirtualMachineConnectionStateConnected + vm.Summary.Runtime = vm.Runtime + + vm.Summary.QuickStats.GuestHeartbeatStatus = types.ManagedEntityStatusGray + vm.Summary.OverallStatus = types.ManagedEntityStatusGreen + vm.ConfigStatus = types.ManagedEntityStatusGreen + + return vm, nil +} + +func (vm *VirtualMachine) apply(spec *types.VirtualMachineConfigSpec) { + if spec.Files == nil { + spec.Files = new(types.VirtualMachineFileInfo) + } + + apply := []struct { + src string + dst *string + }{ + {spec.Name, &vm.Name}, + {spec.Name, &vm.Config.Name}, + {spec.Name, &vm.Summary.Config.Name}, + {spec.GuestId, &vm.Config.GuestId}, + {spec.GuestId, &vm.Config.GuestFullName}, + {spec.GuestId, &vm.Summary.Guest.GuestId}, + {spec.GuestId, &vm.Summary.Config.GuestId}, + {spec.GuestId, &vm.Summary.Config.GuestFullName}, + {spec.Uuid, &vm.Config.Uuid}, + {spec.Version, &vm.Config.Version}, + {spec.Files.VmPathName, &vm.Config.Files.VmPathName}, + {spec.Files.VmPathName, &vm.Summary.Config.VmPathName}, + {spec.Files.SnapshotDirectory, &vm.Config.Files.SnapshotDirectory}, + {spec.Files.LogDirectory, &vm.Config.Files.LogDirectory}, + } + + for _, f := range apply { + if f.src != "" { + *f.dst = f.src + } + } + + if spec.MemoryMB != 0 { + vm.Config.Hardware.MemoryMB = int32(spec.MemoryMB) + vm.Summary.Config.MemorySizeMB = vm.Config.Hardware.MemoryMB + } + + if spec.NumCPUs != 0 { + vm.Config.Hardware.NumCPU = spec.NumCPUs + vm.Summary.Config.NumCpu = vm.Config.Hardware.NumCPU + } + + vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, spec.ExtraConfig...) 
+ + vm.Config.Modified = time.Now() + + vm.Summary.Config.Uuid = vm.Config.Uuid +} + +func validateGuestID(id string) types.BaseMethodFault { + for _, x := range GuestID { + if id == string(x) { + return nil + } + } + + return &types.InvalidArgument{InvalidProperty: "configSpec.guestId"} +} + +func (vm *VirtualMachine) configure(spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { + vm.apply(spec) + + if spec.MemoryAllocation != nil { + if err := updateResourceAllocation("memory", spec.MemoryAllocation, vm.Config.MemoryAllocation); err != nil { + return err + } + } + + if spec.CpuAllocation != nil { + if err := updateResourceAllocation("cpu", spec.CpuAllocation, vm.Config.CpuAllocation); err != nil { + return err + } + } + + if spec.GuestId != "" { + if err := validateGuestID(spec.GuestId); err != nil { + return err + } + } + + return vm.configureDevices(spec) +} + +func (vm *VirtualMachine) useDatastore(name string) *Datastore { + host := Map.Get(*vm.Runtime.Host).(*HostSystem) + + ds := Map.FindByName(name, host.Datastore).(*Datastore) + + vm.Datastore = AddReference(ds.Self, vm.Datastore) + + return ds +} + +func (vm *VirtualMachine) setLog(w io.WriteCloser) { + vm.out = w + vm.log = log.New(w, "vmx ", log.Flags()) +} + +func (vm *VirtualMachine) createFile(spec string, name string, register bool) (*os.File, types.BaseMethodFault) { + p, fault := parseDatastorePath(spec) + if fault != nil { + return nil, fault + } + + ds := vm.useDatastore(p.Datastore) + + file := path.Join(ds.Info.GetDatastoreInfo().Url, p.Path) + + if name != "" { + if path.Ext(file) != "" { + file = path.Dir(file) + } + + file = path.Join(file, name) + } + + if register { + f, err := os.Open(file) + if err != nil { + log.Printf("register %s: %s", vm.Reference(), err) + if os.IsNotExist(err) { + return nil, &types.NotFound{} + } + + return nil, &types.InvalidArgument{} + } + + return f, nil + } + + dir := path.Dir(file) + + _ = os.MkdirAll(dir, 0700) + + _, err := os.Stat(file) + if err == nil { + return nil, &types.FileAlreadyExists{ + FileFault: types.FileFault{ + File: file, + }, + } + } + + f, err := os.Create(file) + if err != nil { + return nil, &types.FileFault{ + File: file, + } + } + + return f, nil +} + +func (vm *VirtualMachine) create(spec *types.VirtualMachineConfigSpec, register bool) types.BaseMethodFault { + vm.apply(spec) + + files := []struct { + spec string + name string + use func(w io.WriteCloser) + }{ + {vm.Config.Files.VmPathName, "", nil}, + {vm.Config.Files.VmPathName, fmt.Sprintf("%s.nvram", vm.Name), nil}, + {vm.Config.Files.LogDirectory, "vmware.log", vm.setLog}, + } + + for _, file := range files { + f, err := vm.createFile(file.spec, file.name, register) + if err != nil { + return err + } + + if file.use != nil { + file.use(f) + } else { + _ = f.Close() + } + } + + vm.log.Print("created") + + return vm.configureDevices(spec) +} + +var vmwOUI = net.HardwareAddr([]byte{0x0, 0xc, 0x29}) + +// From http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.vsphere.networking.doc%2FGUID-DC7478FF-DC44-4625-9AD7-38208C56A552.html +// "The host generates generateMAC addresses that consists of the VMware OUI 00:0C:29 and the last three octets in hexadecimal +// format of the virtual machine UUID. The virtual machine UUID is based on a hash calculated by using the UUID of the +// ESXi physical machine and the path to the configuration file (.vmx) of the virtual machine." +func (vm *VirtualMachine) generateMAC() string { + id := uuid.New() // Random is fine for now. 
+ + offset := len(id) - len(vmwOUI) + + mac := append(vmwOUI, id[offset:]...) + + return mac.String() +} + +func (vm *VirtualMachine) configureDevice(devices object.VirtualDeviceList, device types.BaseVirtualDevice) types.BaseMethodFault { + d := device.GetVirtualDevice() + var controller types.BaseVirtualController + + if d.Key < 0 { + // Choose a unique key + if d.Key == -1 { + d.Key = devices.NewKey() + } + + d.Key *= -1 + + for { + if devices.FindByKey(d.Key) == nil { + break + } + d.Key++ + } + } + + label := devices.Name(device) + summary := label + dc := Map.getEntityDatacenter(Map.Get(*vm.Parent).(mo.Entity)) + dm := Map.VirtualDiskManager() + + switch x := device.(type) { + case types.BaseVirtualEthernetCard: + controller = devices.PickController((*types.VirtualPCIController)(nil)) + var net types.ManagedObjectReference + + switch b := d.Backing.(type) { + case *types.VirtualEthernetCardNetworkBackingInfo: + summary = b.DeviceName + net = Map.FindByName(b.DeviceName, dc.Network).Reference() + b.Network = &net + case *types.VirtualEthernetCardDistributedVirtualPortBackingInfo: + summary = fmt.Sprintf("DVSwitch: %s", b.Port.SwitchUuid) + net.Type = "DistributedVirtualPortgroup" + net.Value = b.Port.PortgroupKey + } + + vm.Network = append(vm.Network, net) + + c := x.GetVirtualEthernetCard() + if c.MacAddress == "" { + c.MacAddress = vm.generateMAC() + } + case *types.VirtualDisk: + switch b := d.Backing.(type) { + case types.BaseVirtualDeviceFileBackingInfo: + info := b.GetVirtualDeviceFileBackingInfo() + + if info.FileName == "" { + filename, err := vm.genVmdkPath() + if err != nil { + return err + } + + info.FileName = filename + } + + err := dm.createVirtualDisk(&types.CreateVirtualDisk_Task{ + Datacenter: &dc.Self, + Name: info.FileName, + }) + if err != nil { + return err + } + + p, _ := parseDatastorePath(info.FileName) + + info.Datastore = &types.ManagedObjectReference{ + Type: "Datastore", + Value: p.Datastore, + } + } + } + + if d.UnitNumber == nil && controller != nil { + devices.AssignController(device, controller) + } + + if d.DeviceInfo == nil { + d.DeviceInfo = &types.Description{ + Label: label, + Summary: summary, + } + } + + return nil +} + +func removeDevice(devices object.VirtualDeviceList, device types.BaseVirtualDevice) object.VirtualDeviceList { + var result object.VirtualDeviceList + + for i, d := range devices { + if d.GetVirtualDevice().Key == device.GetVirtualDevice().Key { + result = append(result, devices[i+1:]...) 
+ break + } + + result = append(result, d) + } + + return result +} + +func (vm *VirtualMachine) genVmdkPath() (string, types.BaseMethodFault) { + vmdir := path.Dir(vm.Config.Files.VmPathName) + + index := 0 + for { + var filename string + if index == 0 { + filename = fmt.Sprintf("%s.vmdk", vm.Config.Name) + } else { + filename = fmt.Sprintf("%s_%d.vmdk", vm.Config.Name, index) + } + + f, err := vm.createFile(vmdir, filename, false) + if err != nil { + switch err.(type) { + case *types.FileAlreadyExists: + index++ + continue + default: + return "", err + } + } + + _ = f.Close() + _ = os.Remove(f.Name()) + + return path.Join(vmdir, filename), nil + } +} + +func (vm *VirtualMachine) configureDevices(spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { + devices := object.VirtualDeviceList(vm.Config.Hardware.Device) + + for i, change := range spec.DeviceChange { + dspec := change.GetVirtualDeviceConfigSpec() + device := dspec.Device.GetVirtualDevice() + invalid := &types.InvalidDeviceSpec{DeviceIndex: int32(i)} + + switch dspec.Operation { + case types.VirtualDeviceConfigSpecOperationAdd: + if devices.FindByKey(device.Key) != nil { + if vm.Self.Value != "" { // moid isn't set until CreateVM is done + return invalid + } + + // In this case, the CreateVM() spec included one of the default devices + devices = removeDevice(devices, device) + } + + err := vm.configureDevice(devices, dspec.Device) + if err != nil { + return err + } + + devices = append(devices, dspec.Device) + case types.VirtualDeviceConfigSpecOperationRemove: + devices = removeDevice(devices, dspec.Device) + } + } + + vm.Config.Hardware.Device = []types.BaseVirtualDevice(devices) + + return nil +} + +type powerVMTask struct { + *VirtualMachine + + state types.VirtualMachinePowerState +} + +func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { + c.log.Printf("running power task: requesting %s, existing %s", + c.state, c.VirtualMachine.Runtime.PowerState) + + if c.VirtualMachine.Runtime.PowerState == c.state { + return nil, &types.InvalidPowerState{ + RequestedState: c.state, + ExistingState: c.VirtualMachine.Runtime.PowerState, + } + } + + c.VirtualMachine.Runtime.PowerState = c.state + c.VirtualMachine.Summary.Runtime.PowerState = c.state + + bt := &c.VirtualMachine.Summary.Runtime.BootTime + if c.state == types.VirtualMachinePowerStatePoweredOn { + now := time.Now() + *bt = &now + } else { + *bt = nil + } + + return nil, nil +} + +func (vm *VirtualMachine) PowerOnVMTask(c *types.PowerOnVM_Task) soap.HasFault { + runner := &powerVMTask{vm, types.VirtualMachinePowerStatePoweredOn} + task := CreateTask(runner.Reference(), "powerOn", runner.Run) + + return &methods.PowerOnVM_TaskBody{ + Res: &types.PowerOnVM_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm *VirtualMachine) PowerOffVMTask(c *types.PowerOffVM_Task) soap.HasFault { + runner := &powerVMTask{vm, types.VirtualMachinePowerStatePoweredOff} + task := CreateTask(runner.Reference(), "powerOff", runner.Run) + + return &methods.PowerOffVM_TaskBody{ + Res: &types.PowerOffVM_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm *VirtualMachine) ReconfigVMTask(req *types.ReconfigVM_Task) soap.HasFault { + task := CreateTask(vm, "reconfigVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { + err := vm.configure(&req.Spec) + if err != nil { + return nil, err + } + + return nil, nil + }) + + return &methods.ReconfigVM_TaskBody{ + Res: &types.ReconfigVM_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm 
*VirtualMachine) DestroyTask(req *types.Destroy_Task) soap.HasFault { + task := CreateTask(vm, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { + r := vm.UnregisterVM(&types.UnregisterVM{ + This: req.This, + }) + + if r.Fault() != nil { + return nil, r.Fault().VimFault().(types.BaseMethodFault) + } + + // Delete VM files from the datastore (ignoring result for now) + m := Map.FileManager() + dc := Map.getEntityDatacenter(vm).Reference() + + _ = m.DeleteDatastoreFileTask(&types.DeleteDatastoreFile_Task{ + This: m.Reference(), + Name: vm.Config.Files.LogDirectory, + Datacenter: &dc, + }) + + return nil, nil + }) + + return &methods.Destroy_TaskBody{ + Res: &types.Destroy_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm *VirtualMachine) UnregisterVM(c *types.UnregisterVM) soap.HasFault { + r := &methods.UnregisterVMBody{} + + if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn { + r.Fault_ = Fault("", &types.InvalidPowerState{ + RequestedState: types.VirtualMachinePowerStatePoweredOff, + ExistingState: vm.Runtime.PowerState, + }) + + return r + } + + _ = vm.out.Close() // Close log fd + + Map.getEntityParent(vm, "Folder").(*Folder).removeChild(c.This) + + host := Map.Get(*vm.Runtime.Host).(*HostSystem) + host.Vm = RemoveReference(vm.Self, host.Vm) + + switch pool := Map.Get(*vm.ResourcePool).(type) { + case *ResourcePool: + pool.Vm = RemoveReference(vm.Self, pool.Vm) + case *VirtualApp: + pool.Vm = RemoveReference(vm.Self, pool.Vm) + } + + for i := range vm.Datastore { + ds := Map.Get(vm.Datastore[i]).(*Datastore) + ds.Vm = RemoveReference(vm.Self, ds.Vm) + } + + r.Res = new(types.UnregisterVMResponse) + + return r +} + +func (vm *VirtualMachine) CloneVMTask(req *types.CloneVM_Task) soap.HasFault { + task := CreateTask(vm, "cloneVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { + folder := Map.Get(req.Folder).(*Folder) + + config := types.VirtualMachineConfigSpec{ + Name: req.Name, + GuestId: vm.Config.GuestId, + Files: &types.VirtualMachineFileInfo{ + VmPathName: strings.Replace(vm.Config.Files.VmPathName, vm.Name, req.Name, -1), + }, + } + + res := folder.CreateVMTask(&types.CreateVM_Task{ + This: folder.Self, + Config: config, + Pool: *vm.ResourcePool, + }) + + ctask := Map.Get(res.(*methods.CreateVM_TaskBody).Res.Returnval).(*Task) + if ctask.Info.Error != nil { + return nil, ctask.Info.Error.Fault + } + + return ctask.Info.Result.(types.ManagedObjectReference), nil + }) + + return &methods.CloneVM_TaskBody{ + Res: &types.CloneVM_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm *VirtualMachine) RelocateVMTask(req *types.RelocateVM_Task) soap.HasFault { + task := CreateTask(vm, "relocateVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { + if ref := req.Spec.Datastore; ref != nil { + ds := Map.Get(*ref).(*Datastore) + ds.Vm = RemoveReference(*ref, ds.Vm) + + vm.Datastore = []types.ManagedObjectReference{*ref} + + // TODO: migrate vm.Config.Files (and vm.Summary.Config.VmPathName) + } + + if ref := req.Spec.Pool; ref != nil { + pool := Map.Get(*ref).(*ResourcePool) + pool.Vm = RemoveReference(*ref, pool.Vm) + + vm.ResourcePool = ref + } + + if ref := req.Spec.Host; ref != nil { + host := Map.Get(*ref).(*HostSystem) + host.Vm = RemoveReference(*ref, host.Vm) + + vm.Runtime.Host = ref + } + + return nil, nil + }) + + return &methods.RelocateVM_TaskBody{ + Res: &types.RelocateVM_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm *VirtualMachine) CreateSnapshotTask(req *types.CreateSnapshot_Task) 
soap.HasFault { + task := CreateTask(vm, "createSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { + if vm.Snapshot == nil { + vm.Snapshot = &types.VirtualMachineSnapshotInfo{} + } + + snapshot := &VirtualMachineSnapshot{} + snapshot.Vm = vm.Reference() + snapshot.Config = *vm.Config + + Map.Put(snapshot) + + treeItem := types.VirtualMachineSnapshotTree{ + Snapshot: snapshot.Self, + Vm: snapshot.Vm, + Name: req.Name, + Description: req.Description, + Id: atomic.AddInt32(&vm.sid, 1), + CreateTime: time.Now(), + State: vm.Runtime.PowerState, + Quiesced: req.Quiesce, + BackupManifest: "", + ReplaySupported: types.NewBool(false), + } + + cur := vm.Snapshot.CurrentSnapshot + if cur != nil { + parent := Map.Get(*cur).(*VirtualMachineSnapshot) + parent.ChildSnapshot = append(parent.ChildSnapshot, snapshot.Self) + + ss := findSnapshotInTree(vm.Snapshot.RootSnapshotList, *cur) + ss.ChildSnapshotList = append(ss.ChildSnapshotList, treeItem) + } else { + vm.Snapshot.RootSnapshotList = append(vm.Snapshot.RootSnapshotList, treeItem) + } + + vm.Snapshot.CurrentSnapshot = &snapshot.Self + + return nil, nil + }) + + return &methods.CreateSnapshot_TaskBody{ + Res: &types.CreateSnapshot_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm *VirtualMachine) RevertToCurrentSnapshotTask(req *types.RevertToCurrentSnapshot_Task) soap.HasFault { + body := &methods.RevertToCurrentSnapshot_TaskBody{} + + if vm.Snapshot == nil || vm.Snapshot.CurrentSnapshot == nil { + body.Fault_ = Fault("snapshot not found", &types.NotFound{}) + + return body + } + + task := CreateTask(vm, "revertSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { + return nil, nil + }) + + body.Res = &types.RevertToCurrentSnapshot_TaskResponse{ + Returnval: task.Run(), + } + + return body +} + +func (vm *VirtualMachine) RemoveAllSnapshotsTask(req *types.RemoveAllSnapshots_Task) soap.HasFault { + task := CreateTask(vm, "RemoveAllSnapshots", func(t *Task) (types.AnyType, types.BaseMethodFault) { + if vm.Snapshot == nil { + return nil, nil + } + + refs := allSnapshotsInTree(vm.Snapshot.RootSnapshotList) + + vm.Snapshot.CurrentSnapshot = nil + vm.Snapshot.RootSnapshotList = nil + + for _, ref := range refs { + Map.Remove(ref) + } + + return nil, nil + }) + + return &methods.RemoveAllSnapshots_TaskBody{ + Res: &types.RemoveAllSnapshots_TaskResponse{ + Returnval: task.Run(), + }, + } +} + +func (vm *VirtualMachine) ShutdownGuest(c *types.ShutdownGuest) soap.HasFault { + r := &methods.ShutdownGuestBody{} + // should be poweron + if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOff { + r.Fault_ = Fault("", &types.InvalidPowerState{ + RequestedState: types.VirtualMachinePowerStatePoweredOn, + ExistingState: vm.Runtime.PowerState, + }) + + return r + } + // change state + vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff + vm.Summary.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff + + r.Res = new(types.ShutdownGuestResponse) + + return r +} + +func findSnapshotInTree(tree []types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference) *types.VirtualMachineSnapshotTree { + if tree == nil { + return nil + } + + for i, ss := range tree { + if ss.Snapshot == ref { + return &tree[i] + } + + target := findSnapshotInTree(ss.ChildSnapshotList, ref) + if target != nil { + return target + } + } + + return nil +} + +func findParentSnapshot(tree types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference) *types.ManagedObjectReference { + for _, ss := range 
tree.ChildSnapshotList { + if ss.Snapshot == ref { + return &tree.Snapshot + } + + res := findParentSnapshot(ss, ref) + if res != nil { + return res + } + } + + return nil +} + +func findParentSnapshotInTree(tree []types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference) *types.ManagedObjectReference { + if tree == nil { + return nil + } + + for _, ss := range tree { + res := findParentSnapshot(ss, ref) + if res != nil { + return res + } + } + + return nil +} + +func removeSnapshotInTree(tree []types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference, removeChildren bool) []types.VirtualMachineSnapshotTree { + if tree == nil { + return tree + } + + var result []types.VirtualMachineSnapshotTree + + for _, ss := range tree { + if ss.Snapshot == ref { + if !removeChildren { + result = append(result, ss.ChildSnapshotList...) + } + } else { + ss.ChildSnapshotList = removeSnapshotInTree(ss.ChildSnapshotList, ref, removeChildren) + result = append(result, ss) + } + } + + return result +} + +func allSnapshotsInTree(tree []types.VirtualMachineSnapshotTree) []types.ManagedObjectReference { + var result []types.ManagedObjectReference + + if tree == nil { + return result + } + + for _, ss := range tree { + result = append(result, ss.Snapshot) + result = append(result, allSnapshotsInTree(ss.ChildSnapshotList)...) + } + + return result +} diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/BUILD b/vendor/github.com/vmware/govmomi/simulator/vpx/BUILD new file mode 100644 index 00000000000..86abe7b5b42 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "root_folder.go", + "service_content.go", + "setting.go", + ], + importpath = "github.com/vmware/govmomi/simulator/vpx", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/doc.go b/vendor/github.com/vmware/govmomi/simulator/vpx/doc.go new file mode 100644 index 00000000000..1765887029a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package vpx contains SOAP responses from a vCenter server, captured using `govc ... -dump`. 
+*/ +package vpx diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/root_folder.go b/vendor/github.com/vmware/govmomi/simulator/vpx/root_folder.go new file mode 100644 index 00000000000..a1cce0d8d94 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/root_folder.go @@ -0,0 +1,64 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vpx + +import ( + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var RootFolder = mo.Folder{ + ManagedEntity: mo.ManagedEntity{ + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{Type: "Folder", Value: "group-d1"}, + Value: nil, + AvailableField: nil, + }, + Parent: (*types.ManagedObjectReference)(nil), + CustomValue: nil, + OverallStatus: "green", + ConfigStatus: "green", + ConfigIssue: nil, + EffectiveRole: []int32{-1}, + Permission: []types.Permission{ + { + DynamicData: types.DynamicData{}, + Entity: &types.ManagedObjectReference{Type: "Folder", Value: "group-d1"}, + Principal: "VSPHERE.LOCAL\\Administrator", + Group: false, + RoleId: -1, + Propagate: true, + }, + { + DynamicData: types.DynamicData{}, + Entity: &types.ManagedObjectReference{Type: "Folder", Value: "group-d1"}, + Principal: "VSPHERE.LOCAL\\Administrators", + Group: true, + RoleId: -1, + Propagate: true, + }, + }, + Name: "Datacenters", + DisabledMethod: nil, + RecentTask: nil, + DeclaredAlarmState: nil, + AlarmActionsEnabled: (*bool)(nil), + Tag: nil, + }, + ChildType: []string{"Folder", "Datacenter"}, + ChildEntity: nil, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go b/vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go new file mode 100644 index 00000000000..90b93cc147e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vpx + +import "github.com/vmware/govmomi/vim25/types" + +// ServiceContent is the default template for the ServiceInstance content property. 
+// Capture method: +// govc object.collect -s -dump - content +var ServiceContent = types.ServiceContent{ + RootFolder: types.ManagedObjectReference{Type: "Folder", Value: "group-d1"}, + PropertyCollector: types.ManagedObjectReference{Type: "PropertyCollector", Value: "propertyCollector"}, + ViewManager: &types.ManagedObjectReference{Type: "ViewManager", Value: "ViewManager"}, + About: types.AboutInfo{ + Name: "VMware vCenter Server", + FullName: "VMware vCenter Server 6.5.0 build-5973321", + Vendor: "VMware, Inc.", + Version: "6.5.0", + Build: "5973321", + LocaleVersion: "INTL", + LocaleBuild: "000", + OsType: "linux-x64", + ProductLineId: "vpx", + ApiType: "VirtualCenter", + ApiVersion: "6.5", + InstanceUuid: "dbed6e0c-bd88-4ef6-b594-21283e1c677f", + LicenseProductName: "VMware VirtualCenter Server", + LicenseProductVersion: "6.0", + }, + Setting: &types.ManagedObjectReference{Type: "OptionManager", Value: "VpxSettings"}, + UserDirectory: &types.ManagedObjectReference{Type: "UserDirectory", Value: "UserDirectory"}, + SessionManager: &types.ManagedObjectReference{Type: "SessionManager", Value: "SessionManager"}, + AuthorizationManager: &types.ManagedObjectReference{Type: "AuthorizationManager", Value: "AuthorizationManager"}, + ServiceManager: &types.ManagedObjectReference{Type: "ServiceManager", Value: "ServiceMgr"}, + PerfManager: &types.ManagedObjectReference{Type: "PerformanceManager", Value: "PerfMgr"}, + ScheduledTaskManager: &types.ManagedObjectReference{Type: "ScheduledTaskManager", Value: "ScheduledTaskManager"}, + AlarmManager: &types.ManagedObjectReference{Type: "AlarmManager", Value: "AlarmManager"}, + EventManager: &types.ManagedObjectReference{Type: "EventManager", Value: "EventManager"}, + TaskManager: &types.ManagedObjectReference{Type: "TaskManager", Value: "TaskManager"}, + ExtensionManager: &types.ManagedObjectReference{Type: "ExtensionManager", Value: "ExtensionManager"}, + CustomizationSpecManager: &types.ManagedObjectReference{Type: "CustomizationSpecManager", Value: "CustomizationSpecManager"}, + CustomFieldsManager: &types.ManagedObjectReference{Type: "CustomFieldsManager", Value: "CustomFieldsManager"}, + AccountManager: (*types.ManagedObjectReference)(nil), + DiagnosticManager: &types.ManagedObjectReference{Type: "DiagnosticManager", Value: "DiagMgr"}, + LicenseManager: &types.ManagedObjectReference{Type: "LicenseManager", Value: "LicenseManager"}, + SearchIndex: &types.ManagedObjectReference{Type: "SearchIndex", Value: "SearchIndex"}, + FileManager: &types.ManagedObjectReference{Type: "FileManager", Value: "FileManager"}, + DatastoreNamespaceManager: &types.ManagedObjectReference{Type: "DatastoreNamespaceManager", Value: "DatastoreNamespaceManager"}, + VirtualDiskManager: &types.ManagedObjectReference{Type: "VirtualDiskManager", Value: "virtualDiskManager"}, + VirtualizationManager: (*types.ManagedObjectReference)(nil), + SnmpSystem: &types.ManagedObjectReference{Type: "HostSnmpSystem", Value: "SnmpSystem"}, + VmProvisioningChecker: &types.ManagedObjectReference{Type: "VirtualMachineProvisioningChecker", Value: "ProvChecker"}, + VmCompatibilityChecker: &types.ManagedObjectReference{Type: "VirtualMachineCompatibilityChecker", Value: "CompatChecker"}, + OvfManager: &types.ManagedObjectReference{Type: "OvfManager", Value: "OvfManager"}, + IpPoolManager: &types.ManagedObjectReference{Type: "IpPoolManager", Value: "IpPoolManager"}, + DvSwitchManager: &types.ManagedObjectReference{Type: "DistributedVirtualSwitchManager", Value: "DVSManager"}, + HostProfileManager: 
&types.ManagedObjectReference{Type: "HostProfileManager", Value: "HostProfileManager"}, + ClusterProfileManager: &types.ManagedObjectReference{Type: "ClusterProfileManager", Value: "ClusterProfileManager"}, + ComplianceManager: &types.ManagedObjectReference{Type: "ProfileComplianceManager", Value: "MoComplianceManager"}, + LocalizationManager: &types.ManagedObjectReference{Type: "LocalizationManager", Value: "LocalizationManager"}, + StorageResourceManager: &types.ManagedObjectReference{Type: "StorageResourceManager", Value: "StorageResourceManager"}, + GuestOperationsManager: &types.ManagedObjectReference{Type: "GuestOperationsManager", Value: "guestOperationsManager"}, + OverheadMemoryManager: &types.ManagedObjectReference{Type: "OverheadMemoryManager", Value: "OverheadMemoryManager"}, + CertificateManager: &types.ManagedObjectReference{Type: "CertificateManager", Value: "certificateManager"}, + IoFilterManager: &types.ManagedObjectReference{Type: "IoFilterManager", Value: "IoFilterManager"}, + VStorageObjectManager: &types.ManagedObjectReference{Type: "VcenterVStorageObjectManager", Value: "VStorageObjectManager"}, + HostSpecManager: &types.ManagedObjectReference{Type: "HostSpecificationManager", Value: "HostSpecificationManager"}, + CryptoManager: &types.ManagedObjectReference{Type: "CryptoManagerKmip", Value: "CryptoManager"}, + HealthUpdateManager: &types.ManagedObjectReference{Type: "HealthUpdateManager", Value: "HealthUpdateManager"}, + FailoverClusterConfigurator: &types.ManagedObjectReference{Type: "FailoverClusterConfigurator", Value: "FailoverClusterConfigurator"}, + FailoverClusterManager: &types.ManagedObjectReference{Type: "FailoverClusterManager", Value: "FailoverClusterManager"}, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/setting.go b/vendor/github.com/vmware/govmomi/simulator/vpx/setting.go new file mode 100644 index 00000000000..dfbb28b7ab4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/setting.go @@ -0,0 +1,60 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vpx + +import "github.com/vmware/govmomi/vim25/types" + +// Setting is captured from VC's ServiceContent.OptionManager.setting +var Setting = []types.BaseOptionValue{ + // This list is currently pruned to include sso options only with sso.enabled set to false + &types.OptionValue{ + Key: "config.vpxd.sso.sts.uri", + Value: "https://127.0.0.1/sts/STSService/vsphere.local", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.solutionUser.privateKey", + Value: "/etc/vmware-vpx/ssl/vcsoluser.key", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.solutionUser.name", + Value: "vpxd-b643d01c-928f-469b-96a5-d571d762a78e@vsphere.local", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.solutionUser.certificate", + Value: "/etc/vmware-vpx/ssl/vcsoluser.crt", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.groupcheck.uri", + Value: "https://127.0.0.1/sso-adminserver/sdk/vsphere.local", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.enabled", + Value: "false", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.default.isGroup", + Value: "false", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.default.admin", + Value: "Administrator@vsphere.local", + }, + &types.OptionValue{ + Key: "config.vpxd.sso.admin.uri", + Value: "https://127.0.0.1/sso-adminserver/sdk/vsphere.local", + }, +} diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/BUILD b/vendor/github.com/vmware/govmomi/vim25/methods/BUILD index 0f2bb367b78..c8e9b028a19 100644 --- a/vendor/github.com/vmware/govmomi/vim25/methods/BUILD +++ b/vendor/github.com/vmware/govmomi/vim25/methods/BUILD @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "internal.go", "methods.go", "service_content.go", ], diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/internal.go b/vendor/github.com/vmware/govmomi/vim25/methods/internal.go deleted file mode 100644 index a79adf3a874..00000000000 --- a/vendor/github.com/vmware/govmomi/vim25/methods/internal.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package methods - -import ( - "context" - - "github.com/vmware/govmomi/vim25/soap" - "github.com/vmware/govmomi/vim25/types" -) - -type RetrieveDynamicTypeManagerBody struct { - Req *types.RetrieveDynamicTypeManager `xml:"urn:vim25 RetrieveDynamicTypeManager"` - Res *types.RetrieveDynamicTypeManagerResponse `xml:"urn:vim25 RetrieveDynamicTypeManagerResponse"` - Fault_ *soap.Fault -} - -func (b *RetrieveDynamicTypeManagerBody) Fault() *soap.Fault { return b.Fault_ } - -func RetrieveDynamicTypeManager(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDynamicTypeManager) (*types.RetrieveDynamicTypeManagerResponse, error) { - var reqBody, resBody RetrieveDynamicTypeManagerBody - - reqBody.Req = req - - if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { - return nil, err - } - - return resBody.Res, nil -} - -type RetrieveManagedMethodExecuterBody struct { - Req *types.RetrieveManagedMethodExecuter `xml:"urn:vim25 RetrieveManagedMethodExecuter"` - Res *types.RetrieveManagedMethodExecuterResponse `xml:"urn:vim25 RetrieveManagedMethodExecuterResponse"` - Fault_ *soap.Fault -} - -func (b *RetrieveManagedMethodExecuterBody) Fault() *soap.Fault { return b.Fault_ } - -func RetrieveManagedMethodExecuter(ctx context.Context, r soap.RoundTripper, req *types.RetrieveManagedMethodExecuter) (*types.RetrieveManagedMethodExecuterResponse, error) { - var reqBody, resBody RetrieveManagedMethodExecuterBody - - reqBody.Req = req - - if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { - return nil, err - } - - return resBody.Res, nil -} - -type DynamicTypeMgrQueryMoInstancesBody struct { - Req *types.DynamicTypeMgrQueryMoInstances `xml:"urn:vim25 DynamicTypeMgrQueryMoInstances"` - Res *types.DynamicTypeMgrQueryMoInstancesResponse `xml:"urn:vim25 DynamicTypeMgrQueryMoInstancesResponse"` - Fault_ *soap.Fault -} - -func (b *DynamicTypeMgrQueryMoInstancesBody) Fault() *soap.Fault { return b.Fault_ } - -func DynamicTypeMgrQueryMoInstances(ctx context.Context, r soap.RoundTripper, req *types.DynamicTypeMgrQueryMoInstances) (*types.DynamicTypeMgrQueryMoInstancesResponse, error) { - var reqBody, resBody DynamicTypeMgrQueryMoInstancesBody - - reqBody.Req = req - - if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { - return nil, err - } - - return resBody.Res, nil -} - -type DynamicTypeMgrQueryTypeInfoBody struct { - Req *types.DynamicTypeMgrQueryTypeInfo `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfo"` - Res *types.DynamicTypeMgrQueryTypeInfoResponse `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfoResponse"` - Fault_ *soap.Fault -} - -func (b *DynamicTypeMgrQueryTypeInfoBody) Fault() *soap.Fault { return b.Fault_ } - -func DynamicTypeMgrQueryTypeInfo(ctx context.Context, r soap.RoundTripper, req *types.DynamicTypeMgrQueryTypeInfo) (*types.DynamicTypeMgrQueryTypeInfoResponse, error) { - var reqBody, resBody DynamicTypeMgrQueryTypeInfoBody - - reqBody.Req = req - - if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { - return nil, err - } - - return resBody.Res, nil -} - -type ExecuteSoapBody struct { - Req *types.ExecuteSoap `xml:"urn:vim25 ExecuteSoap"` - Res *types.ExecuteSoapResponse `xml:"urn:vim25 ExecuteSoapResponse"` - Fault_ *soap.Fault -} - -func (b *ExecuteSoapBody) Fault() *soap.Fault { return b.Fault_ } - -func ExecuteSoap(ctx context.Context, r soap.RoundTripper, req *types.ExecuteSoap) (*types.ExecuteSoapResponse, error) { - var reqBody, resBody ExecuteSoapBody - - reqBody.Req = req - - if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { - return nil, err 
- } - - return resBody.Res, nil -} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/vendor/github.com/vmware/govmomi/vim25/soap/client.go index 9ac3cc4d6d9..4c05a6c6297 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/client.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2017 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -60,6 +60,7 @@ const ( type header struct { Cookie string `xml:"vcSessionCookie,omitempty"` + ID string `xml:"operationID,omitempty"` } type Client struct { @@ -78,7 +79,7 @@ type Client struct { Version string // Vim version UserAgent string - header *header + cookie string } var schemeMatch = regexp.MustCompile(`^\w+://`) @@ -168,10 +169,7 @@ func (c *Client) NewServiceClient(path string, namespace string) *Client { // Set SOAP Header cookie for _, cookie := range client.Jar.Cookies(u) { if cookie.Name == "vmware_soap_session" { - client.header = &header{ - Cookie: cookie.Value, - } - + client.cookie = cookie.Value break } } @@ -433,7 +431,15 @@ func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error reqEnv := Envelope{Body: reqBody} resEnv := Envelope{Body: resBody} - reqEnv.Header = c.header + h := &header{ + Cookie: c.cookie, + } + + if id, ok := ctx.Value(types.ID{}).(string); ok { + h.ID = id + } + + reqEnv.Header = h // Create debugging context for this round trip d := c.d.newRoundTrip() @@ -614,6 +620,7 @@ type Download struct { Headers map[string]string Ticket *http.Cookie Progress progress.Sinker + Writer io.Writer } var DefaultDownload = Download{ @@ -655,7 +662,46 @@ func (c *Client) Download(u *url.URL, param *Download) (io.ReadCloser, int64, er return nil, 0, err } - return res.Body, res.ContentLength, nil + r := res.Body + + return r, res.ContentLength, nil +} + +func (c *Client) WriteFile(file string, src io.Reader, size int64, s progress.Sinker, w io.Writer) error { + var err error + + r := src + + fh, err := os.Create(file) + if err != nil { + return err + } + + if s != nil { + pr := progress.NewReader(s, src, size) + src = pr + + // Mark progress reader as done when returning from this function. + defer func() { + pr.Done(err) + }() + } + + if w == nil { + w = fh + } else { + w = io.MultiWriter(w, fh) + } + + _, err = io.Copy(w, r) + + cerr := fh.Close() + + if err == nil { + err = cerr + } + + return err } // DownloadFile GETs the given URL to a local file @@ -669,37 +715,6 @@ func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error { if err != nil { return err } - defer rc.Close() - var r io.Reader = rc - - fh, err := os.Create(file) - if err != nil { - return err - } - defer fh.Close() - - if param.Progress != nil { - pr := progress.NewReader(param.Progress, r, contentLength) - r = pr - - // Mark progress reader as done when returning from this function. - defer func() { - pr.Done(err) - }() - } - - _, err = io.Copy(fh, r) - if err != nil { - return err - } - - // Assign error before returning so that it gets picked up by the deferred - // function marking the progress reader as done. 
- err = fh.Close() - if err != nil { - return err - } - - return nil + return c.WriteFile(file, rc, contentLength, param.Progress, param.Writer) } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/BUILD b/vendor/github.com/vmware/govmomi/vim25/types/BUILD index 2f45f97c702..a3208849389 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/BUILD +++ b/vendor/github.com/vmware/govmomi/vim25/types/BUILD @@ -8,7 +8,6 @@ go_library( "fault.go", "helpers.go", "if.go", - "internal.go", "registry.go", "types.go", ], diff --git a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go index 2364ed4213c..dd5f049fa80 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,12 +16,27 @@ limitations under the License. package types -import "strings" +import ( + "strings" + "time" +) func NewBool(v bool) *bool { return &v } +func NewInt32(v int32) *int32 { + return &v +} + +func NewInt64(v int64) *int64 { + return &v +} + +func NewTime(v time.Time) *time.Time { + return &v +} + func NewReference(r ManagedObjectReference) *ManagedObjectReference { return &r } @@ -50,3 +65,24 @@ func (r *ManagedObjectReference) FromString(o string) bool { func (c *PerfCounterInfo) Name() string { return c.GroupInfo.GetElementDescription().Key + "." + c.NameInfo.GetElementDescription().Key + "." + string(c.RollupType) } + +func defaultResourceAllocationInfo() ResourceAllocationInfo { + return ResourceAllocationInfo{ + Reservation: NewInt64(0), + ExpandableReservation: NewBool(true), + Limit: NewInt64(-1), + Shares: &SharesInfo{ + Level: SharesLevelNormal, + }, + } +} + +// DefaultResourceConfigSpec returns a ResourceConfigSpec populated with the same default field values as vCenter. +// Note that the wsdl marks these fields as optional, but they are required to be set when creating a resource pool. +// They are only optional when updating a resource pool. 
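For orientation, here is a hedged sketch (not part of the patch) of how a caller might use the pointer helpers introduced in this hunk together with the DefaultResourceConfigSpec helper defined immediately below; the field layout follows the ResourceAllocationInfo changes later in this series.

package example // illustrative sketch only

import "github.com/vmware/govmomi/vim25/types"

func exampleResourceConfig() types.ResourceConfigSpec {
	// Start from the vCenter-style defaults and override individual fields.
	// Pointer-typed fields let callers distinguish "unset" (nil) from an
	// explicit zero, which is why NewInt64/NewInt32/NewBool exist.
	spec := types.DefaultResourceConfigSpec()
	spec.MemoryAllocation.Reservation = types.NewInt64(1024) // explicit, non-nil reservation
	spec.CpuAllocation.Limit = types.NewInt64(-1)            // keep the "unlimited" convention used above
	return spec
}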
+func DefaultResourceConfigSpec() ResourceConfigSpec { + return ResourceConfigSpec{ + CpuAllocation: defaultResourceAllocationInfo(), + MemoryAllocation: defaultResourceAllocationInfo(), + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/if.go b/vendor/github.com/vmware/govmomi/vim25/types/if.go index dbf594cfc0a..5b93cb433f1 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/if.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/if.go @@ -2360,16 +2360,6 @@ func init() { t["BaseReplicationVmFault"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem() } -func (b *ResourceAllocationInfo) GetResourceAllocationInfo() *ResourceAllocationInfo { return b } - -type BaseResourceAllocationInfo interface { - GetResourceAllocationInfo() *ResourceAllocationInfo -} - -func init() { - t["BaseResourceAllocationInfo"] = reflect.TypeOf((*ResourceAllocationInfo)(nil)).Elem() -} - func (b *ResourceInUse) GetResourceInUse() *ResourceInUse { return b } type BaseResourceInUse interface { diff --git a/vendor/github.com/vmware/govmomi/vim25/types/internal.go b/vendor/github.com/vmware/govmomi/vim25/types/internal.go deleted file mode 100644 index 0c2693499b6..00000000000 --- a/vendor/github.com/vmware/govmomi/vim25/types/internal.go +++ /dev/null @@ -1,266 +0,0 @@ -/* -Copyright (c) 2014 VMware, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import "reflect" - -type DynamicTypeMgrQueryMoInstances struct { - This ManagedObjectReference `xml:"_this"` - FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` -} - -type DynamicTypeMgrQueryMoInstancesResponse struct { - Returnval []DynamicTypeMgrMoInstance `xml:"urn:vim25 returnval"` -} - -type DynamicTypeEnumTypeInfo struct { - DynamicData - - Name string `xml:"name"` - WsdlName string `xml:"wsdlName"` - Version string `xml:"version"` - Value []string `xml:"value,omitempty"` - Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` -} - -func init() { - t["DynamicTypeEnumTypeInfo"] = reflect.TypeOf((*DynamicTypeEnumTypeInfo)(nil)).Elem() -} - -type DynamicTypeMgrAllTypeInfo struct { - DynamicData - - ManagedTypeInfo []DynamicTypeMgrManagedTypeInfo `xml:"managedTypeInfo,omitempty"` - EnumTypeInfo []DynamicTypeEnumTypeInfo `xml:"enumTypeInfo,omitempty"` - DataTypeInfo []DynamicTypeMgrDataTypeInfo `xml:"dataTypeInfo,omitempty"` -} - -func init() { - t["DynamicTypeMgrAllTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrAllTypeInfo)(nil)).Elem() -} - -type DynamicTypeMgrAnnotation struct { - DynamicData - - Name string `xml:"name"` - Parameter []string `xml:"parameter,omitempty"` -} - -func init() { - t["DynamicTypeMgrAnnotation"] = reflect.TypeOf((*DynamicTypeMgrAnnotation)(nil)).Elem() -} - -type DynamicTypeMgrDataTypeInfo struct { - DynamicData - - Name string `xml:"name"` - WsdlName string `xml:"wsdlName"` - Version string `xml:"version"` - Base []string `xml:"base,omitempty"` - Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` - Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` -} - -func init() { - t["DynamicTypeMgrDataTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrDataTypeInfo)(nil)).Elem() -} - -func (b *DynamicTypeMgrFilterSpec) GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec { return b } - -type BaseDynamicTypeMgrFilterSpec interface { - GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec -} - -type DynamicTypeMgrFilterSpec struct { - DynamicData -} - -func init() { - t["DynamicTypeMgrFilterSpec"] = reflect.TypeOf((*DynamicTypeMgrFilterSpec)(nil)).Elem() -} - -type DynamicTypeMgrManagedTypeInfo struct { - DynamicData - - Name string `xml:"name"` - WsdlName string `xml:"wsdlName"` - Version string `xml:"version"` - Base []string `xml:"base,omitempty"` - Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` - Method []DynamicTypeMgrMethodTypeInfo `xml:"method,omitempty"` - Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` -} - -func init() { - t["DynamicTypeMgrManagedTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrManagedTypeInfo)(nil)).Elem() -} - -type DynamicTypeMgrMethodTypeInfo struct { - DynamicData - - Name string `xml:"name"` - WsdlName string `xml:"wsdlName"` - Version string `xml:"version"` - ParamTypeInfo []DynamicTypeMgrParamTypeInfo `xml:"paramTypeInfo,omitempty"` - ReturnTypeInfo *DynamicTypeMgrParamTypeInfo `xml:"returnTypeInfo,omitempty"` - Fault []string `xml:"fault,omitempty"` - PrivId string `xml:"privId,omitempty"` - Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` -} - -func init() { - t["DynamicTypeMgrMethodTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrMethodTypeInfo)(nil)).Elem() -} - -type DynamicTypeMgrMoFilterSpec struct { - DynamicTypeMgrFilterSpec - - Id string `xml:"id,omitempty"` - TypeSubstr string `xml:"typeSubstr,omitempty"` -} - -func init() { - t["DynamicTypeMgrMoFilterSpec"] = 
reflect.TypeOf((*DynamicTypeMgrMoFilterSpec)(nil)).Elem() -} - -type DynamicTypeMgrMoInstance struct { - DynamicData - - Id string `xml:"id"` - MoType string `xml:"moType"` -} - -func init() { - t["DynamicTypeMgrMoInstance"] = reflect.TypeOf((*DynamicTypeMgrMoInstance)(nil)).Elem() -} - -type DynamicTypeMgrParamTypeInfo struct { - DynamicData - - Name string `xml:"name"` - Version string `xml:"version"` - Type string `xml:"type"` - PrivId string `xml:"privId,omitempty"` - Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` -} - -func init() { - t["DynamicTypeMgrParamTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrParamTypeInfo)(nil)).Elem() -} - -type DynamicTypeMgrPropertyTypeInfo struct { - DynamicData - - Name string `xml:"name"` - Version string `xml:"version"` - Type string `xml:"type"` - PrivId string `xml:"privId,omitempty"` - MsgIdFormat string `xml:"msgIdFormat,omitempty"` - Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` -} - -type DynamicTypeMgrQueryTypeInfo struct { - This ManagedObjectReference `xml:"_this"` - FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` -} - -type DynamicTypeMgrQueryTypeInfoResponse struct { - Returnval DynamicTypeMgrAllTypeInfo `xml:"urn:vim25 returnval"` -} - -func init() { - t["DynamicTypeMgrPropertyTypeInfo"] = reflect.TypeOf((*DynamicTypeMgrPropertyTypeInfo)(nil)).Elem() -} - -type DynamicTypeMgrTypeFilterSpec struct { - DynamicTypeMgrFilterSpec - - TypeSubstr string `xml:"typeSubstr,omitempty"` -} - -func init() { - t["DynamicTypeMgrTypeFilterSpec"] = reflect.TypeOf((*DynamicTypeMgrTypeFilterSpec)(nil)).Elem() -} - -type ReflectManagedMethodExecuterSoapArgument struct { - DynamicData - - Name string `xml:"name"` - Val string `xml:"val"` -} - -func init() { - t["ReflectManagedMethodExecuterSoapArgument"] = reflect.TypeOf((*ReflectManagedMethodExecuterSoapArgument)(nil)).Elem() -} - -type ReflectManagedMethodExecuterSoapFault struct { - DynamicData - - FaultMsg string `xml:"faultMsg"` - FaultDetail string `xml:"faultDetail,omitempty"` -} - -func init() { - t["ReflectManagedMethodExecuterSoapFault"] = reflect.TypeOf((*ReflectManagedMethodExecuterSoapFault)(nil)).Elem() -} - -type ReflectManagedMethodExecuterSoapResult struct { - DynamicData - - Response string `xml:"response,omitempty"` - Fault *ReflectManagedMethodExecuterSoapFault `xml:"fault,omitempty"` -} - -type RetrieveDynamicTypeManager struct { - This ManagedObjectReference `xml:"_this"` -} - -type RetrieveDynamicTypeManagerResponse struct { - Returnval *InternalDynamicTypeManager `xml:"urn:vim25 returnval"` -} - -type RetrieveManagedMethodExecuter struct { - This ManagedObjectReference `xml:"_this"` -} - -func init() { - t["RetrieveManagedMethodExecuter"] = reflect.TypeOf((*RetrieveManagedMethodExecuter)(nil)).Elem() -} - -type RetrieveManagedMethodExecuterResponse struct { - Returnval *ReflectManagedMethodExecuter `xml:"urn:vim25 returnval"` -} - -type InternalDynamicTypeManager struct { - ManagedObjectReference -} - -type ReflectManagedMethodExecuter struct { - ManagedObjectReference -} - -type ExecuteSoap struct { - This ManagedObjectReference `xml:"_this"` - Moid string `xml:"moid"` - Version string `xml:"version"` - Method string `xml:"method"` - Argument []ReflectManagedMethodExecuterSoapArgument `xml:"argument,omitempty"` -} - -type ExecuteSoapResponse struct { - Returnval *ReflectManagedMethodExecuterSoapResult `xml:"urn:vim25 returnval"` -} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/types.go 
b/vendor/github.com/vmware/govmomi/vim25/types/types.go index fa62c01815d..50cad1b6051 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/types.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/types.go @@ -11331,7 +11331,7 @@ func init() { type DVSNetworkResourcePoolAllocationInfo struct { DynamicData - Limit int64 `xml:"limit,omitempty"` + Limit *int64 `xml:"limit"` Shares *SharesInfo `xml:"shares,omitempty"` PriorityTag int32 `xml:"priorityTag,omitempty"` } @@ -14165,9 +14165,9 @@ func init() { type DvsHostInfrastructureTrafficResourceAllocation struct { DynamicData - Limit int64 `xml:"limit,omitempty"` + Limit *int64 `xml:"limit"` Shares *SharesInfo `xml:"shares,omitempty"` - Reservation int64 `xml:"reservation,omitempty"` + Reservation *int64 `xml:"reservation"` } func init() { @@ -14846,7 +14846,7 @@ type DvsVnicAllocatedResource struct { Vm ManagedObjectReference `xml:"vm"` VnicKey string `xml:"vnicKey"` - Reservation int64 `xml:"reservation,omitempty"` + Reservation *int64 `xml:"reservation"` } func init() { @@ -18341,9 +18341,9 @@ func init() { type GuestPosixFileAttributes struct { GuestFileAttributes - OwnerId int32 `xml:"ownerId,omitempty"` - GroupId int32 `xml:"groupId,omitempty"` - Permissions int64 `xml:"permissions,omitempty"` + OwnerId *int32 `xml:"ownerId"` + GroupId *int32 `xml:"groupId"` + Permissions int64 `xml:"permissions,omitempty"` } func init() { @@ -22589,7 +22589,7 @@ type HostPlacedVirtualNicIdentifier struct { Vm ManagedObjectReference `xml:"vm"` VnicKey string `xml:"vnicKey"` - Reservation int32 `xml:"reservation,omitempty"` + Reservation *int32 `xml:"reservation"` } func init() { @@ -27456,7 +27456,7 @@ type LimitExceeded struct { VimFault Property string `xml:"property,omitempty"` - Limit int32 `xml:"limit,omitempty"` + Limit *int32 `xml:"limit"` } func init() { @@ -27624,7 +27624,7 @@ func init() { type ListKeysRequestType struct { This ManagedObjectReference `xml:"_this"` - Limit int32 `xml:"limit,omitempty"` + Limit *int32 `xml:"limit"` } func init() { @@ -27643,7 +27643,7 @@ func init() { type ListKmipServersRequestType struct { This ManagedObjectReference `xml:"_this"` - Limit int32 `xml:"limit,omitempty"` + Limit *int32 `xml:"limit"` } func init() { @@ -28546,7 +28546,7 @@ func init() { type MethodActionArgument struct { DynamicData - Value AnyType `xml:"value,omitempty,typeattr"` + Value AnyType `xml:"value,typeattr"` } func init() { @@ -29452,7 +29452,7 @@ func init() { type NamespaceLimitReached struct { VimFault - Limit int32 `xml:"limit,omitempty"` + Limit *int32 `xml:"limit"` } func init() { @@ -31074,7 +31074,7 @@ type OptionValue struct { DynamicData Key string `xml:"key"` - Value AnyType `xml:"value,omitempty,typeattr"` + Value AnyType `xml:"value,typeattr"` } func init() { @@ -34231,7 +34231,7 @@ type PropertyChange struct { Name string `xml:"name"` Op PropertyChangeOp `xml:"op"` - Val AnyType `xml:"val,omitempty,typeattr"` + Val AnyType `xml:"val,typeattr"` } func init() { @@ -36332,7 +36332,7 @@ func init() { type QueryVsanObjectUuidsByFilterRequestType struct { This ManagedObjectReference `xml:"_this"` Uuids []string `xml:"uuids,omitempty"` - Limit int32 `xml:"limit,omitempty"` + Limit *int32 `xml:"limit"` Version int32 `xml:"version,omitempty"` } @@ -39065,11 +39065,11 @@ type ResolveMultipleUnresolvedVmfsVolumesResponse struct { type ResourceAllocationInfo struct { DynamicData - Reservation int64 `xml:"reservation,omitempty"` + Reservation *int64 `xml:"reservation"` ExpandableReservation *bool `xml:"expandableReservation"` - 
Limit int64 `xml:"limit,omitempty"` + Limit *int64 `xml:"limit"` Shares *SharesInfo `xml:"shares,omitempty"` - OverheadLimit int64 `xml:"overheadLimit,omitempty"` + OverheadLimit *int64 `xml:"overheadLimit"` } func init() { @@ -39100,11 +39100,11 @@ func init() { type ResourceConfigSpec struct { DynamicData - Entity *ManagedObjectReference `xml:"entity,omitempty"` - ChangeVersion string `xml:"changeVersion,omitempty"` - LastModified *time.Time `xml:"lastModified"` - CpuAllocation BaseResourceAllocationInfo `xml:"cpuAllocation,typeattr"` - MemoryAllocation BaseResourceAllocationInfo `xml:"memoryAllocation,typeattr"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + ChangeVersion string `xml:"changeVersion,omitempty"` + LastModified *time.Time `xml:"lastModified"` + CpuAllocation ResourceAllocationInfo `xml:"cpuAllocation"` + MemoryAllocation ResourceAllocationInfo `xml:"memoryAllocation"` } func init() { @@ -42322,9 +42322,9 @@ func init() { type StorageIOAllocationInfo struct { DynamicData - Limit int64 `xml:"limit,omitempty"` + Limit *int64 `xml:"limit"` Shares *SharesInfo `xml:"shares,omitempty"` - Reservation int32 `xml:"reservation,omitempty"` + Reservation *int32 `xml:"reservation"` } func init() { @@ -47957,9 +47957,9 @@ func init() { type VirtualEthernetCardResourceAllocation struct { DynamicData - Reservation int64 `xml:"reservation,omitempty"` + Reservation *int64 `xml:"reservation"` Share SharesInfo `xml:"share"` - Limit int64 `xml:"limit,omitempty"` + Limit *int64 `xml:"limit"` } func init() { @@ -48356,8 +48356,8 @@ type VirtualMachineConfigInfo struct { ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` DefaultPowerOps VirtualMachineDefaultPowerOpInfo `xml:"defaultPowerOps"` Hardware VirtualHardware `xml:"hardware"` - CpuAllocation BaseResourceAllocationInfo `xml:"cpuAllocation,omitempty,typeattr"` - MemoryAllocation BaseResourceAllocationInfo `xml:"memoryAllocation,omitempty,typeattr"` + CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` + MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` @@ -48493,8 +48493,8 @@ type VirtualMachineConfigSpec struct { VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` VirtualSMCPresent *bool `xml:"virtualSMCPresent"` DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` - CpuAllocation BaseResourceAllocationInfo `xml:"cpuAllocation,omitempty,typeattr"` - MemoryAllocation BaseResourceAllocationInfo `xml:"memoryAllocation,omitempty,typeattr"` + CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` + MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` @@ -53154,8 +53154,8 @@ type WaitForUpdatesResponse struct { type WaitOptions struct { DynamicData - MaxWaitSeconds int32 `xml:"maxWaitSeconds,omitempty"` - MaxObjectUpdates int32 `xml:"maxObjectUpdates,omitempty"` + MaxWaitSeconds *int32 `xml:"maxWaitSeconds"` + MaxObjectUpdates int32 `xml:"maxObjectUpdates,omitempty"` } func init() { From a8180d6fe03e52be8799e7c2c6e44ec6376e30a6 Mon Sep 17 00:00:00 2001 From: Doug MacEachern Date: Thu, 16 Nov 
2017 20:14:46 -0800 Subject: [PATCH 055/264] Add vSphere Cloud Provider simulator based tests Initial set of vcsim based tests. --- .../vsphere/vclib/datacenter_test.go | 174 ++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 pkg/cloudprovider/providers/vsphere/vclib/datacenter_test.go diff --git a/pkg/cloudprovider/providers/vsphere/vclib/datacenter_test.go b/pkg/cloudprovider/providers/vsphere/vclib/datacenter_test.go new file mode 100644 index 00000000000..522ed2b7fc2 --- /dev/null +++ b/pkg/cloudprovider/providers/vsphere/vclib/datacenter_test.go @@ -0,0 +1,174 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vclib + +import ( + "context" + "testing" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/simulator" +) + +func TestDatacenter(t *testing.T) { + ctx := context.Background() + + // vCenter model + initial set of objects (cluster, hosts, VMs, network, datastore, etc) + model := simulator.VPX() + + defer model.Remove() + err := model.Create() + if err != nil { + t.Fatal(err) + } + + s := model.Service.NewServer() + defer s.Close() + + avm := simulator.Map.Any("VirtualMachine").(*simulator.VirtualMachine) + + c, err := govmomi.NewClient(ctx, s.URL, true) + if err != nil { + t.Fatal(err) + } + + vc := &VSphereConnection{GoVmomiClient: c} + + _, err = GetDatacenter(ctx, vc, "enoent") + if err == nil { + t.Error("expected error") + } + + dc, err := GetDatacenter(ctx, vc, "DC0") + if err != nil { + t.Error(err) + } + + _, err = dc.GetVMByUUID(ctx, "enoent") + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetVMByUUID(ctx, avm.Summary.Config.Uuid) + if err != nil { + t.Error(err) + } + + _, err = dc.GetVMByPath(ctx, "enoent") + if err == nil { + t.Error("expected error") + } + + vm, err := dc.GetVMByPath(ctx, "/DC0/vm/"+avm.Name) + if err != nil { + t.Error(err) + } + + _, err = dc.GetDatastoreByPath(ctx, "enoent") // invalid format + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetDatastoreByPath(ctx, "[enoent] no/no.vmx") + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetDatastoreByPath(ctx, avm.Summary.Config.VmPathName) + if err != nil { + t.Error(err) + } + + _, err = dc.GetDatastoreByName(ctx, "enoent") + if err == nil { + t.Error("expected error") + } + + ds, err := dc.GetDatastoreByName(ctx, "LocalDS_0") + if err != nil { + t.Error(err) + } + + _, err = dc.GetFolderByPath(ctx, "enoent") + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetFolderByPath(ctx, "/DC0/vm") + if err != nil { + t.Error(err) + } + + _, err = dc.GetVMMoList(ctx, nil, nil) + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"enoent"}) // invalid property + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"}) + if err != nil { + t.Error(err) + } + + vmdk := ds.Path(avm.Name + "/disk1.vmdk") + + _, err = dc.GetVirtualDiskPage83Data(ctx, 
vmdk+"-enoent") + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetVirtualDiskPage83Data(ctx, vmdk) + if err != nil { + t.Error(err) + } + + _, err = dc.GetDatastoreMoList(ctx, nil, nil) + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetDatastoreMoList(ctx, []*Datastore{ds}, []string{"enoent"}) // invalid property + if err == nil { + t.Error("expected error") + } + + _, err = dc.GetDatastoreMoList(ctx, []*Datastore{ds}, []string{DatastoreInfoProperty}) + if err != nil { + t.Error(err) + } + + nodeVolumes := map[string][]string{ + avm.Name: {"enoent", vmdk}, + } + + attached, err := dc.CheckDisksAttached(ctx, nodeVolumes) + if err != nil { + t.Error(err) + } + + if attached[avm.Name]["enoent"] { + t.Error("should not be attached") + } + + if !attached[avm.Name][vmdk] { + t.Errorf("%s should be attached", vmdk) + } +} From ea085e0a32a6b723e5c565e60d8941b5a760bb68 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Fri, 15 Dec 2017 15:02:31 -0800 Subject: [PATCH 056/264] client-go: remove import of github.com/gregjones/httpcache --- pkg/kubectl/cmd/util/BUILD | 1 + pkg/kubectl/cmd/util/factory_client_access.go | 12 ++- pkg/kubectl/util/BUILD | 1 + pkg/kubectl/util/transport/BUILD | 34 +++++++ pkg/kubectl/util/transport/round_tripper.go | 51 ++++++++++ .../util/transport/round_tripper_test.go | 95 +++++++++++++++++++ staging/src/k8s.io/client-go/rest/config.go | 5 - .../src/k8s.io/client-go/rest/config_test.go | 1 - .../src/k8s.io/client-go/rest/transport.go | 1 - staging/src/k8s.io/client-go/transport/BUILD | 3 - .../src/k8s.io/client-go/transport/config.go | 4 - .../client-go/transport/round_trippers.go | 31 ------ .../transport/round_trippers_test.go | 61 ------------ 13 files changed, 193 insertions(+), 107 deletions(-) create mode 100644 pkg/kubectl/util/transport/BUILD create mode 100644 pkg/kubectl/util/transport/round_tripper.go create mode 100644 pkg/kubectl/util/transport/round_tripper_test.go diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 282666a1be4..6541d3953e9 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -40,6 +40,7 @@ go_library( "//pkg/kubectl/plugins:go_default_library", "//pkg/kubectl/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", + "//pkg/kubectl/util/transport:go_default_library", "//pkg/kubectl/validation:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index d03ebdc165c..391aa33a384 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -23,6 +23,7 @@ import ( "flag" "fmt" "io" + "net/http" "os" "path/filepath" "regexp" @@ -59,6 +60,7 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/kubectl/util/transport" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" ) @@ -109,7 +111,15 @@ func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface return nil, err } - cfg.CacheDir = f.cacheDir + if f.cacheDir != "" { + wt := cfg.WrapTransport + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } + return transport.NewCacheRoundTripper(f.cacheDir, rt) + } + } discoveryClient, err := 
discovery.NewDiscoveryClientForConfig(cfg) if err != nil { diff --git a/pkg/kubectl/util/BUILD b/pkg/kubectl/util/BUILD index 96457f550ef..c1c2e305d65 100644 --- a/pkg/kubectl/util/BUILD +++ b/pkg/kubectl/util/BUILD @@ -101,6 +101,7 @@ filegroup( "//pkg/kubectl/util/logs:all-srcs", "//pkg/kubectl/util/slice:all-srcs", "//pkg/kubectl/util/term:all-srcs", + "//pkg/kubectl/util/transport:all-srcs", ], tags = ["automanaged"], visibility = ["//build/visible_to:pkg_kubectl_util_CONSUMERS"], diff --git a/pkg/kubectl/util/transport/BUILD b/pkg/kubectl/util/transport/BUILD new file mode 100644 index 00000000000..ffdf4a2607e --- /dev/null +++ b/pkg/kubectl/util/transport/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["round_tripper.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/transport", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gregjones/httpcache:go_default_library", + "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", + "//vendor/github.com/peterbourgon/diskv:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["round_tripper_test.go"], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/transport", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/kubectl/util/transport/round_tripper.go b/pkg/kubectl/util/transport/round_tripper.go new file mode 100644 index 00000000000..82e3e502e5e --- /dev/null +++ b/pkg/kubectl/util/transport/round_tripper.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package transport provides a round tripper capable of caching HTTP responses. +package transport + +import ( + "net/http" + "path/filepath" + + "github.com/gregjones/httpcache" + "github.com/gregjones/httpcache/diskcache" + "github.com/peterbourgon/diskv" +) + +type cacheRoundTripper struct { + rt *httpcache.Transport +} + +// NewCacheRoundTripper creates a roundtripper that reads the ETag on +// response headers and send the If-None-Match header on subsequent +// corresponding requests. 
+func NewCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { + d := diskv.New(diskv.Options{ + BasePath: cacheDir, + TempDir: filepath.Join(cacheDir, ".diskv-temp"), + }) + t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) + t.Transport = rt + + return &cacheRoundTripper{rt: t} +} + +func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.rt.RoundTrip(req) +} + +func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/pkg/kubectl/util/transport/round_tripper_test.go b/pkg/kubectl/util/transport/round_tripper_test.go new file mode 100644 index 00000000000..e68e8e37223 --- /dev/null +++ b/pkg/kubectl/util/transport/round_tripper_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/url" + "os" + "testing" +) + +// copied from k8s.io/client-go/transport/round_trippers_test.go +type testRoundTripper struct { + Request *http.Request + Response *http.Response + Err error +} + +func (rt *testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.Request = req + return rt.Response, rt.Err +} + +func TestCacheRoundTripper(t *testing.T) { + rt := &testRoundTripper{} + cacheDir, err := ioutil.TempDir("", "cache-rt") + defer os.RemoveAll(cacheDir) + if err != nil { + t.Fatal(err) + } + cache := NewCacheRoundTripper(cacheDir, rt) + + // First call, caches the response + req := &http.Request{ + Method: http.MethodGet, + URL: &url.URL{Host: "localhost"}, + } + rt.Response = &http.Response{ + Header: http.Header{"ETag": []string{`"123456"`}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Content"))), + StatusCode: http.StatusOK, + } + resp, err := cache.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "Content" { + t.Errorf(`Expected Body to be "Content", got %q`, string(content)) + } + + // Second call, returns cached response + req = &http.Request{ + Method: http.MethodGet, + URL: &url.URL{Host: "localhost"}, + } + rt.Response = &http.Response{ + StatusCode: http.StatusNotModified, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Other Content"))), + } + + resp, err = cache.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + + // Read body and make sure we have the initial content + content, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if string(content) != "Content" { + t.Errorf("Invalid content read from cache %q", string(content)) + } +} diff --git a/staging/src/k8s.io/client-go/rest/config.go b/staging/src/k8s.io/client-go/rest/config.go index 038fee94537..eb006ea101e 100644 --- a/staging/src/k8s.io/client-go/rest/config.go +++ b/staging/src/k8s.io/client-go/rest/config.go @@ -71,10 +71,6 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. 
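Because this commit also removes CacheDir from rest.Config below, callers that still want on-disk response caching must compose it themselves. A hedged sketch mirroring the factory_client_access.go change earlier in this commit (the helper name withResponseCache is hypothetical):

package example // illustrative sketch only

import (
	"net/http"

	"k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/kubectl/util/transport"
)

// withResponseCache chains the relocated cache round tripper onto whatever
// WrapTransport the config already carries.
func withResponseCache(cfg *rest.Config, cacheDir string) *rest.Config {
	prev := cfg.WrapTransport
	cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
		if prev != nil {
			rt = prev(rt)
		}
		return transport.NewCacheRoundTripper(cacheDir, rt)
	}
	return cfg
}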
BearerToken string - // CacheDir is the directory where we'll store HTTP cached responses. - // If set to empty string, no caching mechanism will be used. - CacheDir string - // Impersonate is the configuration that RESTClient will use for impersonation. Impersonate ImpersonationConfig @@ -434,7 +430,6 @@ func CopyConfig(config *Config) *Config { Username: config.Username, Password: config.Password, BearerToken: config.BearerToken, - CacheDir: config.CacheDir, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, diff --git a/staging/src/k8s.io/client-go/rest/config_test.go b/staging/src/k8s.io/client-go/rest/config_test.go index 0e86442dbd0..5d4d1a2fe3e 100644 --- a/staging/src/k8s.io/client-go/rest/config_test.go +++ b/staging/src/k8s.io/client-go/rest/config_test.go @@ -267,7 +267,6 @@ func TestAnonymousConfig(t *testing.T) { expected.BearerToken = "" expected.Username = "" expected.Password = "" - expected.CacheDir = "" expected.AuthProvider = nil expected.AuthConfigPersister = nil expected.TLSClientConfig.CertData = nil diff --git a/staging/src/k8s.io/client-go/rest/transport.go b/staging/src/k8s.io/client-go/rest/transport.go index f59f8dbe278..878c6abf164 100644 --- a/staging/src/k8s.io/client-go/rest/transport.go +++ b/staging/src/k8s.io/client-go/rest/transport.go @@ -89,7 +89,6 @@ func (c *Config) TransportConfig() (*transport.Config, error) { }, Username: c.Username, Password: c.Password, - CacheDir: c.CacheDir, BearerToken: c.BearerToken, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, diff --git a/staging/src/k8s.io/client-go/transport/BUILD b/staging/src/k8s.io/client-go/transport/BUILD index e5a5a570939..d9c8502faf5 100644 --- a/staging/src/k8s.io/client-go/transport/BUILD +++ b/staging/src/k8s.io/client-go/transport/BUILD @@ -28,9 +28,6 @@ go_library( importpath = "k8s.io/client-go/transport", deps = [ "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/gregjones/httpcache:go_default_library", - "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", - "//vendor/github.com/peterbourgon/diskv:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/transport/config.go b/staging/src/k8s.io/client-go/transport/config.go index 425f8f87a53..af347dafea8 100644 --- a/staging/src/k8s.io/client-go/transport/config.go +++ b/staging/src/k8s.io/client-go/transport/config.go @@ -37,10 +37,6 @@ type Config struct { // Bearer token for authentication BearerToken string - // CacheDir is the directory where we'll store HTTP cached responses. - // If set to empty string, no caching mechanism will be used. 
- CacheDir string - // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig diff --git a/staging/src/k8s.io/client-go/transport/round_trippers.go b/staging/src/k8s.io/client-go/transport/round_trippers.go index 2ee605d7be0..c728b18775f 100644 --- a/staging/src/k8s.io/client-go/transport/round_trippers.go +++ b/staging/src/k8s.io/client-go/transport/round_trippers.go @@ -19,14 +19,10 @@ package transport import ( "fmt" "net/http" - "path/filepath" "strings" "time" "github.com/golang/glog" - "github.com/gregjones/httpcache" - "github.com/gregjones/httpcache/diskcache" - "github.com/peterbourgon/diskv" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -60,9 +56,6 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip len(config.Impersonate.Extra) > 0 { rt = NewImpersonatingRoundTripper(config.Impersonate, rt) } - if len(config.CacheDir) > 0 { - rt = NewCacheRoundTripper(config.CacheDir, rt) - } return rt, nil } @@ -86,30 +79,6 @@ type requestCanceler interface { CancelRequest(*http.Request) } -type cacheRoundTripper struct { - rt *httpcache.Transport -} - -// NewCacheRoundTripper creates a roundtripper that reads the ETag on -// response headers and send the If-None-Match header on subsequent -// corresponding requests. -func NewCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { - d := diskv.New(diskv.Options{ - BasePath: cacheDir, - TempDir: filepath.Join(cacheDir, ".diskv-temp"), - }) - t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) - t.Transport = rt - - return &cacheRoundTripper{rt: t} -} - -func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return rt.rt.RoundTrip(req) -} - -func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } - type authProxyRoundTripper struct { username string groups []string diff --git a/staging/src/k8s.io/client-go/transport/round_trippers_test.go b/staging/src/k8s.io/client-go/transport/round_trippers_test.go index c1e30c3f208..d5ffc6bde30 100644 --- a/staging/src/k8s.io/client-go/transport/round_trippers_test.go +++ b/staging/src/k8s.io/client-go/transport/round_trippers_test.go @@ -17,11 +17,7 @@ limitations under the License. 
package transport import ( - "bytes" - "io/ioutil" "net/http" - "net/url" - "os" "reflect" "strings" "testing" @@ -220,60 +216,3 @@ func TestAuthProxyRoundTripper(t *testing.T) { } } } - -func TestCacheRoundTripper(t *testing.T) { - rt := &testRoundTripper{} - cacheDir, err := ioutil.TempDir("", "cache-rt") - defer os.RemoveAll(cacheDir) - if err != nil { - t.Fatal(err) - } - cache := NewCacheRoundTripper(cacheDir, rt) - - // First call, caches the response - req := &http.Request{ - Method: http.MethodGet, - URL: &url.URL{Host: "localhost"}, - } - rt.Response = &http.Response{ - Header: http.Header{"ETag": []string{`"123456"`}}, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Content"))), - StatusCode: http.StatusOK, - } - resp, err := cache.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - content, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - if string(content) != "Content" { - t.Errorf(`Expected Body to be "Content", got %q`, string(content)) - } - - // Second call, returns cached response - req = &http.Request{ - Method: http.MethodGet, - URL: &url.URL{Host: "localhost"}, - } - rt.Response = &http.Response{ - StatusCode: http.StatusNotModified, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Other Content"))), - } - - resp, err = cache.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - - // Read body and make sure we have the initial content - content, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if string(content) != "Content" { - t.Errorf("Invalid content read from cache %q", string(content)) - } -} From dedeb99c97dcd0e5814ba49cf0f82aa7bf23f4ad Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Mon, 8 Jan 2018 10:00:13 -0800 Subject: [PATCH 057/264] generated: update staging godeps --- .../apiextensions-apiserver/Godeps/Godeps.json | 16 ---------------- staging/src/k8s.io/apiserver/Godeps/Godeps.json | 12 ------------ staging/src/k8s.io/client-go/Godeps/Godeps.json | 16 ---------------- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 16 ---------------- staging/src/k8s.io/metrics/Godeps/Godeps.json | 16 ---------------- .../k8s.io/sample-apiserver/Godeps/Godeps.json | 16 ---------------- .../k8s.io/sample-controller/Godeps/Godeps.json | 16 ---------------- 7 files changed, 108 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 2aaf0acc53b..770352cf7d1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -190,10 +190,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -210,14 +206,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -270,10 +258,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - 
"ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index a8011ef1d06..b1fcb0a32f1 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -518,14 +518,6 @@ "ImportPath": "github.com/gophercloud/gophercloud/pagination", "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", "Rev": "2500245aa6110c562d17020fb31a2c133d737799" @@ -594,10 +586,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index c3d9ca80a46..4fae7ddb897 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -150,10 +150,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -198,14 +194,6 @@ "ImportPath": "github.com/gophercloud/gophercloud/pagination", "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -246,10 +234,6 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index bac5d82c33e..abddac483d5 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -170,10 +170,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -190,14 +186,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - 
"ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -250,10 +238,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index e964a638073..ce359bb79d8 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -74,10 +74,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -94,14 +90,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/json-iterator/go", "Rev": "13f86432b882000a51c6e610c620974462691a97" @@ -122,10 +110,6 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 27ea0ca6a47..ce731cb79fd 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -162,10 +162,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -182,14 +178,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -242,10 +230,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 5c0249ded73..ce34ffc7dfd 100644 --- 
a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -82,10 +82,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -102,14 +98,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -146,10 +134,6 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" From f6721480f425bdbfb10b4edc831187f69c63ba9f Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Mon, 8 Jan 2018 10:20:02 -0800 Subject: [PATCH 058/264] enable on-demand metrics for eviction --- pkg/kubelet/eviction/eviction_manager.go | 4 +++- pkg/kubelet/eviction/helpers.go | 3 ++- pkg/kubelet/eviction/helpers_test.go | 2 +- pkg/kubelet/server/server_test.go | 2 +- pkg/kubelet/server/stats/handler.go | 6 ++++-- pkg/kubelet/server/stats/summary.go | 11 ++++++----- pkg/kubelet/server/stats/summary_test.go | 10 +++++----- .../stats/testing/mock_stats_provider.go | 18 +++++++++--------- pkg/kubelet/stats/helper.go | 12 +++++++++--- pkg/kubelet/stats/stats_provider.go | 8 ++++---- pkg/kubelet/stats/stats_provider_test.go | 7 ++++--- 11 files changed, 48 insertions(+), 35 deletions(-) diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 007c204f7fb..7811f3bea5f 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -455,7 +455,9 @@ func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim v1.ResourceNam // localStorageEviction checks the EmptyDir volume usage for each pod and determine whether it exceeds the specified limit and needs // to be evicted. It also checks every container in the pod, if the container overlay usage exceeds the limit, the pod will be evicted too. func (m *managerImpl) localStorageEviction(pods []*v1.Pod) []*v1.Pod { - summary, err := m.summaryProvider.Get() + // do not update node-level stats as local storage evictions do not utilize them. + forceStatsUpdate := false + summary, err := m.summaryProvider.Get(forceStatsUpdate) if err != nil { glog.Errorf("Could not get summary provider") return nil diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index 3ae9c0306a6..4376c842882 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -712,7 +712,8 @@ func (a byEvictionPriority) Less(i, j int) bool { // makeSignalObservations derives observations using the specified summary provider. 
func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvider CapacityProvider, pods []*v1.Pod) (signalObservations, statsFunc, error) { - summary, err := summaryProvider.Get() + updateStats := true + summary, err := summaryProvider.Get(updateStats) if err != nil { return nil, nil, err } diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index 5a794112051..727e0be14b9 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -920,7 +920,7 @@ type fakeSummaryProvider struct { result *statsapi.Summary } -func (f *fakeSummaryProvider) Get() (*statsapi.Summary, error) { +func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) { return f.result, nil } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index c1b068f2318..230b7df43a7 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -176,7 +176,7 @@ func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Vo func (_ *fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil } func (_ *fakeKubelet) ListPodStats() ([]statsapi.PodStats, error) { return nil, nil } func (_ *fakeKubelet) ImageFsStats() (*statsapi.FsStats, error) { return nil, nil } -func (_ *fakeKubelet) GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { +func (_ *fakeKubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { return nil, nil, nil } diff --git a/pkg/kubelet/server/stats/handler.go b/pkg/kubelet/server/stats/handler.go index 969b25f3de1..85d441e9f73 100644 --- a/pkg/kubelet/server/stats/handler.go +++ b/pkg/kubelet/server/stats/handler.go @@ -50,7 +50,7 @@ type StatsProvider interface { // // GetCgroupStats returns the stats and the networking usage of the cgroup // with the specified cgroupName. - GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) + GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) // RootFsStats returns the stats of the node root filesystem. RootFsStats() (*statsapi.FsStats, error) @@ -183,7 +183,9 @@ func (h *handler) handleStats(request *restful.Request, response *restful.Respon // Handles stats summary requests to /stats/summary func (h *handler) handleSummary(request *restful.Request, response *restful.Response) { - summary, err := h.summaryProvider.Get() + // external calls to the summary API use cached stats + forceStatsUpdate := false + summary, err := h.summaryProvider.Get(forceStatsUpdate) if err != nil { handleError(response, "/stats/summary", err) } else { diff --git a/pkg/kubelet/server/stats/summary.go b/pkg/kubelet/server/stats/summary.go index 3e59d772475..a1ffc22ea49 100644 --- a/pkg/kubelet/server/stats/summary.go +++ b/pkg/kubelet/server/stats/summary.go @@ -25,7 +25,9 @@ import ( ) type SummaryProvider interface { - Get() (*statsapi.Summary, error) + // Get provides a new Summary with the stats from Kubelet, + // and will update some stats if updateStats is true + Get(updateStats bool) (*statsapi.Summary, error) } // summaryProviderImpl implements the SummaryProvider interface. @@ -41,8 +43,7 @@ func NewSummaryProvider(statsProvider StatsProvider) SummaryProvider { return &summaryProviderImpl{statsProvider} } -// Get provides a new Summary with the stats from Kubelet. 
-func (sp *summaryProviderImpl) Get() (*statsapi.Summary, error) { +func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) { // TODO(timstclair): Consider returning a best-effort response if any of // the following errors occur. node, err := sp.provider.GetNode() @@ -50,7 +51,7 @@ func (sp *summaryProviderImpl) Get() (*statsapi.Summary, error) { return nil, fmt.Errorf("failed to get node info: %v", err) } nodeConfig := sp.provider.GetNodeConfig() - rootStats, networkStats, err := sp.provider.GetCgroupStats("/") + rootStats, networkStats, err := sp.provider.GetCgroupStats("/", updateStats) if err != nil { return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) } @@ -87,7 +88,7 @@ func (sp *summaryProviderImpl) Get() (*statsapi.Summary, error) { if name == "" { continue } - s, _, err := sp.provider.GetCgroupStats(name) + s, _, err := sp.provider.GetCgroupStats(name, false) if err != nil { glog.Errorf("Failed to get system container stats for %q: %v", name, err) continue diff --git a/pkg/kubelet/server/stats/summary_test.go b/pkg/kubelet/server/stats/summary_test.go index 422688446f9..49b4ebebba1 100644 --- a/pkg/kubelet/server/stats/summary_test.go +++ b/pkg/kubelet/server/stats/summary_test.go @@ -69,13 +69,13 @@ func TestSummaryProvider(t *testing.T) { On("ListPodStats").Return(podStats, nil). On("ImageFsStats").Return(imageFsStats, nil). On("RootFsStats").Return(rootFsStats, nil). - On("GetCgroupStats", "/").Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil). - On("GetCgroupStats", "/runtime").Return(cgroupStatsMap["/runtime"].cs, cgroupStatsMap["/runtime"].ns, nil). - On("GetCgroupStats", "/misc").Return(cgroupStatsMap["/misc"].cs, cgroupStatsMap["/misc"].ns, nil). - On("GetCgroupStats", "/kubelet").Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil) + On("GetCgroupStats", "/", true).Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil). + On("GetCgroupStats", "/runtime", false).Return(cgroupStatsMap["/runtime"].cs, cgroupStatsMap["/runtime"].ns, nil). + On("GetCgroupStats", "/misc", false).Return(cgroupStatsMap["/misc"].cs, cgroupStatsMap["/misc"].ns, nil). 
+ On("GetCgroupStats", "/kubelet", false).Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil) provider := NewSummaryProvider(mockStatsProvider) - summary, err := provider.Get() + summary, err := provider.Get(true) assert.NoError(err) assert.Equal(summary.Node.NodeName, "test-node") diff --git a/pkg/kubelet/server/stats/testing/mock_stats_provider.go b/pkg/kubelet/server/stats/testing/mock_stats_provider.go index befa19b0bf7..0da5d872f30 100644 --- a/pkg/kubelet/server/stats/testing/mock_stats_provider.go +++ b/pkg/kubelet/server/stats/testing/mock_stats_provider.go @@ -33,13 +33,13 @@ type StatsProvider struct { mock.Mock } -// GetCgroupStats provides a mock function with given fields: cgroupName -func (_m *StatsProvider) GetCgroupStats(cgroupName string) (*v1alpha1.ContainerStats, *v1alpha1.NetworkStats, error) { - ret := _m.Called(cgroupName) +// GetCgroupStats provides a mock function with given fields: cgroupName, updateStats +func (_m *StatsProvider) GetCgroupStats(cgroupName string, updateStats bool) (*v1alpha1.ContainerStats, *v1alpha1.NetworkStats, error) { + ret := _m.Called(cgroupName, updateStats) var r0 *v1alpha1.ContainerStats - if rf, ok := ret.Get(0).(func(string) *v1alpha1.ContainerStats); ok { - r0 = rf(cgroupName) + if rf, ok := ret.Get(0).(func(string, bool) *v1alpha1.ContainerStats); ok { + r0 = rf(cgroupName, updateStats) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*v1alpha1.ContainerStats) @@ -47,8 +47,8 @@ func (_m *StatsProvider) GetCgroupStats(cgroupName string) (*v1alpha1.ContainerS } var r1 *v1alpha1.NetworkStats - if rf, ok := ret.Get(1).(func(string) *v1alpha1.NetworkStats); ok { - r1 = rf(cgroupName) + if rf, ok := ret.Get(1).(func(string, bool) *v1alpha1.NetworkStats); ok { + r1 = rf(cgroupName, updateStats) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*v1alpha1.NetworkStats) @@ -56,8 +56,8 @@ func (_m *StatsProvider) GetCgroupStats(cgroupName string) (*v1alpha1.ContainerS } var r2 error - if rf, ok := ret.Get(2).(func(string) error); ok { - r2 = rf(cgroupName) + if rf, ok := ret.Get(2).(func(string, bool) error); ok { + r2 = rf(cgroupName, updateStats) } else { r2 = ret.Error(2) } diff --git a/pkg/kubelet/stats/helper.go b/pkg/kubelet/stats/helper.go index 6856a8c76da..cee923722a7 100644 --- a/pkg/kubelet/stats/helper.go +++ b/pkg/kubelet/stats/helper.go @@ -238,11 +238,17 @@ func isMemoryUnlimited(v uint64) bool { // getCgroupInfo returns the information of the container with the specified // containerName from cadvisor. -func getCgroupInfo(cadvisor cadvisor.Interface, containerName string) (*cadvisorapiv2.ContainerInfo, error) { +func getCgroupInfo(cadvisor cadvisor.Interface, containerName string, updateStats bool) (*cadvisorapiv2.ContainerInfo, error) { + var maxAge *time.Duration + if updateStats { + age := 0 * time.Second + maxAge = &age + } infoMap, err := cadvisor.ContainerInfoV2(containerName, cadvisorapiv2.RequestOptions{ IdType: cadvisorapiv2.TypeName, Count: 2, // 2 samples are needed to compute "instantaneous" CPU Recursive: false, + MaxAge: maxAge, }) if err != nil { return nil, fmt.Errorf("failed to get container info for %q: %v", containerName, err) @@ -256,8 +262,8 @@ func getCgroupInfo(cadvisor cadvisor.Interface, containerName string) (*cadvisor // getCgroupStats returns the latest stats of the container having the // specified containerName from cadvisor. 
-func getCgroupStats(cadvisor cadvisor.Interface, containerName string) (*cadvisorapiv2.ContainerStats, error) { - info, err := getCgroupInfo(cadvisor, containerName) +func getCgroupStats(cadvisor cadvisor.Interface, containerName string, updateStats bool) (*cadvisorapiv2.ContainerStats, error) { + info, err := getCgroupInfo(cadvisor, containerName, updateStats) if err != nil { return nil, err } diff --git a/pkg/kubelet/stats/stats_provider.go b/pkg/kubelet/stats/stats_provider.go index b61a4107940..235f1a46d2d 100644 --- a/pkg/kubelet/stats/stats_provider.go +++ b/pkg/kubelet/stats/stats_provider.go @@ -88,8 +88,8 @@ type containerStatsProvider interface { // GetCgroupStats returns the stats of the cgroup with the cgroupName. Note that // this function doesn't generate filesystem stats. -func (p *StatsProvider) GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { - info, err := getCgroupInfo(p.cadvisor, cgroupName) +func (p *StatsProvider) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { + info, err := getCgroupInfo(p.cadvisor, cgroupName, updateStats) if err != nil { return nil, nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err) } @@ -113,8 +113,8 @@ func (p *StatsProvider) RootFsStats() (*statsapi.FsStats, error) { } // Get the root container stats's timestamp, which will be used as the - // imageFs stats timestamp. - rootStats, err := getCgroupStats(p.cadvisor, "/") + // imageFs stats timestamp. Dont force a stats update, as we only want the timestamp. + rootStats, err := getCgroupStats(p.cadvisor, "/", false) if err != nil { return nil, fmt.Errorf("failed to get root container stats: %v", err) } diff --git a/pkg/kubelet/stats/stats_provider_test.go b/pkg/kubelet/stats/stats_provider_test.go index 20616f39025..6f02704ed59 100644 --- a/pkg/kubelet/stats/stats_provider_test.go +++ b/pkg/kubelet/stats/stats_provider_test.go @@ -71,6 +71,7 @@ func TestGetCgroupStats(t *testing.T) { const ( cgroupName = "test-cgroup-name" containerInfoSeed = 1000 + updateStats = false ) var ( mockCadvisor = new(cadvisortest.Mock) @@ -87,7 +88,7 @@ func TestGetCgroupStats(t *testing.T) { mockCadvisor.On("ContainerInfoV2", cgroupName, options).Return(containerInfoMap, nil) provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{}) - cs, ns, err := provider.GetCgroupStats(cgroupName) + cs, ns, err := provider.GetCgroupStats(cgroupName, updateStats) assert.NoError(err) checkCPUStats(t, "", containerInfoSeed, cs.CPU) @@ -599,8 +600,8 @@ type fakeResourceAnalyzer struct { podVolumeStats serverstats.PodVolumeStats } -func (o *fakeResourceAnalyzer) Start() {} -func (o *fakeResourceAnalyzer) Get() (*statsapi.Summary, error) { return nil, nil } +func (o *fakeResourceAnalyzer) Start() {} +func (o *fakeResourceAnalyzer) Get(bool) (*statsapi.Summary, error) { return nil, nil } func (o *fakeResourceAnalyzer) GetPodVolumeStats(uid types.UID) (serverstats.PodVolumeStats, bool) { return o.podVolumeStats, true } From 2ccf22a5d03dad3f46f76d587fe02a44cdb3a6f3 Mon Sep 17 00:00:00 2001 From: mlmhl Date: Thu, 4 Jan 2018 13:19:29 +0800 Subject: [PATCH 059/264] fix rbd ConstructVolumeSpec bug --- pkg/volume/rbd/rbd.go | 37 ++++++++++- pkg/volume/rbd/rbd_test.go | 128 ++++++++++++++++++++++++++++++++++++- pkg/volume/rbd/rbd_util.go | 14 +++- 3 files changed, 174 insertions(+), 5 deletions(-) diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 
0d4e8c21958..12c2898e3d1 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -357,7 +357,26 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol } s := dstrings.Split(sourceName, "-image-") if len(s) != 2 { - return nil, fmt.Errorf("sourceName %s wrong, should be pool+\"-image-\"+imageName", sourceName) + // The mountPath parameter is the volume mount path for a specific pod, its format + // is /var/lib/kubelet/pods/{podUID}/volumes/{volumePluginName}/{volumeName}. + // mounter.GetDeviceNameFromMount will find the device path(such as /dev/rbd0) by + // mountPath first, and then try to find the global device mount path from the mounted + // path list of this device. sourceName is extracted from this global device mount path. + // mounter.GetDeviceNameFromMount expects the global device mount path conforms to canonical + // format: /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/{pool}-image-{image}. + // If this assertion failed, it means that the global device mount path is created by + // the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/{pool}-image-{image}. + // So we will try to check whether this old style global device mount path exist or not. + // If existed, extract the sourceName from this old style path, otherwise return an error. + glog.V(3).Infof("SourceName %s wrong, fallback to old format", sourceName) + sourceName, err = plugin.getDeviceNameFromOldMountPath(mounter, mountPath) + if err != nil { + return nil, err + } + s = dstrings.Split(sourceName, "-image-") + if len(s) != 2 { + return nil, fmt.Errorf("sourceName %s wrong, should be pool+\"-image-\"+imageName", sourceName) + } } rbdVolume := &v1.Volume{ Name: volumeName, @@ -492,6 +511,22 @@ func (plugin *rbdPlugin) newUnmapperInternal(volName string, podUID types.UID, m }, nil } +func (plugin *rbdPlugin) getDeviceNameFromOldMountPath(mounter mount.Interface, mountPath string) (string, error) { + refs, err := mount.GetMountRefsByDev(mounter, mountPath) + if err != nil { + return "", err + } + // baseMountPath is the prefix of deprecated device global mounted path, + // such as: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd + baseMountPath := filepath.Join(plugin.host.GetPluginDir(rbdPluginName), "rbd") + for _, ref := range refs { + if dstrings.HasPrefix(ref, baseMountPath) { + return filepath.Rel(baseMountPath, ref) + } + } + return "", fmt.Errorf("can't find source name from mounted path: %s", mountPath) +} + func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil { return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil") diff --git a/pkg/volume/rbd/rbd_test.go b/pkg/volume/rbd/rbd_test.go index 23b9b969f96..9107fe7f5be 100644 --- a/pkg/volume/rbd/rbd_test.go +++ b/pkg/volume/rbd/rbd_test.go @@ -325,7 +325,7 @@ func TestPlugin(t *testing.T) { }, }, expectedDevicePath: "/dev/rbd1", - expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool1-image-image1", tmpDir), + expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool1-image-image1", tmpDir), expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol1", tmpDir, podUID), }) cases = append(cases, &testcase{ @@ -353,7 +353,7 @@ func TestPlugin(t *testing.T) { }, }, expectedDevicePath: "/dev/rbd1", - expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool2-image-image2", tmpDir), + expectedDeviceMountPath: 
fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool2-image-image2", tmpDir), expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol2", tmpDir, podUID), }) @@ -450,3 +450,127 @@ func TestGetSecretNameAndNamespace(t *testing.T) { t.Errorf("getSecretNameAndNamespace returned incorrect values, expected %s and %s but got %s and %s", secretName, secretNamespace, foundSecretName, foundSecretNamespace) } } + +// https://github.com/kubernetes/kubernetes/issues/57744 +func TestGetDeviceMountPath(t *testing.T) { + tmpDir, err := utiltesting.MkTmpdir("rbd_test") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil) + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost) + plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd") + if err != nil { + t.Errorf("Can't find the plugin by name") + } + fdm := NewFakeDiskManager() + + // attacher + attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm) + if err != nil { + t.Errorf("Failed to make a new Attacher: %v", err) + } + + pool, image := "pool", "image" + spec := volume.NewSpecFromVolume(&v1.Volume{ + Name: "vol", + VolumeSource: v1.VolumeSource{ + RBD: &v1.RBDVolumeSource{ + CephMonitors: []string{"a", "b"}, + RBDPool: pool, + RBDImage: image, + FSType: "ext4", + }, + }, + }) + + deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image) + canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image) + + type testCase struct { + deprecated bool + targetPath string + } + for _, c := range []testCase{ + {false, canonicalDir}, + {true, deprecatedDir}, + } { + if c.deprecated { + // This is a deprecated device mount path, we create it, + // and hope attacher.GetDeviceMountPath return c.targetPath. 
+ if err := os.MkdirAll(c.targetPath, 0700); err != nil { + t.Fatalf("Create deprecated mount path failed: %v", err) + } + } + mountPath, err := attacher.GetDeviceMountPath(spec) + if err != nil { + t.Fatalf("GetDeviceMountPath failed: %v", err) + } + if mountPath != c.targetPath { + t.Errorf("Mismatch device mount path: wanted %s, got %s", c.targetPath, mountPath) + } + } +} + +// https://github.com/kubernetes/kubernetes/issues/57744 +func TestConstructVolumeSpec(t *testing.T) { + tmpDir, err := utiltesting.MkTmpdir("rbd_test") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil) + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost) + plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd") + if err != nil { + t.Errorf("Can't find the plugin by name") + } + fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter) + + pool, image, volumeName := "pool", "image", "vol" + podMountPath := fmt.Sprintf("%s/pods/pod123/volumes/kubernetes.io~rbd/%s", tmpDir, volumeName) + deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image) + canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image) + + type testCase struct { + volumeName string + targetPath string + } + + for _, c := range []testCase{ + {"vol", canonicalDir}, + {"vol", deprecatedDir}, + } { + if err := os.MkdirAll(c.targetPath, 0700); err != nil { + t.Fatalf("Create mount path %s failed: %v", c.targetPath, err) + } + if err = fakeMounter.Mount("/dev/rbd0", c.targetPath, "fake", nil); err != nil { + t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err) + } + if err = fakeMounter.Mount(c.targetPath, podMountPath, "fake", []string{"bind"}); err != nil { + t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err) + } + spec, err := plug.ConstructVolumeSpec(c.volumeName, podMountPath) + if err != nil { + t.Errorf("ConstructVolumeSpec failed: %v", err) + } else { + if spec.Volume.RBD.RBDPool != pool { + t.Errorf("Mismatch rbd pool: wanted %s, got %s", pool, spec.Volume.RBD.RBDPool) + } + if spec.Volume.RBD.RBDImage != image { + t.Fatalf("Mismatch rbd image: wanted %s, got %s", image, spec.Volume.RBD.RBDImage) + } + } + if err = fakeMounter.Unmount(podMountPath); err != nil { + t.Fatalf("Unmount pod path %s failed: %v", podMountPath, err) + } + if err = fakeMounter.Unmount(c.targetPath); err != nil { + t.Fatalf("Unmount device path %s failed: %v", c.targetPath, err) + } + } +} diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index 4f6d9e0121f..bbb936b1290 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -37,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/wait" fileutil "k8s.io/kubernetes/pkg/util/file" + "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" @@ -110,9 +111,18 @@ func waitForPath(pool, image string, maxRetries int) (string, bool) { return "", false } -// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/rbd/pool-image-image +// make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/pool-image-image func makePDNameInternal(host volume.VolumeHost, pool string, image string) string { - return 
path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image) + // Backward compatibility for the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image + deprecatedDir := path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image) + info, err := os.Stat(deprecatedDir) + if err == nil && info.IsDir() { + // The device mount path has already been created with the deprecated format, return it. + glog.V(5).Infof("Deprecated format path %s found", deprecatedDir) + return deprecatedDir + } + // Return the canonical format path. + return path.Join(host.GetPluginDir(rbdPluginName), mount.MountsInGlobalPDPath, pool+"-image-"+image) } // make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/volumeDevices/pool-image-image From 86ffa59d340c7b8d31d7ee3655d8a83053dcd95b Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 5 Jan 2018 16:13:51 +0800 Subject: [PATCH 060/264] refactor customresource handler --- .../pkg/apiserver/apiserver.go | 1 - .../pkg/apiserver/customresource_handler.go | 140 ++++++++++-------- 2 files changed, 75 insertions(+), 66 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go index de90cef9068..b8af7d55595 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go @@ -177,7 +177,6 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) versionDiscoveryHandler, groupDiscoveryHandler, s.GenericAPIServer.RequestContextMapper(), - s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions().Lister(), s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), delegateHandler, c.ExtraConfig.CRDRESTOptionsGetter, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 773a0657726..3112b9353fd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -44,12 +44,13 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/endpoints/handlers" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" apirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/client-go/discovery" - cache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/cache" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiservervalidation "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" @@ -67,6 +68,9 @@ type crdHandler struct { customStorageLock sync.Mutex // customStorage contains a crdStorageMap + // atomic.Value has a very good read performance compared to sync.RWMutex + // see https://gist.github.com/dim/152e6bf80e1384ea72e17ac717a5000a + // which is suited for most read and rarely write cases customStorage atomic.Value requestContextMapper apirequest.RequestContextMapper @@ -96,7 +100,6 @@ func NewCustomResourceDefinitionHandler( versionDiscoveryHandler *versionDiscoveryHandler, groupDiscoveryHandler *groupDiscoveryHandler, requestContextMapper apirequest.RequestContextMapper, - crdLister 
listers.CustomResourceDefinitionLister, crdInformer informers.CustomResourceDefinitionInformer, delegate http.Handler, restOptionsGetter generic.RESTOptionsGetter, @@ -106,7 +109,7 @@ func NewCustomResourceDefinitionHandler( groupDiscoveryHandler: groupDiscoveryHandler, customStorage: atomic.Value{}, requestContextMapper: requestContextMapper, - crdLister: crdLister, + crdLister: crdInformer.Lister(), delegate: delegate, restOptionsGetter: restOptionsGetter, admission: admission, @@ -120,19 +123,20 @@ func NewCustomResourceDefinitionHandler( }) ret.customStorage.Store(crdStorageMap{}) + return ret } func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { ctx, ok := r.requestContextMapper.Get(req) if !ok { - // programmer error - panic("missing context") + responsewriters.InternalError(w, req, fmt.Errorf("no context found for request")) + return } requestInfo, ok := apirequest.RequestInfoFrom(ctx) if !ok { - // programmer error - panic("missing requestInfo") + responsewriters.InternalError(w, req, fmt.Errorf("no RequestInfo found in the context")) + return } if !requestInfo.IsResourceRequest { pathParts := splitPath(requestInfo.Path) @@ -168,6 +172,7 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } if !apiextensions.IsCRDConditionTrue(crd, apiextensions.Established) { r.delegate.ServeHTTP(w, req) + return } if len(requestInfo.Subresource) > 0 { http.NotFound(w, req) @@ -176,7 +181,7 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { terminating := apiextensions.IsCRDConditionTrue(crd, apiextensions.Terminating) - crdInfo, err := r.getServingInfoFor(crd) + crdInfo, err := r.getOrCreateServingInfoFor(crd) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -242,19 +247,52 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } } +func (r *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) { + oldCRD := oldObj.(*apiextensions.CustomResourceDefinition) + newCRD := newObj.(*apiextensions.CustomResourceDefinition) + + r.customStorageLock.Lock() + defer r.customStorageLock.Unlock() + + storageMap := r.customStorage.Load().(crdStorageMap) + oldInfo, found := storageMap[newCRD.UID] + if !found { + return + } + if apiequality.Semantic.DeepEqual(&newCRD.Spec, oldInfo.spec) && apiequality.Semantic.DeepEqual(&newCRD.Status.AcceptedNames, oldInfo.acceptedNames) { + glog.V(6).Infof("Ignoring customresourcedefinition %s update because neither spec, nor accepted names changed", oldCRD.Name) + return + } + + glog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) + + // Copy because we cannot write to storageMap without a race + // as it is used without locking elsewhere. + storageMap2 := storageMap.clone() + if oldInfo, ok := storageMap2[types.UID(oldCRD.UID)]; ok { + oldInfo.storage.DestroyFunc() + delete(storageMap2, types.UID(oldCRD.UID)) + } + + r.customStorage.Store(storageMap2) +} + // removeDeadStorage removes REST storage that isn't being used func (r *crdHandler) removeDeadStorage() { - // these don't have to be live. A snapshot is fine - // if we wrongly delete, that's ok. The rest storage will be recreated on the next request - // if we wrongly miss one, that's ok. 
We'll get it next time - storageMap := r.customStorage.Load().(crdStorageMap) allCustomResourceDefinitions, err := r.crdLister.List(labels.Everything()) if err != nil { utilruntime.HandleError(err) return } - for uid, s := range storageMap { + r.customStorageLock.Lock() + defer r.customStorageLock.Unlock() + + storageMap := r.customStorage.Load().(crdStorageMap) + // Copy because we cannot write to storageMap without a race + // as it is used without locking elsewhere + storageMap2 := storageMap.clone() + for uid, s := range storageMap2 { found := false for _, crd := range allCustomResourceDefinitions { if crd.UID == uid { @@ -265,38 +303,33 @@ func (r *crdHandler) removeDeadStorage() { if !found { glog.V(4).Infof("Removing dead CRD storage for %v", s.requestScope.Resource) s.storage.DestroyFunc() - delete(storageMap, uid) + delete(storageMap2, uid) } } - - r.customStorageLock.Lock() - defer r.customStorageLock.Unlock() - - r.customStorage.Store(storageMap) + r.customStorage.Store(storageMap2) } // GetCustomResourceListerCollectionDeleter returns the ListerCollectionDeleter for // the given uid, or nil if one does not exist. func (r *crdHandler) GetCustomResourceListerCollectionDeleter(crd *apiextensions.CustomResourceDefinition) finalizer.ListerCollectionDeleter { - info, err := r.getServingInfoFor(crd) + info, err := r.getOrCreateServingInfoFor(crd) if err != nil { utilruntime.HandleError(err) } return info.storage } -func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefinition) (*crdInfo, error) { +func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResourceDefinition) (*crdInfo, error) { storageMap := r.customStorage.Load().(crdStorageMap) - ret, ok := storageMap[crd.UID] - if ok { + if ret, ok := storageMap[crd.UID]; ok { return ret, nil } r.customStorageLock.Lock() defer r.customStorageLock.Unlock() - ret, ok = storageMap[crd.UID] - if ok { + storageMap = r.customStorage.Load().(crdStorageMap) + if ret, ok := storageMap[crd.UID]; ok { return ret, nil } @@ -384,7 +417,7 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti MetaGroupVersion: metav1.SchemeGroupVersion, } - ret = &crdInfo{ + ret := &crdInfo{ spec: &crd.Spec, acceptedNames: &crd.Status.AcceptedNames, @@ -392,16 +425,13 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti requestScope: requestScope, } - storageMap2 := make(crdStorageMap, len(storageMap)) - // Copy because we cannot write to storageMap without a race - // as it is used without locking elsewhere - for k, v := range storageMap { - storageMap2[k] = v - } + // as it is used without locking elsewhere. 
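// Aside (illustrative, not part of this patch): every writer in this handler
// follows the same copy-on-write discipline: take customStorageLock, Load the
// current map, clone it, mutate only the clone, then Store the clone, so that
// readers can keep calling customStorage.Load() without any lock. A
// self-contained sketch of the pattern with plain types (it needs only the
// sync and sync/atomic packages):

type cowRegistry struct {
	mu    sync.Mutex   // serializes writers only
	store atomic.Value // always holds a map[string]int that is never mutated
}

func (r *cowRegistry) get(key string) (int, bool) {
	m, _ := r.store.Load().(map[string]int) // lock-free read
	v, ok := m[key]
	return v, ok
}

func (r *cowRegistry) set(key string, value int) {
	r.mu.Lock()
	defer r.mu.Unlock()
	old, _ := r.store.Load().(map[string]int)
	next := make(map[string]int, len(old)+1)
	for k, v := range old { // copy; never touch the published map
		next[k] = v
	}
	next[key] = value
	r.store.Store(next) // readers switch to the new map atomically
}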
+ storageMap2 := storageMap.clone() storageMap2[crd.UID] = ret r.customStorage.Store(storageMap2) + return ret, nil } @@ -423,39 +453,6 @@ func (c crdObjectConverter) ConvertFieldLabel(version, kind, label, value string } } -func (c *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) { - oldCRD := oldObj.(*apiextensions.CustomResourceDefinition) - newCRD := newObj.(*apiextensions.CustomResourceDefinition) - - c.customStorageLock.Lock() - defer c.customStorageLock.Unlock() - storageMap := c.customStorage.Load().(crdStorageMap) - - oldInfo, found := storageMap[newCRD.UID] - if !found { - return - } - if apiequality.Semantic.DeepEqual(&newCRD.Spec, oldInfo.spec) && apiequality.Semantic.DeepEqual(&newCRD.Status.AcceptedNames, oldInfo.acceptedNames) { - glog.V(6).Infof("Ignoring customresourcedefinition %s update because neither spec, nor accepted names changed", oldCRD.Name) - return - } - - glog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) - storageMap2 := make(crdStorageMap, len(storageMap)) - - // Copy because we cannot write to storageMap without a race - // as it is used without locking elsewhere - for k, v := range storageMap { - if k == oldCRD.UID { - v.storage.DestroyFunc() - continue - } - storageMap2[k] = v - } - - c.customStorage.Store(storageMap2) -} - type unstructuredNegotiatedSerializer struct { typer runtime.ObjectTyper creator runtime.ObjectCreater @@ -578,3 +575,16 @@ func (t CRDRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (gen } return ret, nil } + +// clone returns a clone of the provided crdStorageMap. +// The clone is a shallow copy of the map. +func (in crdStorageMap) clone() crdStorageMap { + if in == nil { + return nil + } + out := make(crdStorageMap, len(in)) + for key, value := range in { + out[key] = value + } + return out +} From e7530405456daafd014cb8e7702d5ce177dbf9e7 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 5 Jan 2018 16:14:00 +0800 Subject: [PATCH 061/264] run update bazel and staging-godep --- staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json | 4 ++++ .../src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD | 1 + 2 files changed, 5 insertions(+) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 08607c1ddcf..b00cf3779c8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -1790,6 +1790,10 @@ "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/request", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index a3842388097..dbed4134690 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -54,6 +54,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/handlers:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", From d8924c1da89518b102f2c2260258a1740118aeb0 Mon Sep 17 00:00:00 2001 From: Antoine Cotten Date: Tue, 9 Jan 2018 13:54:50 +0100 Subject: [PATCH 062/264] Make code generators log to stderr by default --- staging/src/k8s.io/code-generator/cmd/client-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go | 1 + staging/src/k8s.io/code-generator/cmd/informer-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/lister-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go | 1 + 8 files changed, 8 insertions(+) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go index 5869d83a1d5..ca829c30b1b 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go @@ -39,6 +39,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go index 4cba1c6bdf2..afb060762b1 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -55,6 +55,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go index bb506cf7f9e..5eabfd1b6a3 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -63,6 +63,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go index 89d2b39bc6a..4f55680b4f8 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -62,6 +62,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go index ce1756c11a6..847a6a5a02b 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go @@ -29,6 +29,7 @@ var g = protobuf.New() func init() { g.BindFlags(flag.CommandLine) + goflag.Set("logtostderr", "true") 
flag.CommandLine.AddGoFlagSet(goflag.CommandLine) } diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go index fc6feeb30bc..e993e620eb5 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go @@ -41,6 +41,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go index a89817f4957..6c48240aca2 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go @@ -38,6 +38,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go index e3c6f6c6cd8..c324c10bab9 100644 --- a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go @@ -40,6 +40,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() From e9cf3f1ac4c97bea716fbac03d2cd71c5189d5c4 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Sat, 16 Dec 2017 01:38:46 -0500 Subject: [PATCH 063/264] Handle Unhealthy devices Update node capacity with sum of both healthy and unhealthy devices. Node allocatable reflect only healthy devices. --- pkg/kubelet/cm/container_manager.go | 5 +- pkg/kubelet/cm/container_manager_linux.go | 2 +- pkg/kubelet/cm/container_manager_stub.go | 4 +- pkg/kubelet/cm/deviceplugin/manager.go | 74 +++++++++++++----- pkg/kubelet/cm/deviceplugin/manager_stub.go | 4 +- pkg/kubelet/cm/deviceplugin/manager_test.go | 86 +++++++++++++-------- pkg/kubelet/cm/deviceplugin/types.go | 4 +- pkg/kubelet/kubelet_node_status.go | 13 +++- 8 files changed, 128 insertions(+), 64 deletions(-) diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index da7bf4a4642..4f16fa6f9d9 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -70,9 +70,10 @@ type ContainerManager interface { // GetCapacity returns the amount of compute resources tracked by container manager available on the node. GetCapacity() v1.ResourceList - // GetDevicePluginResourceCapacity returns the amount of device plugin resources available on the node + // GetDevicePluginResourceCapacity returns the node capacity (amount of total device plugin resources), + // node allocatable (amount of total healthy resources reported by device plugin), // and inactive device plugin resources previously registered on the node. 
- GetDevicePluginResourceCapacity() (v1.ResourceList, []string) + GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) // UpdateQOSCgroups performs housekeeping updates to ensure that the top // level QoS containers have their desired state in a thread-safe way diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 6c6c7068172..feb2f0219ae 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -887,6 +887,6 @@ func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { return cm.capacity } -func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { +func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { return cm.devicePluginManager.GetCapacity() } diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 27a86849582..0f7516e0515 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -70,8 +70,8 @@ func (cm *containerManagerStub) GetCapacity() v1.ResourceList { return nil } -func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { - return nil, []string{} +func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} } func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager { diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index 5e1137c1149..1c55e898480 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -73,8 +73,11 @@ type ManagerImpl struct { // e.g. a new device is advertised, two old devices are deleted and a running device fails. callback monitorCallback - // allDevices contains all of registered resourceNames and their exported device IDs. - allDevices map[string]sets.String + // healthyDevices contains all of the registered healthy resourceNames and their exported device IDs. + healthyDevices map[string]sets.String + + // unhealthyDevices contains all of the unhealthy devices and their exported device IDs. + unhealthyDevices map[string]sets.String // allocatedDevices contains allocated deviceIds, keyed by resourceName. allocatedDevices map[string]sets.String @@ -106,7 +109,8 @@ func newManagerImpl(socketPath string) (*ManagerImpl, error) { endpoints: make(map[string]endpoint), socketname: file, socketdir: dir, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), + unhealthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } @@ -128,20 +132,24 @@ func newManagerImpl(socketPath string) (*ManagerImpl, error) { func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, added, updated, deleted []pluginapi.Device) { kept := append(updated, added...) m.mutex.Lock() - if _, ok := m.allDevices[resourceName]; !ok { - m.allDevices[resourceName] = sets.NewString() + if _, ok := m.healthyDevices[resourceName]; !ok { + m.healthyDevices[resourceName] = sets.NewString() + } + if _, ok := m.unhealthyDevices[resourceName]; !ok { + m.unhealthyDevices[resourceName] = sets.NewString() } - // For now, Manager only keeps track of healthy devices. - // TODO: adds support to track unhealthy devices. 
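// Aside (illustrative, not part of this patch): with genericDeviceUpdateCallback
// now sorting each reported device into either healthyDevices or
// unhealthyDevices, the two quantities exposed to the node status fall out as
// simple set arithmetic: capacity counts every registered device, allocatable
// only the healthy ones. A sketch of that arithmetic, assuming the sets.String
// type already used here:

func capacityAndAllocatable(healthy, unhealthy sets.String) (capacity, allocatable int64) {
	capacity = int64(healthy.Len() + unhealthy.Len()) // everything the plugin registered
	allocatable = int64(healthy.Len())                // only devices that can be scheduled
	return capacity, allocatable
}

// With two healthy devices and one unhealthy one, which is the situation the
// assertions in TestUpdateCapacityAllocatable below appear to set up, this
// yields a capacity of 3 and an allocatable of 2.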
for _, dev := range kept { if dev.Health == pluginapi.Healthy { - m.allDevices[resourceName].Insert(dev.ID) + m.healthyDevices[resourceName].Insert(dev.ID) + m.unhealthyDevices[resourceName].Delete(dev.ID) } else { - m.allDevices[resourceName].Delete(dev.ID) + m.unhealthyDevices[resourceName].Insert(dev.ID) + m.healthyDevices[resourceName].Delete(dev.ID) } } for _, dev := range deleted { - m.allDevices[resourceName].Delete(dev.ID) + m.healthyDevices[resourceName].Delete(dev.ID) + m.unhealthyDevices[resourceName].Delete(dev.ID) } m.mutex.Unlock() m.writeCheckpoint() @@ -371,7 +379,8 @@ func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { // GetCapacity is expected to be called when Kubelet updates its node status. // The first returned variable contains the registered device plugin resource capacity. -// The second returned variable contains previously registered resources that are no longer active. +// The second returned variable contains the registered device plugin resource allocatable. +// The third returned variable contains previously registered resources that are no longer active. // Kubelet uses this information to update resource capacity/allocatable in its node status. // After the call, device plugin can remove the inactive resources from its internal list as the // change is already reflected in Kubelet node status. @@ -380,25 +389,47 @@ func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { // cm.UpdatePluginResource() run during predicate Admit guarantees we adjust nodeinfo // capacity for already allocated pods so that they can continue to run. However, new pods // requiring device plugin resources will not be scheduled till device plugin re-registers. -func (m *ManagerImpl) GetCapacity() (v1.ResourceList, []string) { +func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) { needsUpdateCheckpoint := false var capacity = v1.ResourceList{} + var allocatable = v1.ResourceList{} var deletedResources []string m.mutex.Lock() - for resourceName, devices := range m.allDevices { + for resourceName, devices := range m.healthyDevices { if _, ok := m.endpoints[resourceName]; !ok { - delete(m.allDevices, resourceName) + delete(m.healthyDevices, resourceName) deletedResources = append(deletedResources, resourceName) needsUpdateCheckpoint = true } else { capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI) + allocatable[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI) + } + } + for resourceName, devices := range m.unhealthyDevices { + if _, ok := m.endpoints[resourceName]; !ok { + delete(m.unhealthyDevices, resourceName) + alreadyDeleted := false + for _, name := range deletedResources { + if name == resourceName { + alreadyDeleted = true + } + } + if !alreadyDeleted { + deletedResources = append(deletedResources, resourceName) + } + needsUpdateCheckpoint = true + } else { + capacityCount := capacity[v1.ResourceName(resourceName)] + unhealthyCount := *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI) + capacityCount.Add(unhealthyCount) + capacity[v1.ResourceName(resourceName)] = capacityCount } } m.mutex.Unlock() if needsUpdateCheckpoint { m.writeCheckpoint() } - return capacity, deletedResources + return capacity, allocatable, deletedResources } // checkpointData struct is used to store pod to device allocation information @@ -416,7 +447,7 @@ func (m *ManagerImpl) writeCheckpoint() error { PodDeviceEntries: 
m.podDevices.toCheckpointData(), RegisteredDevices: make(map[string][]string), } - for resource, devices := range m.allDevices { + for resource, devices := range m.healthyDevices { data.RegisteredDevices[resource] = devices.UnsortedList() } m.mutex.Unlock() @@ -453,9 +484,10 @@ func (m *ManagerImpl) readCheckpoint() error { m.podDevices.fromCheckpointData(data.PodDeviceEntries) m.allocatedDevices = m.podDevices.devices() for resource, devices := range data.RegisteredDevices { - m.allDevices[resource] = sets.NewString() + // TODO: Support Checkpointing for unhealthy devices as well + m.healthyDevices[resource] = sets.NewString() for _, dev := range devices { - m.allDevices[resource].Insert(dev) + m.healthyDevices[resource].Insert(dev) } } return nil @@ -508,7 +540,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi } glog.V(3).Infof("Needs to allocate %v %v for pod %q container %q", needed, resource, podUID, contName) // Needs to allocate additional devices. - if _, ok := m.allDevices[resource]; !ok { + if _, ok := m.healthyDevices[resource]; !ok { return nil, fmt.Errorf("can't allocate unregistered device %v", resource) } devices = sets.NewString() @@ -527,7 +559,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // Gets Devices in use. devicesInUse := m.allocatedDevices[resource] // Gets a list of available devices. - available := m.allDevices[resource].Difference(devicesInUse) + available := m.healthyDevices[resource].Difference(devicesInUse) if int(available.Len()) < needed { return nil, fmt.Errorf("requested number of devices unavailable for %s. Requested: %d, Available: %d", resource, needed, available.Len()) } @@ -557,7 +589,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont resource := string(k) needed := int(v.Value()) glog.V(3).Infof("needs %d %s", needed, resource) - _, registeredResource := m.allDevices[resource] + _, registeredResource := m.healthyDevices[resource] _, allocatedResource := m.allocatedDevices[resource] // Continues if this is neither an active device plugin resource nor // a resource we have previously allocated. diff --git a/pkg/kubelet/cm/deviceplugin/manager_stub.go b/pkg/kubelet/cm/deviceplugin/manager_stub.go index 903a0077a2c..c0f6e2d2f17 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_stub.go +++ b/pkg/kubelet/cm/deviceplugin/manager_stub.go @@ -58,6 +58,6 @@ func (h *ManagerStub) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co } // GetCapacity simply returns nil capacity and empty removed resource list. 
-func (h *ManagerStub) GetCapacity() (v1.ResourceList, []string) { - return nil, []string{} +func (h *ManagerStub) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} } diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index d7a032694c5..33ad9e08a4c 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -149,7 +149,7 @@ func cleanup(t *testing.T, m Manager, p *Stub) { m.Stop() } -func TestUpdateCapacity(t *testing.T) { +func TestUpdateCapacityAllocatable(t *testing.T) { testManager, err := newManagerImpl(socketName) as := assert.New(t) as.NotNil(testManager) @@ -167,61 +167,81 @@ func TestUpdateCapacity(t *testing.T) { resourceName1 := "domain1.com/resource1" testManager.endpoints[resourceName1] = &endpointImpl{devices: make(map[string]pluginapi.Device)} callback(resourceName1, devs, []pluginapi.Device{}, []pluginapi.Device{}) - capacity, removedResources := testManager.GetCapacity() + capacity, allocatable, removedResources := testManager.GetCapacity() resource1Capacity, ok := capacity[v1.ResourceName(resourceName1)] as.True(ok) - as.Equal(int64(2), resource1Capacity.Value()) + resource1Allocatable, ok := allocatable[v1.ResourceName(resourceName1)] + as.True(ok) + as.Equal(int64(3), resource1Capacity.Value()) + as.Equal(int64(2), resource1Allocatable.Value()) as.Equal(0, len(removedResources)) - // Deletes an unhealthy device should NOT change capacity. + // Deletes an unhealthy device should NOT change allocatable but change capacity. callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{}, []pluginapi.Device{devs[2]}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)] as.True(ok) + resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)] + as.True(ok) as.Equal(int64(2), resource1Capacity.Value()) + as.Equal(int64(2), resource1Allocatable.Value()) as.Equal(0, len(removedResources)) - // Updates a healthy device to unhealthy should reduce capacity by 1. + // Updates a healthy device to unhealthy should reduce allocatable by 1. dev2 := devs[1] dev2.Health = pluginapi.Unhealthy callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{dev2}, []pluginapi.Device{}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)] as.True(ok) - as.Equal(int64(1), resource1Capacity.Value()) + resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)] + as.True(ok) + as.Equal(int64(2), resource1Capacity.Value()) + as.Equal(int64(1), resource1Allocatable.Value()) as.Equal(0, len(removedResources)) - // Deletes a healthy device should reduce capacity by 1. + // Deletes a healthy device should reduce capacity and allocatable by 1. 
callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{}, []pluginapi.Device{devs[0]}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)] as.True(ok) - as.Equal(int64(0), resource1Capacity.Value()) + resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)] + as.True(ok) + as.Equal(int64(0), resource1Allocatable.Value()) + as.Equal(int64(1), resource1Capacity.Value()) as.Equal(0, len(removedResources)) // Tests adding another resource. resourceName2 := "resource2" testManager.endpoints[resourceName2] = &endpointImpl{devices: make(map[string]pluginapi.Device)} callback(resourceName2, devs, []pluginapi.Device{}, []pluginapi.Device{}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() as.Equal(2, len(capacity)) resource2Capacity, ok := capacity[v1.ResourceName(resourceName2)] as.True(ok) - as.Equal(int64(2), resource2Capacity.Value()) + resource2Allocatable, ok := allocatable[v1.ResourceName(resourceName2)] + as.True(ok) + as.Equal(int64(3), resource2Capacity.Value()) + as.Equal(int64(2), resource2Allocatable.Value()) as.Equal(0, len(removedResources)) // Removes resourceName1 endpoint. Verifies testManager.GetCapacity() reports that resourceName1 - // is removed from capacity and it no longer exists in allDevices after the call. + // is removed from capacity and it no longer exists in healthyDevices after the call. delete(testManager.endpoints, resourceName1) - capacity, removed := testManager.GetCapacity() + capacity, allocatable, removed := testManager.GetCapacity() as.Equal([]string{resourceName1}, removed) _, ok = capacity[v1.ResourceName(resourceName1)] as.False(ok) val, ok := capacity[v1.ResourceName(resourceName2)] as.True(ok) - as.Equal(int64(2), val.Value()) - _, ok = testManager.allDevices[resourceName1] + as.Equal(int64(3), val.Value()) + _, ok = testManager.healthyDevices[resourceName1] as.False(ok) + _, ok = testManager.unhealthyDevices[resourceName1] + as.False(ok) + fmt.Println("removed: ", removed) + as.Equal(1, len(removed)) + } type stringPairType struct { @@ -270,7 +290,7 @@ func TestCheckpoint(t *testing.T) { defer os.RemoveAll(tmpDir) testManager := &ManagerImpl{ socketdir: tmpDir, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } @@ -294,19 +314,19 @@ func TestCheckpoint(t *testing.T) { constructAllocResp(map[string]string{"/dev/r1dev4": "/dev/r1dev4"}, map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{})) - testManager.allDevices[resourceName1] = sets.NewString() - testManager.allDevices[resourceName1].Insert("dev1") - testManager.allDevices[resourceName1].Insert("dev2") - testManager.allDevices[resourceName1].Insert("dev3") - testManager.allDevices[resourceName1].Insert("dev4") - testManager.allDevices[resourceName1].Insert("dev5") - testManager.allDevices[resourceName2] = sets.NewString() - testManager.allDevices[resourceName2].Insert("dev1") - testManager.allDevices[resourceName2].Insert("dev2") + testManager.healthyDevices[resourceName1] = sets.NewString() + testManager.healthyDevices[resourceName1].Insert("dev1") + testManager.healthyDevices[resourceName1].Insert("dev2") + testManager.healthyDevices[resourceName1].Insert("dev3") + testManager.healthyDevices[resourceName1].Insert("dev4") + 
testManager.healthyDevices[resourceName1].Insert("dev5") + testManager.healthyDevices[resourceName2] = sets.NewString() + testManager.healthyDevices[resourceName2].Insert("dev1") + testManager.healthyDevices[resourceName2].Insert("dev2") expectedPodDevices := testManager.podDevices expectedAllocatedDevices := testManager.podDevices.devices() - expectedAllDevices := testManager.allDevices + expectedAllDevices := testManager.healthyDevices err = testManager.writeCheckpoint() @@ -331,7 +351,7 @@ func TestCheckpoint(t *testing.T) { } } as.True(reflect.DeepEqual(expectedAllocatedDevices, testManager.allocatedDevices)) - as.True(reflect.DeepEqual(expectedAllDevices, testManager.allDevices)) + as.True(reflect.DeepEqual(expectedAllDevices, testManager.healthyDevices)) } type activePodsStub struct { @@ -388,7 +408,7 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso testManager := &ManagerImpl{ socketdir: tmpDir, callback: monitorCallback, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), endpoints: make(map[string]endpoint), podDevices: make(podDevices), @@ -397,9 +417,9 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso } testManager.store, _ = utilstore.NewFileStore("/tmp/", utilfs.DefaultFs{}) for _, res := range testRes { - testManager.allDevices[res.resourceName] = sets.NewString() + testManager.healthyDevices[res.resourceName] = sets.NewString() for _, dev := range res.devs { - testManager.allDevices[res.resourceName].Insert(dev) + testManager.healthyDevices[res.resourceName].Insert(dev) } if res.resourceName == "domain1.com/resource1" { testManager.endpoints[res.resourceName] = &MockEndpoint{ @@ -682,7 +702,7 @@ func TestSanitizeNodeAllocatable(t *testing.T) { testManager := &ManagerImpl{ callback: monitorCallback, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } diff --git a/pkg/kubelet/cm/deviceplugin/types.go b/pkg/kubelet/cm/deviceplugin/types.go index c4465a8be4c..d27b11e845e 100644 --- a/pkg/kubelet/cm/deviceplugin/types.go +++ b/pkg/kubelet/cm/deviceplugin/types.go @@ -53,9 +53,9 @@ type Manager interface { // for the found one. An empty struct is returned in case no cached state is found. GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) *DeviceRunContainerOptions - // GetCapacity returns the amount of available device plugin resource capacity + // GetCapacity returns the amount of available device plugin resource capacity, resource allocatable // and inactive device plugin resources previously registered on the node. - GetCapacity() (v1.ResourceList, []string) + GetCapacity() (v1.ResourceList, v1.ResourceList, []string) } // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices. diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 006283e808f..696ac04a1b6 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -550,6 +550,10 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { } } + var devicePluginAllocatable v1.ResourceList + var devicePluginCapacity v1.ResourceList + var removedDevicePlugins []string + // TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start // cAdvisor locally, e.g. 
for test-cmd.sh, and in integration test. info, err := kl.GetCachedMachineInfo() @@ -594,13 +598,14 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { } } - devicePluginCapacity, removedDevicePlugins := kl.containerManager.GetDevicePluginResourceCapacity() + devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = kl.containerManager.GetDevicePluginResourceCapacity() if devicePluginCapacity != nil { for k, v := range devicePluginCapacity { glog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) node.Status.Capacity[k] = v } } + for _, removedResource := range removedDevicePlugins { glog.V(2).Infof("Remove capacity for %s", removedResource) delete(node.Status.Capacity, v1.ResourceName(removedResource)) @@ -631,6 +636,12 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { } node.Status.Allocatable[k] = value } + if devicePluginAllocatable != nil { + for k, v := range devicePluginAllocatable { + glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value()) + node.Status.Allocatable[k] = v + } + } // for every huge page reservation, we need to remove it from allocatable memory for k, v := range node.Status.Capacity { if v1helper.IsHugePageResourceName(k) { From 31ff8c6b9a08d1168502e423bdd3fdbe4f2f729b Mon Sep 17 00:00:00 2001 From: Ryan Phillips Date: Tue, 19 Dec 2017 10:04:23 -0600 Subject: [PATCH 064/264] etcd client: add keepalive --- .../pkg/storage/storagebackend/factory/etcd3.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index a5ccbf2fdd4..48bba73110b 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -17,6 +17,8 @@ limitations under the License. package factory import ( + "time" + "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/transport" "golang.org/x/net/context" @@ -27,6 +29,13 @@ import ( "k8s.io/apiserver/pkg/storage/value" ) +// The short keepalive timeout and interval have been chosen to aggressively +// detect a failed etcd server without introducing much overhead. 
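(Editorial sketch, not part of the patch: roughly, with the two values declared below the client pings the etcd server every keepaliveTime and treats the connection as failed if no acknowledgement arrives within keepaliveTimeout. Assuming the etcd clientv3 and time packages, the settings end up on the client config as in this fragment; the endpoint shown is hypothetical.)

	// Sketch only: wiring the keepalive knobs into an etcd v3 client config.
	cfg := clientv3.Config{
		Endpoints:            []string{"https://127.0.0.1:2379"}, // hypothetical endpoint
		DialKeepAliveTime:    30 * time.Second,                   // how often the client pings the server (keepaliveTime)
		DialKeepAliveTimeout: 10 * time.Second,                   // how long it waits for a reply before treating the link as dead (keepaliveTimeout)
	}
	client, err := clientv3.New(cfg)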
+var ( + keepaliveTime = 30 * time.Second + keepaliveTimeout = 10 * time.Second +) + func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) { tlsInfo := transport.TLSInfo{ CertFile: c.CertFile, @@ -43,8 +52,10 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e tlsConfig = nil } cfg := clientv3.Config{ - Endpoints: c.ServerList, - TLS: tlsConfig, + DialKeepAliveTime: keepaliveTime, + DialKeepAliveTimeout: keepaliveTimeout, + Endpoints: c.ServerList, + TLS: tlsConfig, } client, err := clientv3.New(cfg) if err != nil { From f416e38a1e377c83d7defc49d4306c455de012de Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 9 Jan 2018 15:54:39 -0500 Subject: [PATCH 065/264] make controller port exposure optional --- cmd/kube-controller-manager/app/controllermanager.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 85dc51de5c8..25de45f5041 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -128,7 +128,9 @@ func Run(s *options.CMServer) error { return err } - go startHTTP(s) + if s.Port >= 0 { + go startHTTP(s) + } recorder := createRecorder(kubeClient) From d9b5773101e930431f24fe178d988271c1becc35 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Fri, 22 Dec 2017 17:09:51 -0500 Subject: [PATCH 066/264] Treat staging repos as authoritative for all files Move files from kubernetes/foo root back to kubernetes/kubernetes/staging/src/k8s.io/foo root Then: - add CONTRIBUTING.md for all staging repos - add .PULL_REQUEST_TEMPLATE to all staging repos - ignore .github while diffing generated protobuf --- hack/verify-generated-protobuf.sh | 2 +- .../api/.github/PULL_REQUEST_TEMPLATE.md | 2 ++ staging/src/k8s.io/api/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../apiextensions-apiserver/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../src/k8s.io/apimachinery/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ staging/src/k8s.io/client-go/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../src/k8s.io/code-generator/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../k8s.io/kube-aggregator/CONTRIBUTING.md | 7 ++++++ .../metrics/.github/PULL_REQUEST_TEMPLATE.md | 2 ++ staging/src/k8s.io/metrics/CONTRIBUTING.md | 25 +++---------------- .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../k8s.io/sample-apiserver/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../k8s.io/sample-controller/CONTRIBUTING.md | 7 ++++++ 20 files changed, 80 insertions(+), 23 deletions(-) create mode 100644 staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/api/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/apimachinery/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/client-go/CONTRIBUTING.md create mode 100644 
staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/code-generator/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/sample-controller/CONTRIBUTING.md diff --git a/hack/verify-generated-protobuf.sh b/hack/verify-generated-protobuf.sh index 3c8518c4af3..a57f0fe4d77 100755 --- a/hack/verify-generated-protobuf.sh +++ b/hack/verify-generated-protobuf.sh @@ -43,7 +43,7 @@ for APIROOT in ${APIROOTS}; do TMP_APIROOT="${_tmp}/${APIROOT}" echo "diffing ${APIROOT} against freshly generated protobuf" ret=0 - diff -Naupr -I 'Auto generated by' -x 'zz_generated.*' "${KUBE_ROOT}/${APIROOT}" "${TMP_APIROOT}" || ret=$? + diff -Naupr -I 'Auto generated by' -x 'zz_generated.*' -x '.github' "${KUBE_ROOT}/${APIROOT}" "${TMP_APIROOT}" || ret=$? cp -a "${TMP_APIROOT}"/* "${KUBE_ROOT}/${APIROOT}/" if [[ $ret -eq 0 ]]; then echo "${APIROOT} up to date." diff --git a/staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/api/CONTRIBUTING.md b/staging/src/k8s.io/api/CONTRIBUTING.md new file mode 100644 index 00000000000..d9e171333cc --- /dev/null +++ b/staging/src/k8s.io/api/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/api](https://git.k8s.io/kubernetes/staging/src/k8s.io/api) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md b/staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md new file mode 100644 index 00000000000..cea7c91b260 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/apiextensions-apiserver](https://git.k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/apimachinery/CONTRIBUTING.md b/staging/src/k8s.io/apimachinery/CONTRIBUTING.md new file mode 100644 index 00000000000..41eb4c62bc6 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/apimachinery](https://git.k8s.io/kubernetes/staging/src/k8s.io/apimachinery) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. 
diff --git a/staging/src/k8s.io/client-go/CONTRIBUTING.md b/staging/src/k8s.io/client-go/CONTRIBUTING.md new file mode 100644 index 00000000000..df408d019e6 --- /dev/null +++ b/staging/src/k8s.io/client-go/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/client-go](https://git.k8s.io/kubernetes/staging/src/k8s.io/client-go) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/code-generator/CONTRIBUTING.md b/staging/src/k8s.io/code-generator/CONTRIBUTING.md new file mode 100644 index 00000000000..da7836aa972 --- /dev/null +++ b/staging/src/k8s.io/code-generator/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/code-generator](https://git.k8s.io/kubernetes/staging/src/k8s.io/code-generator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md b/staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md new file mode 100644 index 00000000000..483b3cbc435 --- /dev/null +++ b/staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. 
+ +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/kube-aggregator](https://git.k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/metrics/CONTRIBUTING.md b/staging/src/k8s.io/metrics/CONTRIBUTING.md index 18eca2f0726..e35f90ad56e 100644 --- a/staging/src/k8s.io/metrics/CONTRIBUTING.md +++ b/staging/src/k8s.io/metrics/CONTRIBUTING.md @@ -1,26 +1,7 @@ # Contributing guidelines -## How to become a contributor and submit your own code +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. -### Contributor License Agreements +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/metrics](https://git.k8s.io/kubernetes/staging/src/k8s.io/metrics) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). -We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles. - -Please fill out either the individual or corporate Contributor License Agreement (CLA). - - * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](https://identity.linuxfoundation.org/node/285/node/285/individual-signup). - * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](https://identity.linuxfoundation.org/node/285/organization-signup). - -Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests. - -### Contributing A Patch - -1. Submit an issue describing your proposed change to the repo in question. -1. The [repo owners](OWNERS) will respond to your issue promptly. -1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). -1. Fork the desired repo, develop and test your code changes. -1. Submit a pull request. - -### Adding dependencies - -If your patch depends on new packages, add that package with [`godep`](https://github.com/tools/godep). Follow the [instructions to add a dependency](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/development.md#godep-and-dependency-management). 
+Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md b/staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md new file mode 100644 index 00000000000..8379e606490 --- /dev/null +++ b/staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-apiserver](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/sample-controller/CONTRIBUTING.md b/staging/src/k8s.io/sample-controller/CONTRIBUTING.md new file mode 100644 index 00000000000..3598f4aa55a --- /dev/null +++ b/staging/src/k8s.io/sample-controller/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-controller](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-controller) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). 
+ +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information From 5029bb56c434c0099fd1d2e78de7531c69430753 Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Tue, 9 Jan 2018 15:58:18 -0800 Subject: [PATCH 067/264] Let mutating webhook defaults the object after applying the patch sent back by the webhook --- .../plugin/webhook/mutating/admission.go | 8 +- test/e2e/apimachinery/webhook.go | 78 +++++++++++++++++++ test/images/webhook/Makefile | 4 +- test/images/webhook/main.go | 33 ++++++++ test/utils/image/manifest.go | 2 +- 5 files changed, 120 insertions(+), 5 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go index f944152770d..ec0ae942b69 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go @@ -112,6 +112,7 @@ type MutatingWebhook struct { namespaceMatcher namespace.Matcher clientManager config.ClientManager convertor versioned.Convertor + defaulter runtime.ObjectDefaulter jsonSerializer runtime.Serializer } @@ -137,6 +138,7 @@ func (a *MutatingWebhook) SetScheme(scheme *runtime.Scheme) { Serializer: serializer.NewCodecFactory(scheme).LegacyCodec(admissionv1beta1.SchemeGroupVersion), })) a.convertor.Scheme = scheme + a.defaulter = scheme a.jsonSerializer = json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false) } } @@ -171,6 +173,9 @@ func (a *MutatingWebhook) ValidateInitialization() error { if err := a.convertor.Validate(); err != nil { return fmt.Errorf("MutatingWebhook.convertor is not properly setup: %v", err) } + if a.defaulter == nil { + return fmt.Errorf("MutatingWebhook.defaulter is not properly setup: %v") + } go a.hookSource.Run(wait.NeverStop) return nil } @@ -312,10 +317,9 @@ func (a *MutatingWebhook) callAttrMutatingHook(ctx context.Context, h *v1beta1.W if err != nil { return apierrors.NewInternalError(err) } - // TODO: if we have multiple mutating webhooks, we can remember the json - // instead of encoding and decoding for each one. if _, _, err := a.jsonSerializer.Decode(patchedJS, nil, attr.Object); err != nil { return apierrors.NewInternalError(err) } + a.defaulter.Default(attr.Object) return nil } diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 755055c9696..63d69315f1f 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -133,6 +133,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() { testMutatingConfigMapWebhook(f) }) + It("Should mutate pod and apply defaults after mutation", func() { + registerMutatingWebhookForPod(f, context) + defer client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingWebhookConfigName, nil) + testMutatingPodWebhook(f) + }) + It("Should mutate crd", func() { crdCleanup, dynamicClient := createCRD(f) defer crdCleanup() @@ -423,6 +429,7 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certCo // The webhook configuration is honored in 1s. 
time.Sleep(10 * time.Second) } + func testMutatingConfigMapWebhook(f *framework.Framework) { By("create a configmap that should be updated by the webhook") client := f.ClientSet @@ -439,6 +446,77 @@ func testMutatingConfigMapWebhook(f *framework.Framework) { } } +func registerMutatingWebhookForPod(f *framework.Framework, context *certContext) { + client := f.ClientSet + By("Registering the mutating pod webhook via the AdmissionRegistration API") + + namespace := f.Namespace.Name + + _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: mutatingWebhookConfigName, + }, + Webhooks: []v1beta1.Webhook{ + { + Name: "adding-init-container.k8s.io", + Rules: []v1beta1.RuleWithOperations{{ + Operations: []v1beta1.OperationType{v1beta1.Create}, + Rule: v1beta1.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"pods"}, + }, + }}, + ClientConfig: v1beta1.WebhookClientConfig{ + Service: &v1beta1.ServiceReference{ + Namespace: namespace, + Name: serviceName, + Path: strPtr("/mutating-pods"), + }, + CABundle: context.signingCert, + }, + }, + }, + }) + framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", mutatingWebhookConfigName, namespace) + + // The webhook configuration is honored in 1s. + time.Sleep(10 * time.Second) +} + +func testMutatingPodWebhook(f *framework.Framework) { + By("create a pod that should be updated by the webhook") + client := f.ClientSet + configMap := toBeMutatedPod(f) + mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(configMap) + Expect(err).To(BeNil()) + if len(mutatedPod.Spec.InitContainers) != 1 { + framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers) + } + if got, expected := mutatedPod.Spec.InitContainers[0].Name, "webhook-added-init-container"; got != expected { + framework.Failf("expect the init container name to be %q, got %q", expected, got) + } + if got, expected := mutatedPod.Spec.InitContainers[0].TerminationMessagePolicy, v1.TerminationMessageReadFile; got != expected { + framework.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got) + } +} + +func toBeMutatedPod(f *framework.Framework) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "webhook-to-be-mutated", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "example", + Image: framework.GetPauseImageName(f.ClientSet), + }, + }, + }, + } +} + func testWebhook(f *framework.Framework) { By("create a pod that should be denied by the webhook") client := f.ClientSet diff --git a/test/images/webhook/Makefile b/test/images/webhook/Makefile index a201dd5b233..d9ce02940ef 100644 --- a/test/images/webhook/Makefile +++ b/test/images/webhook/Makefile @@ -14,7 +14,7 @@ build: CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o webhook . - docker build --no-cache -t gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7 . + docker build --no-cache -t gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.9v1 . 
rm -rf webhook push: - gcloud docker -- push gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7 + gcloud docker -- push gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.9v1 diff --git a/test/images/webhook/main.go b/test/images/webhook/main.go index da2e4e9d3fb..bdf68ba9c72 100644 --- a/test/images/webhook/main.go +++ b/test/images/webhook/main.go @@ -40,6 +40,9 @@ const ( patch2 string = `[ { "op": "add", "path": "/data/mutation-stage-2", "value": "yes" } ]` + addInitContainerPatch string = `[ + {"op":"add","path":"/spec/initContainers","value":[{"image":"webhook-added-image","name":"webhook-added-init-container","resources":{}}]} + ]` ) // Config contains the server (the webhook) cert and key. @@ -108,6 +111,31 @@ func admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { return &reviewResponse } +func mutatePods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { + glog.V(2).Info("mutating pods") + podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} + if ar.Request.Resource != podResource { + glog.Errorf("expect resource to be %s", podResource) + return nil + } + + raw := ar.Request.Object.Raw + pod := corev1.Pod{} + deserializer := codecs.UniversalDeserializer() + if _, _, err := deserializer.Decode(raw, nil, &pod); err != nil { + glog.Error(err) + return toAdmissionResponse(err) + } + reviewResponse := v1beta1.AdmissionResponse{} + reviewResponse.Allowed = true + if pod.Name == "webhook-to-be-mutated" { + reviewResponse.Patch = []byte(addInitContainerPatch) + pt := v1beta1.PatchTypeJSONPatch + reviewResponse.PatchType = &pt + } + return &reviewResponse +} + // deny configmaps with specific key-value pair. func admitConfigMaps(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { glog.V(2).Info("admitting configmaps") @@ -271,6 +299,10 @@ func servePods(w http.ResponseWriter, r *http.Request) { serve(w, r, admitPods) } +func serveMutatePods(w http.ResponseWriter, r *http.Request) { + serve(w, r, mutatePods) +} + func serveConfigmaps(w http.ResponseWriter, r *http.Request) { serve(w, r, admitConfigMaps) } @@ -293,6 +325,7 @@ func main() { flag.Parse() http.HandleFunc("/pods", servePods) + http.HandleFunc("/mutating-pods", serveMutatePods) http.HandleFunc("/configmaps", serveConfigmaps) http.HandleFunc("/mutating-configmaps", serveMutateConfigmaps) http.HandleFunc("/crd", serveCRD) diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 7b19ed01861..bac17165482 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -48,7 +48,7 @@ func (i *ImageConfig) SetVersion(version string) { } var ( - AdmissionWebhook = ImageConfig{e2eRegistry, "k8s-sample-admission-webhook", "1.8v7", true} + AdmissionWebhook = ImageConfig{e2eRegistry, "k8s-sample-admission-webhook", "1.9v1", true} APIServer = ImageConfig{e2eRegistry, "k8s-aggregator-sample-apiserver", "1.7v2", true} AppArmorLoader = ImageConfig{gcRegistry, "apparmor-loader", "0.1", false} BusyBox = ImageConfig{gcRegistry, "busybox", "1.24", false} From 6894e3d32bc9990c82b0681aff280e0f9b844db3 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:21:53 -0800 Subject: [PATCH 068/264] Support utilities --- .../providers/gce/cloud/utils.go | 167 +++++++++++++++ .../providers/gce/cloud/utils_test.go | 197 ++++++++++++++++++ 2 files changed, 364 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/utils.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/utils_test.go 
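(Editorial sketch ahead of the diff below: the utils.go added by this patch exposes ParseResourceURL and SelfLink. The standalone snippet here shows roughly how they round-trip a compute resource URL, assuming the import paths introduced in the diff; the project and address names are invented for illustration.)

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	// Parse a full compute URL back into its project, resource and key parts.
	id, err := cloud.ParseResourceURL(
		"https://www.googleapis.com/compute/v1/projects/my-proj/regions/us-central1/addresses/my-addr")
	if err != nil {
		panic(err)
	}
	fmt.Println(id.ProjectID, id.Resource, id.Key) // my-proj addresses Key{"my-addr", region: "us-central1"}

	// Rebuild the self link from the parsed pieces.
	link := cloud.SelfLink(meta.VersionGA, id.ProjectID, id.Resource, *id.Key)
	fmt.Println(link)
}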
diff --git a/pkg/cloudprovider/providers/gce/cloud/utils.go b/pkg/cloudprovider/providers/gce/cloud/utils.go new file mode 100644 index 00000000000..dd4a07cfd05 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/utils.go @@ -0,0 +1,167 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +const ( + gaPrefix = "https://www.googleapis.com/compute/v1/" + alphaPrefix = "https://www.googleapis.com/compute/alpha/" + betaPrefix = "https://www.googleapis.com/compute/beta/" +) + +var ( + allPrefixes = []string{gaPrefix, alphaPrefix, betaPrefix} +) + +// ResourceID identifies a GCE resource as parsed from compute resource URL. +type ResourceID struct { + ProjectID string + Resource string + Key *meta.Key +} + +// Equal returns true if two resource IDs are equal. +func (r *ResourceID) Equal(other *ResourceID) bool { + if r.ProjectID != other.ProjectID || r.Resource != other.Resource { + return false + } + if r.Key != nil && other.Key != nil { + return *r.Key == *other.Key + } + if r.Key == nil && other.Key == nil { + return true + } + return false +} + +// ParseResourceURL parses resource URLs of the following formats: +// +// projects//global// +// projects//regions/// +// projects//zones/// +// [https://www.googleapis.com/compute/]/projects//global// +// [https://www.googleapis.com/compute/]/projects//regions/// +// [https://www.googleapis.com/compute/]/projects//zones/// +func ParseResourceURL(url string) (*ResourceID, error) { + errNotValid := fmt.Errorf("%q is not a valid resource URL", url) + + // Remove the "https://..." 
prefix if present + for _, prefix := range allPrefixes { + if strings.HasPrefix(url, prefix) { + if len(url) < len(prefix) { + return nil, errNotValid + } + url = url[len(prefix):] + break + } + } + + parts := strings.Split(url, "/") + if len(parts) < 2 || parts[0] != "projects" { + return nil, errNotValid + } + + ret := &ResourceID{ProjectID: parts[1]} + if len(parts) == 2 { + ret.Resource = "projects" + return ret, nil + } + + if len(parts) < 4 { + return nil, errNotValid + } + + if len(parts) == 4 { + switch parts[2] { + case "regions": + ret.Resource = "regions" + ret.Key = meta.GlobalKey(parts[3]) + return ret, nil + case "zones": + ret.Resource = "zones" + ret.Key = meta.GlobalKey(parts[3]) + return ret, nil + default: + return nil, errNotValid + } + } + + switch parts[2] { + case "global": + if len(parts) != 5 { + return nil, errNotValid + } + ret.Resource = parts[3] + ret.Key = meta.GlobalKey(parts[4]) + return ret, nil + case "regions": + if len(parts) != 6 { + return nil, errNotValid + } + ret.Resource = parts[4] + ret.Key = meta.RegionalKey(parts[5], parts[3]) + return ret, nil + case "zones": + if len(parts) != 6 { + return nil, errNotValid + } + ret.Resource = parts[4] + ret.Key = meta.ZonalKey(parts[5], parts[3]) + return ret, nil + } + return nil, errNotValid +} + +func copyViaJSON(dest, src interface{}) error { + bytes, err := json.Marshal(src) + if err != nil { + return err + } + return json.Unmarshal(bytes, dest) +} + +// SelfLink returns the self link URL for the given object. +func SelfLink(ver meta.Version, project, resource string, key meta.Key) string { + var prefix string + switch ver { + case meta.VersionAlpha: + prefix = alphaPrefix + case meta.VersionBeta: + prefix = betaPrefix + case meta.VersionGA: + prefix = gaPrefix + default: + prefix = "invalid-prefix" + } + + switch key.Type() { + case meta.Zonal: + return fmt.Sprintf("%sprojects/%s/zones/%s/%s/%s", prefix, project, key.Zone, resource, key.Name) + case meta.Regional: + return fmt.Sprintf("%sprojects/%s/regions/%s/%s/%s", prefix, project, key.Region, resource, key.Name) + case meta.Global: + return fmt.Sprintf("%sprojects/%s/%s/%s", prefix, project, resource, key.Name) + } + return "invalid-self-link" +} diff --git a/pkg/cloudprovider/providers/gce/cloud/utils_test.go b/pkg/cloudprovider/providers/gce/cloud/utils_test.go new file mode 100644 index 00000000000..823c8e73c88 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/utils_test.go @@ -0,0 +1,197 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloud + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +func TestParseResourceURL(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + in string + r *ResourceID + }{ + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project", + &ResourceID{"some-gce-project", "projects", nil}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/regions/us-central1", + &ResourceID{"some-gce-project", "regions", meta.GlobalKey("us-central1")}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/zones/us-central1-b", + &ResourceID{"some-gce-project", "zones", meta.GlobalKey("us-central1-b")}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/global/operations/operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf", + &ResourceID{"some-gce-project", "operations", meta.GlobalKey("operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf")}, + }, + { + "https://www.googleapis.com/compute/alpha/projects/some-gce-project/regions/us-central1/addresses/my-address", + &ResourceID{"some-gce-project", "addresses", meta.RegionalKey("my-address", "us-central1")}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/zones/us-central1-c/instances/instance-1", + &ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "us-central1-c")}, + }, + { + "projects/some-gce-project", + &ResourceID{"some-gce-project", "projects", nil}, + }, + { + "projects/some-gce-project/regions/us-central1", + &ResourceID{"some-gce-project", "regions", meta.GlobalKey("us-central1")}, + }, + { + "projects/some-gce-project/zones/us-central1-b", + &ResourceID{"some-gce-project", "zones", meta.GlobalKey("us-central1-b")}, + }, + { + "projects/some-gce-project/global/operations/operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf", + &ResourceID{"some-gce-project", "operations", meta.GlobalKey("operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf")}, + }, + { + "projects/some-gce-project/regions/us-central1/addresses/my-address", + &ResourceID{"some-gce-project", "addresses", meta.RegionalKey("my-address", "us-central1")}, + }, + { + "projects/some-gce-project/zones/us-central1-c/instances/instance-1", + &ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "us-central1-c")}, + }, + } { + r, err := ParseResourceURL(tc.in) + if err != nil { + t.Errorf("ParseResourceURL(%q) = %+v, %v; want _, nil", tc.in, r, err) + continue + } + if !r.Equal(tc.r) { + t.Errorf("ParseResourceURL(%q) = %+v, nil; want %+v, nil", tc.in, r, tc.r) + } + } + // Malformed URLs. 
+ for _, tc := range []string{ + "", + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/c/d", + "/a/b/c/d/e", + "/a/b/c/d/e/f", + "https://www.googleapis.com/compute/v1/projects/some-gce-project/global", + "projects/some-gce-project/global", + "projects/some-gce-project/global/foo/bar/baz", + "projects/some-gce-project/zones/us-central1-c/res", + "projects/some-gce-project/zones/us-central1-c/res/name/extra", + "https://www.googleapis.com/compute/gamma/projects/some-gce-project/global/addresses/name", + } { + r, err := ParseResourceURL(tc) + if err == nil { + t.Errorf("ParseResourceURL(%q) = %+v, %v, want _, error", tc, r, err) + } + } +} + +type A struct { + A, B, C string +} + +type B struct { + A, B, D string +} + +type E struct{} + +func (*E) MarshalJSON() ([]byte, error) { + return nil, errors.New("injected error") +} + +func TestCopyVisJSON(t *testing.T) { + t.Parallel() + + var b B + srcA := &A{"aa", "bb", "cc"} + err := copyViaJSON(&b, srcA) + if err != nil { + t.Errorf(`copyViaJSON(&b, %+v) = %v, want nil`, srcA, err) + } else { + expectedB := B{"aa", "bb", ""} + if b != expectedB { + t.Errorf("b == %+v, want %+v", b, expectedB) + } + } + + var a A + srcB := &B{"aaa", "bbb", "ccc"} + err = copyViaJSON(&a, srcB) + if err != nil { + t.Errorf(`copyViaJSON(&a, %+v) = %v, want nil`, srcB, err) + } else { + expectedA := A{"aaa", "bbb", ""} + if a != expectedA { + t.Errorf("a == %+v, want %+v", a, expectedA) + } + } + + if err := copyViaJSON(&a, &E{}); err == nil { + t.Errorf("copyViaJSON(&a, &E{}) = nil, want error") + } +} + +func TestSelfLink(t *testing.T) { + t.Parallel() + + for _, tc := range []struct{ + ver meta.Version + project string + resource string + key meta.Key + want string + }{ + { + meta.VersionAlpha, + "proj1", + "addresses", + *meta.RegionalKey("key1", "us-central1"), + "https://www.googleapis.com/compute/alpha/projects/proj1/regions/us-central1/addresses/key1", + }, + { + meta.VersionBeta, + "proj3", + "disks", + *meta.ZonalKey("key2", "us-central1-b"), + "https://www.googleapis.com/compute/beta/projects/proj3/zones/us-central1-b/disks/key2", + }, + { + meta.VersionGA, + "proj4", + "urlMaps", + *meta.GlobalKey("key3"), + "https://www.googleapis.com/compute/v1/projects/proj4/urlMaps/key3", + }, + }{ + if link := SelfLink(tc.ver, tc.project, tc.resource, tc.key); link != tc.want { + t.Errorf("SelfLink(%v, %q, %q, %v) = %v, want %q", tc.ver, tc.project, tc.resource, tc.key, link, tc.want) + } + } +} From b19149406eda9730a8f233ab374e3521fdc08016 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:06 -0800 Subject: [PATCH 069/264] "meta" type descriptions used for code generation --- .../providers/gce/cloud/meta/doc.go | 19 + .../providers/gce/cloud/meta/key.go | 96 +++++ .../providers/gce/cloud/meta/key_test.go | 75 ++++ .../providers/gce/cloud/meta/meta.go | 372 ++++++++++++++++++ .../providers/gce/cloud/meta/method.go | 241 ++++++++++++ .../providers/gce/cloud/meta/service.go | 273 +++++++++++++ 6 files changed, 1076 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/doc.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/key.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/key_test.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/meta.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/method.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/service.go diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/doc.go 
b/pkg/cloudprovider/providers/gce/cloud/meta/doc.go new file mode 100644 index 00000000000..7aa24e06379 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package meta contains the meta description of the GCE cloud types to +// generate code for. +package meta diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/key.go b/pkg/cloudprovider/providers/gce/cloud/meta/key.go new file mode 100644 index 00000000000..fff2543c7b0 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/key.go @@ -0,0 +1,96 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" +) + +// Key for a GCP resource. +type Key struct { + Name string + Zone string + Region string +} + +// KeyType is the type of the key. +type KeyType string + +const ( + // Zonal key type. + Zonal = "zonal" + // Regional key type. + Regional = "regional" + // Global key type. + Global = "global" +) + +// ZonalKey returns the key for a zonal resource. +func ZonalKey(name, zone string) *Key { + return &Key{name, zone, ""} +} + +// RegionalKey returns the key for a regional resource. +func RegionalKey(name, region string) *Key { + return &Key{name, "", region} +} + +// GlobalKey returns the key for a global resource. +func GlobalKey(name string) *Key { + return &Key{name, "", ""} +} + +// Type returns the type of the key. +func (k *Key) Type() KeyType { + switch { + case k.Zone != "": + return Zonal + case k.Region != "": + return Regional + default: + return Global + } +} + +// String returns a string representation of the key. +func (k Key) String() string { + switch k.Type() { + case Zonal: + return fmt.Sprintf("Key{%q, zone: %q}", k.Name, k.Zone) + case Regional: + return fmt.Sprintf("Key{%q, region: %q}", k.Name, k.Region) + default: + return fmt.Sprintf("Key{%q}", k.Name) + } +} + +// Valid is true if the key is valid. +func (k *Key) Valid(typeName string) bool { + if k.Zone != "" && k.Region != "" { + return false + } + return true +} + +// KeysToMap creates a map[Key]bool from a list of keys. 
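(Editorial sketch, not part of the patch: the key constructors and accessors defined above behave roughly as follows; the resource names are invented.)

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	k := meta.ZonalKey("instance-1", "us-central1-b")
	fmt.Println(k.Type(), k.Valid("Instance"))                     // zonal true (only one of Zone/Region is set)
	fmt.Println(meta.RegionalKey("my-addr", "us-central1").Type()) // regional
	fmt.Println(meta.GlobalKey("my-url-map").Type())               // global
}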
+func KeysToMap(keys ...Key) map[Key]bool { + ret := map[Key]bool{} + for _, k := range keys { + ret[k] = true + } + return ret +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go b/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go new file mode 100644 index 00000000000..0f1a6df8b4c --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "testing" +) + +func TestKeyType(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + key *Key + want KeyType + }{ + {GlobalKey("abc"), Global}, + {ZonalKey("abc", "us-central1-b"), Zonal}, + {RegionalKey("abc", "us-central1"), Regional}, + } { + if tc.key.Type() != tc.want { + t.Errorf("key.Type() == %v, want %v", tc.key.Type(), tc.want) + } + } +} + +func TestKeyString(t *testing.T) { + t.Parallel() + + for _, k := range []*Key{ + GlobalKey("abc"), + RegionalKey("abc", "us-central1"), + ZonalKey("abc", "us-central1-b"), + } { + if k.String() == "" { + t.Errorf(`k.String() = "", want non-empty`) + } + } +} + +func TestKeyValid(t *testing.T) { + t.Parallel() + + region := "us-central1" + zone := "us-central1-b" + + for _, tc := range []struct { + key *Key + typeName string + want bool + }{ + // Note: these test cases need to be synchronized with the + // actual settings for each type. + {GlobalKey("abc"), "UrlMap", true}, + {&Key{"abc", zone, region}, "UrlMap", false}, + } { + valid := tc.key.Valid(tc.typeName) + if valid != tc.want { + t.Errorf("key %+v, type %v; key.Valid() = %v, want %v", tc.key, tc.typeName, valid, tc.want) + } + } +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go new file mode 100644 index 00000000000..3f60c00f412 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -0,0 +1,372 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "reflect" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" +) + +// Version of the API (ga, alpha, beta). +type Version string + +const ( + // NoGet prevents the Get() method from being generated. + NoGet = 1 << iota + // NoList prevents the List() method from being generated. + NoList = 1 << iota + // NoDelete prevents the Delete() method from being generated. 
+ NoDelete = 1 << iota + // NoInsert prevents the Insert() method from being generated. + NoInsert = 1 << iota + // CustomOps specifies that an empty interface xxxOps will be generated to + // enable custom method calls to be attached to the generated service + // interface. + CustomOps = 1 << iota + // AggregatedList will generated a method for AggregatedList(). + AggregatedList = 1 << iota + + // ReadOnly specifies that the given resource is read-only and should not + // have insert() or delete() methods generated for the wrapper. + ReadOnly = NoDelete | NoInsert + + // VersionGA is the API version in compute.v1. + VersionGA Version = "ga" + // VersionAlpha is the API version in computer.v0.alpha. + VersionAlpha Version = "alpha" + // VersionBeta is the API version in computer.v0.beta. + VersionBeta Version = "beta" +) + +// AllVersions is a list of all versions of the GCE API. +var AllVersions = []Version{ + VersionGA, + VersionAlpha, + VersionBeta, +} + +// AllServices are a list of all the services to generate code for. Keep +// this list in lexiographical order by object type. +var AllServices = []*ServiceInfo{ + &ServiceInfo{ + Object: "Address", + Service: "Addresses", + Resource: "addresses", + keyType: Regional, + serviceType: reflect.TypeOf(&ga.AddressesService{}), + }, + &ServiceInfo{ + Object: "Address", + Service: "Addresses", + Resource: "addresses", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.AddressesService{}), + }, + &ServiceInfo{ + Object: "Address", + Service: "Addresses", + Resource: "addresses", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.AddressesService{}), + }, + &ServiceInfo{ + Object: "Address", + Service: "GlobalAddresses", + Resource: "addresses", + keyType: Global, + serviceType: reflect.TypeOf(&ga.GlobalAddressesService{}), + }, + &ServiceInfo{ + Object: "BackendService", + Service: "BackendServices", + Resource: "backendServices", + keyType: Global, + serviceType: reflect.TypeOf(&ga.BackendServicesService{}), + additionalMethods: []string{ + "GetHealth", + "Update", + }, + }, + &ServiceInfo{ + Object: "BackendService", + Service: "BackendServices", + Resource: "backendServices", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.BackendServicesService{}), + additionalMethods: []string{"Update"}, + }, + &ServiceInfo{ + Object: "BackendService", + Service: "RegionBackendServices", + Resource: "backendServices", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.RegionBackendServicesService{}), + additionalMethods: []string{ + "GetHealth", + "Update", + }, + }, + &ServiceInfo{ + Object: "Disk", + Service: "Disks", + Resource: "disks", + keyType: Zonal, + serviceType: reflect.TypeOf(&ga.DisksService{}), + }, + &ServiceInfo{ + Object: "Disk", + Service: "Disks", + Resource: "disks", + version: VersionAlpha, + keyType: Zonal, + serviceType: reflect.TypeOf(&alpha.DisksService{}), + }, + &ServiceInfo{ + Object: "Disk", + Service: "RegionDisks", + Resource: "disks", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.DisksService{}), + }, + &ServiceInfo{ + Object: "Firewall", + Service: "Firewalls", + Resource: "firewalls", + keyType: Global, + serviceType: reflect.TypeOf(&ga.FirewallsService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "ForwardingRule", + Service: "ForwardingRules", + Resource: "forwardingRules", + keyType: Regional, + serviceType: 
reflect.TypeOf(&ga.ForwardingRulesService{}), + }, + &ServiceInfo{ + Object: "ForwardingRule", + Service: "ForwardingRules", + Resource: "forwardingRules", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.ForwardingRulesService{}), + }, + &ServiceInfo{ + Object: "ForwardingRule", + Service: "GlobalForwardingRules", + Resource: "forwardingRules", + keyType: Global, + serviceType: reflect.TypeOf(&ga.GlobalForwardingRulesService{}), + additionalMethods: []string{ + "SetTarget", + }, + }, + &ServiceInfo{ + Object: "HealthCheck", + Service: "HealthChecks", + Resource: "healthChecks", + keyType: Global, + serviceType: reflect.TypeOf(&ga.HealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "HealthCheck", + Service: "HealthChecks", + Resource: "healthChecks", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.HealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "HttpHealthCheck", + Service: "HttpHealthChecks", + Resource: "httpHealthChecks", + keyType: Global, + serviceType: reflect.TypeOf(&ga.HttpHealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "HttpsHealthCheck", + Service: "HttpsHealthChecks", + Resource: "httpsHealthChecks", + keyType: Global, + serviceType: reflect.TypeOf(&ga.HttpsHealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "InstanceGroup", + Service: "InstanceGroups", + Resource: "instanceGroups", + keyType: Zonal, + serviceType: reflect.TypeOf(&ga.InstanceGroupsService{}), + additionalMethods: []string{ + "AddInstances", + "ListInstances", + "RemoveInstances", + "SetNamedPorts", + }, + }, + &ServiceInfo{ + Object: "Instance", + Service: "Instances", + Resource: "instances", + keyType: Zonal, + serviceType: reflect.TypeOf(&ga.InstancesService{}), + additionalMethods: []string{ + "AttachDisk", + "DetachDisk", + }, + }, + &ServiceInfo{ + Object: "Instance", + Service: "Instances", + Resource: "instances", + version: VersionBeta, + keyType: Zonal, + serviceType: reflect.TypeOf(&beta.InstancesService{}), + additionalMethods: []string{ + "AttachDisk", + "DetachDisk", + }, + }, + &ServiceInfo{ + Object: "Instance", + Service: "Instances", + Resource: "instances", + version: VersionAlpha, + keyType: Zonal, + serviceType: reflect.TypeOf(&alpha.InstancesService{}), + additionalMethods: []string{ + "AttachDisk", + "DetachDisk", + "UpdateNetworkInterface", + }, + }, + &ServiceInfo{ + Object: "NetworkEndpointGroup", + Service: "NetworkEndpointGroups", + Resource: "networkEndpointGroups", + version: VersionAlpha, + keyType: Zonal, + serviceType: reflect.TypeOf(&alpha.NetworkEndpointGroupsService{}), + additionalMethods: []string{ + "AttachNetworkEndpoints", + "DetachNetworkEndpoints", + }, + options: AggregatedList, + }, + &ServiceInfo{ + Object: "Project", + Service: "Projects", + Resource: "projects", + keyType: Global, + // Generate only the stub with no methods. 
+ options: NoGet | NoList | NoInsert | NoDelete | CustomOps, + serviceType: reflect.TypeOf(&ga.ProjectsService{}), + }, + &ServiceInfo{ + Object: "Region", + Service: "Regions", + Resource: "regions", + keyType: Global, + options: ReadOnly, + serviceType: reflect.TypeOf(&ga.RegionsService{}), + }, + &ServiceInfo{ + Object: "Route", + Service: "Routes", + Resource: "routes", + keyType: Global, + serviceType: reflect.TypeOf(&ga.RoutesService{}), + }, + &ServiceInfo{ + Object: "SslCertificate", + Service: "SslCertificates", + Resource: "sslCertificates", + keyType: Global, + serviceType: reflect.TypeOf(&ga.SslCertificatesService{}), + }, + &ServiceInfo{ + Object: "TargetHttpProxy", + Service: "TargetHttpProxies", + Resource: "targetHttpProxies", + keyType: Global, + serviceType: reflect.TypeOf(&ga.TargetHttpProxiesService{}), + additionalMethods: []string{ + "SetUrlMap", + }, + }, + &ServiceInfo{ + Object: "TargetHttpsProxy", + Service: "TargetHttpsProxies", + Resource: "targetHttpsProxies", + keyType: Global, + serviceType: reflect.TypeOf(&ga.TargetHttpsProxiesService{}), + additionalMethods: []string{ + "SetSslCertificates", + "SetUrlMap", + }, + }, + &ServiceInfo{ + Object: "TargetPool", + Service: "TargetPools", + Resource: "targetPools", + keyType: Regional, + serviceType: reflect.TypeOf(&ga.TargetPoolsService{}), + additionalMethods: []string{ + "AddInstance", + "RemoveInstance", + }, + }, + &ServiceInfo{ + Object: "UrlMap", + Service: "UrlMaps", + Resource: "urlMaps", + keyType: Global, + serviceType: reflect.TypeOf(&ga.UrlMapsService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "Zone", + Service: "Zones", + Resource: "zones", + keyType: Global, + options: ReadOnly, + serviceType: reflect.TypeOf(&ga.ZonesService{}), + }, +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/method.go b/pkg/cloudprovider/providers/gce/cloud/meta/method.go new file mode 100644 index 00000000000..5adf065fae4 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/method.go @@ -0,0 +1,241 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/glog" +) + +func newArg(t reflect.Type) *arg { + ret := &arg{} + + // Dereference the pointer types to get at the underlying concrete type. +Loop: + for { + switch t.Kind() { + case reflect.Ptr: + ret.numPtr++ + t = t.Elem() + default: + ret.pkg = t.PkgPath() + ret.typeName += t.Name() + break Loop + } + } + return ret +} + +type arg struct { + pkg, typeName string + numPtr int +} + +func (a *arg) normalizedPkg() string { + if a.pkg == "" { + return "" + } + + // Strip the repo.../vendor/ prefix from the package path if present. + parts := strings.Split(a.pkg, "/") + // Remove vendor prefix. + for i := 0; i < len(parts); i++ { + if parts[i] == "vendor" { + parts = parts[i+1:] + break + } + } + switch strings.Join(parts, "/") { + case "google.golang.org/api/compute/v1": + return "ga." 
+ case "google.golang.org/api/compute/v0.alpha": + return "alpha." + case "google.golang.org/api/compute/v0.beta": + return "beta." + default: + panic(fmt.Errorf("unhandled package %q", a.pkg)) + } +} + +func (a *arg) String() string { + var ret string + for i := 0; i < a.numPtr; i++ { + ret += "*" + } + ret += a.normalizedPkg() + ret += a.typeName + return ret +} + +// newMethod returns a newly initialized method. +func newMethod(s *ServiceInfo, m reflect.Method) *Method { + ret := &Method{s, m, ""} + ret.init() + return ret +} + +// Method is used to generate the calling code non-standard methods. +type Method struct { + *ServiceInfo + m reflect.Method + + ReturnType string +} + +// argsSkip is the number of arguments to skip when generating the +// synthesized method. +func (mr *Method) argsSkip() int { + switch mr.keyType { + case Zonal: + return 4 + case Regional: + return 4 + case Global: + return 3 + } + panic(fmt.Errorf("invalid KeyType %v", mr.keyType)) +} + +// args return a list of arguments to the method, skipping the first skip +// elements. If nameArgs is true, then the arguments will include a generated +// parameter name (arg). prefix will be added to the parameters. +func (mr *Method) args(skip int, nameArgs bool, prefix []string) []string { + var args []*arg + fType := mr.m.Func.Type() + for i := 0; i < fType.NumIn(); i++ { + t := fType.In(i) + args = append(args, newArg(t)) + } + + var a []string + for i := skip; i < fType.NumIn(); i++ { + if nameArgs { + a = append(a, fmt.Sprintf("arg%d %s", i-skip, args[i])) + } else { + a = append(a, args[i].String()) + } + } + return append(prefix, a...) +} + +func (mr *Method) init() { + fType := mr.m.Func.Type() + if fType.NumIn() < mr.argsSkip() { + err := fmt.Errorf("method %q.%q, arity = %d which is less than required (< %d)", + mr.Service, mr.Name(), fType.NumIn(), mr.argsSkip()) + panic(err) + } + // Skipped args should all be string (they will be projectID, zone, region etc). + for i := 1; i < mr.argsSkip(); i++ { + if fType.In(i).Kind() != reflect.String { + panic(fmt.Errorf("method %q.%q: skipped args can only be strings", mr.Service, mr.Name())) + } + } + // Return of the method must return a single value of type *xxxCall. + if fType.NumOut() != 1 || fType.Out(0).Kind() != reflect.Ptr || !strings.HasSuffix(fType.Out(0).Elem().Name(), "Call") { + panic(fmt.Errorf("method %q.%q: generator only supports methods returning an *xxxCall object", + mr.Service, mr.Name())) + } + returnType := fType.Out(0) + returnTypeName := fType.Out(0).Elem().Name() + // xxxCall must have a Do() method. + doMethod, ok := returnType.MethodByName("Do") + if !ok { + panic(fmt.Errorf("method %q.%q: return type %q does not have a Do() method", + mr.Service, mr.Name(), returnTypeName)) + } + // Do() method must return (*T, error). + switch doMethod.Func.Type().NumOut() { + case 2: + glog.Infof("Method %q.%q: return type %q of Do() = %v, %v", + mr.Service, mr.Name(), returnTypeName, doMethod.Func.Type().Out(0), doMethod.Func.Type().Out(1)) + out0 := doMethod.Func.Type().Out(0) + if out0.Kind() != reflect.Ptr { + panic(fmt.Errorf("method %q.%q: return type %q of Do() = S, _; S must be pointer type (%v)", + mr.Service, mr.Name(), returnTypeName, out0)) + } + mr.ReturnType = out0.Elem().Name() + if out0.Elem().Name() == "Operation" { + glog.Infof("Method %q.%q is an *Operation", mr.Service, mr.Name()) + } else { + glog.Infof("Method %q.%q returns %v", mr.Service, mr.Name(), out0) + } + // Second argument must be "error". 
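
The signature checks described here are easier to see on a concrete type. The following self-contained sketch (fakeService, fakeUpdateCall, fakeOperation and fakeHealthCheck are made-up stand-ins for compute API types) shows the shape init() accepts: a method whose single return value is a *xxxCall exposing a Do() method that returns (*T, error):

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    type fakeOperation struct{}
    type fakeHealthCheck struct{}
    type fakeUpdateCall struct{}

    func (c *fakeUpdateCall) Do() (*fakeOperation, error) { return &fakeOperation{}, nil }

    type fakeService struct{}

    // Update mirrors the generated compute API shape: string args for project and
    // name, a request body, and a *xxxCall return value.
    func (s *fakeService) Update(project, name string, body *fakeHealthCheck) *fakeUpdateCall {
        return &fakeUpdateCall{}
    }

    func main() {
        m, _ := reflect.TypeOf(&fakeService{}).MethodByName("Update")
        ret := m.Func.Type().Out(0)
        fmt.Println(ret.Kind() == reflect.Ptr && strings.HasSuffix(ret.Elem().Name(), "Call")) // true
        do, ok := ret.MethodByName("Do")
        fmt.Println(ok, do.Func.Type().NumOut() == 2) // true true
    }
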
+ if doMethod.Func.Type().Out(1).Name() != "error" { + panic(fmt.Errorf("method %q.%q: return type %q of Do() = S, T; T must be 'error'", + mr.Service, mr.Name(), returnTypeName)) + } + break + default: + panic(fmt.Errorf("method %q.%q: %q Do() return type is not handled by the generator", + mr.Service, mr.Name(), returnTypeName)) + } +} + +func (mr *Method) Name() string { + return mr.m.Name +} + +func (mr *Method) CallArgs() string { + var args []string + for i := mr.argsSkip(); i < mr.m.Func.Type().NumIn(); i++ { + args = append(args, fmt.Sprintf("arg%d", i-mr.argsSkip())) + } + if len(args) == 0 { + return "" + } + return fmt.Sprintf(", %s", strings.Join(args, ", ")) +} + +func (mr *Method) MockHookName() string { + return mr.m.Name + "Hook" +} + +func (mr *Method) MockHook() string { + args := mr.args(mr.argsSkip(), false, []string{ + fmt.Sprintf("*%s", mr.MockWrapType()), + "context.Context", + "meta.Key", + }) + if mr.ReturnType == "Operation" { + return fmt.Sprintf("%v func(%v) error", mr.MockHookName(), strings.Join(args, ", ")) + } + return fmt.Sprintf("%v func(%v) (*%v.%v, error)", mr.MockHookName(), strings.Join(args, ", "), mr.Version(), mr.ReturnType) +} + +func (mr *Method) FcnArgs() string { + args := mr.args(mr.argsSkip(), true, []string{ + "ctx context.Context", + "key meta.Key", + }) + + if mr.ReturnType == "Operation" { + return fmt.Sprintf("%v(%v) error", mr.m.Name, strings.Join(args, ", ")) + } + return fmt.Sprintf("%v(%v) (*%v.%v, error)", mr.m.Name, strings.Join(args, ", "), mr.Version(), mr.ReturnType) +} + +func (mr *Method) InterfaceFunc() string { + args := mr.args(mr.argsSkip(), false, []string{"context.Context", "meta.Key"}) + if mr.ReturnType == "Operation" { + return fmt.Sprintf("%v(%v) error", mr.m.Name, strings.Join(args, ", ")) + } + return fmt.Sprintf("%v(%v) (*%v.%v, error)", mr.m.Name, strings.Join(args, ", "), mr.Version(), mr.ReturnType) +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/service.go b/pkg/cloudprovider/providers/gce/cloud/meta/service.go new file mode 100644 index 00000000000..ffa3385075b --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/service.go @@ -0,0 +1,273 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "errors" + "fmt" + "reflect" +) + +// ServiceInfo defines the entry for a Service that code will be generated for. +type ServiceInfo struct { + // Object is the Go name of the object type that the service deals + // with. Example: "ForwardingRule". + Object string + // Service is the Go name of the service struct i.e. where the methods + // are defined. Examples: "GlobalForwardingRules". + Service string + // Resource is the plural noun of the resource in the compute API URL (e.g. + // "forwardingRules"). + Resource string + // version if unspecified will be assumed to be VersionGA. 
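
Putting the string builders above together: for the GA "GlobalForwardingRules" entry registered earlier (additional method "SetTarget", which returns an Operation and, in the GA API, takes a *ga.TargetReference), the emitted fragments should look roughly like the following. This is derived from the format strings above, not copied from generator output:

    // MockHook()
    SetTargetHook func(*MockGlobalForwardingRules, context.Context, meta.Key, *ga.TargetReference) error

    // FcnArgs()
    SetTarget(ctx context.Context, key meta.Key, arg0 *ga.TargetReference) error

    // InterfaceFunc()
    SetTarget(context.Context, meta.Key, *ga.TargetReference) error
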
+ version Version + keyType KeyType + serviceType reflect.Type + + additionalMethods []string + options int + aggregatedListField string +} + +// Version returns the version of the Service, defaulting to GA if APIVersion +// is empty. +func (i *ServiceInfo) Version() Version { + if i.version == "" { + return VersionGA + } + return i.version +} + +// VersionTitle returns the capitalized golang CamelCase name for the version. +func (i *ServiceInfo) VersionTitle() string { + switch i.Version() { + case VersionGA: + return "GA" + case VersionAlpha: + return "Alpha" + case VersionBeta: + return "Beta" + } + panic(fmt.Errorf("invalid version %q", i.Version())) +} + +// WrapType is the name of the wrapper service type. +func (i *ServiceInfo) WrapType() string { + switch i.Version() { + case VersionGA: + return i.Service + case VersionAlpha: + return "Alpha" + i.Service + case VersionBeta: + return "Beta" + i.Service + } + return "Invalid" +} + +// WrapTypeOps is the name of the additional operations type. +func (i *ServiceInfo) WrapTypeOps() string { + return i.WrapType() + "Ops" +} + +// FQObjectType is fully qualified name of the object (e.g. compute.Instance). +func (i *ServiceInfo) FQObjectType() string { + return fmt.Sprintf("%v.%v", i.Version(), i.Object) +} + +// ObjectListType is the compute List type for the object (contains Items field). +func (i *ServiceInfo) ObjectListType() string { + return fmt.Sprintf("%v.%vList", i.Version(), i.Object) +} + +// ObjectAggregatedListType is the compute List type for the object (contains Items field). +func (i *ServiceInfo) ObjectAggregatedListType() string { + return fmt.Sprintf("%v.%vAggregatedList", i.Version(), i.Object) +} + +// MockWrapType is the name of the concrete mock for this type. +func (i *ServiceInfo) MockWrapType() string { + return "Mock" + i.WrapType() +} + +// MockField is the name of the field in the mock struct. +func (i *ServiceInfo) MockField() string { + return "Mock" + i.WrapType() +} + +// GCEWrapType is the name of the GCE wrapper type. +func (i *ServiceInfo) GCEWrapType() string { + return "GCE" + i.WrapType() +} + +// Field is the name of the GCE struct. +func (i *ServiceInfo) Field() string { + return "gce" + i.WrapType() +} + +// Methods returns a list of additional methods to generate code for. +func (i *ServiceInfo) Methods() []*Method { + methods := map[string]bool{} + for _, m := range i.additionalMethods { + methods[m] = true + } + + var ret []*Method + for j := 0; j < i.serviceType.NumMethod(); j++ { + m := i.serviceType.Method(j) + if _, ok := methods[m.Name]; !ok { + continue + } + ret = append(ret, newMethod(i, m)) + methods[m.Name] = false + } + + for k, b := range methods { + if b { + panic(fmt.Errorf("method %q was not found in service %q", k, i.Service)) + } + } + + return ret +} + +// KeyIsGlobal is true if the key is global. +func (i *ServiceInfo) KeyIsGlobal() bool { + return i.keyType == Global +} + +// KeyIsRegional is true if the key is regional. +func (i *ServiceInfo) KeyIsRegional() bool { + return i.keyType == Regional +} + +// KeyIsZonal is true if the key is zonal. +func (i *ServiceInfo) KeyIsZonal() bool { + return i.keyType == Zonal +} + +// MakeKey returns the call used to create the appropriate key type. 
+func (i *ServiceInfo) MakeKey(name, location string) string { + switch i.keyType { + case Global: + return fmt.Sprintf("GlobalKey(%q)", name) + case Regional: + return fmt.Sprintf("RegionalKey(%q, %q)", name, location) + case Zonal: + return fmt.Sprintf("ZonalKey(%q, %q)", name, location) + } + return "Invalid" +} + +// GenerateGet is true if the method is to be generated. +func (i *ServiceInfo) GenerateGet() bool { + return i.options&NoGet == 0 +} + +// GenerateList is true if the method is to be generated. +func (i *ServiceInfo) GenerateList() bool { + return i.options&NoList == 0 +} + +// GenerateDelete is true if the method is to be generated. +func (i *ServiceInfo) GenerateDelete() bool { + return i.options&NoDelete == 0 +} + +// GenerateInsert is true if the method is to be generated. +func (i *ServiceInfo) GenerateInsert() bool { + return i.options&NoInsert == 0 +} + +// GenerateCustomOps is true if we should generated a xxxOps interface for +// adding additional methods to the generated interface. +func (i *ServiceInfo) GenerateCustomOps() bool { + return i.options&CustomOps != 0 +} + +// AggregatedList is true if the method is to be generated. +func (i *ServiceInfo) AggregatedList() bool { + return i.options&AggregatedList != 0 +} + +// AggregatedListField is the name of the field used for the aggregated list +// call. This is typically the same as the name of the service, but can be +// customized by setting the aggregatedListField field. +func (i *ServiceInfo) AggregatedListField() string { + if i.aggregatedListField == "" { + return i.Service + } + return i.aggregatedListField +} + +// ServiceGroup is a grouping of the same service but at different API versions. +type ServiceGroup struct { + Alpha *ServiceInfo + Beta *ServiceInfo + GA *ServiceInfo +} + +func (sg *ServiceGroup) Service() string { + switch { + case sg.GA != nil: + return sg.GA.Service + case sg.Alpha != nil: + return sg.Alpha.Service + case sg.Beta != nil: + return sg.Beta.Service + default: + panic(errors.New("service group is empty")) + } +} + +func (sg *ServiceGroup) HasGA() bool { + return sg.GA != nil +} + +func (sg *ServiceGroup) HasAlpha() bool { + return sg.Alpha != nil +} + +func (sg *ServiceGroup) HasBeta() bool { + return sg.Beta != nil +} + +// groupServices together by version. +func groupServices(services []*ServiceInfo) map[string]*ServiceGroup { + ret := map[string]*ServiceGroup{} + for _, si := range services { + if _, ok := ret[si.Service]; !ok { + ret[si.Service] = &ServiceGroup{} + } + group := ret[si.Service] + switch si.Version() { + case VersionAlpha: + group.Alpha = si + case VersionBeta: + group.Beta = si + case VersionGA: + group.GA = si + } + } + return ret +} + +// AllServicesByGroup is a map of service name to ServicesGroup. 
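
As a worked example of the naming helpers above, the VersionAlpha "NetworkEndpointGroups" entry registered earlier should expand roughly as follows (derived by reading the helper bodies, and assuming VersionAlpha is the string "alpha", as the package aliases suggest):

    Version()        -> "alpha"
    WrapType()       -> "AlphaNetworkEndpointGroups"
    MockWrapType()   -> "MockAlphaNetworkEndpointGroups"
    GCEWrapType()    -> "GCEAlphaNetworkEndpointGroups"
    FQObjectType()   -> "alpha.NetworkEndpointGroup"
    ObjectListType() -> "alpha.NetworkEndpointGroupList"

groupServices then folds the alpha, beta and GA entries that share a Service name (for example the GA and alpha "HealthChecks" entries) into one ServiceGroup, which is what lets the generated mocks share a single object map across versions.
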
+var AllServicesByGroup map[string]*ServiceGroup + +func init() { + AllServicesByGroup = groupServices(AllServices) +} From 94ddfd17e769bc4b4b407adc864233252d62eb38 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:30 -0800 Subject: [PATCH 070/264] Implementation of the compute "filter" handling for List() --- .../providers/gce/cloud/filter/filter.go | 303 ++++++++++++++++++ .../providers/gce/cloud/filter/filter_test.go | 176 ++++++++++ 2 files changed, 479 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/filter/filter.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/filter.go b/pkg/cloudprovider/providers/gce/cloud/filter/filter.go new file mode 100644 index 00000000000..c08005726c8 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/filter/filter.go @@ -0,0 +1,303 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package filter encapsulates the filter argument to compute API calls. +// +// // List all global addresses (no filter). +// c.GlobalAddresses().List(ctx, filter.None) +// +// // List global addresses filtering for name matching "abc.*". +// c.GlobalAddresses().List(ctx, filter.Regexp("name", "abc.*")) +// +// // List on multiple conditions. +// f := filter.Regexp("name", "homer.*").AndNotRegexp("name", "homers") +// c.GlobalAddresses().List(ctx, f) +package filter + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/golang/glog" +) + +var ( + // None indicates that the List result set should not be filter (i.e. + // return all values). + None *F +) + +// Regexp returns a filter for fieldName matches regexp v. +func Regexp(fieldName, v string) *F { + return (&F{}).AndRegexp(fieldName, v) +} + +// NotRegexp returns a filter for fieldName not matches regexp v. +func NotRegexp(fieldName, v string) *F { + return (&F{}).AndNotRegexp(fieldName, v) +} + +// EqualInt returns a filter for fieldName ~ v. +func EqualInt(fieldName string, v int) *F { + return (&F{}).AndEqualInt(fieldName, v) +} + +// NotEqualInt returns a filter for fieldName != v. +func NotEqualInt(fieldName string, v int) *F { + return (&F{}).AndNotEqualInt(fieldName, v) +} + +// EqualBool returns a filter for fieldName == v. +func EqualBool(fieldName string, v bool) *F { + return (&F{}).AndEqualBool(fieldName, v) +} + +// NotEqualBool returns a filter for fieldName != v. +func NotEqualBool(fieldName string, v bool) *F { + return (&F{}).AndNotEqualBool(fieldName, v) +} + +// F is a filter to be used with List() operations. +// +// From the compute API description: +// +// Sets a filter {expression} for filtering listed resources. Your {expression} +// must be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only atomic field +// types are supported (string, number, boolean). The comparison_string must be +// either eq (equals) or ne (not equals). 
The literal_string is the string value +// to filter to. The literal value must be valid for the type of field you are +// filtering by (string, number, boolean). For string fields, the literal value is +// interpreted as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on instances +// that have set the scheduling.automaticRestart field to true. Use filtering on +// nested fields to take advantage of labels to organize and search for results +// based on label values. +// +// To filter on multiple expressions, provide each separate expression within +// parentheses. For example, (scheduling.automaticRestart eq true) +// (zone eq us-central1-f). Multiple expressions are treated as AND expressions, +// meaning that resources must match all expressions to pass the filters. +type F struct { + predicates []filterPredicate +} + +// And joins two filters together. +func (fl *F) And(rest *F) *F { + fl.predicates = append(fl.predicates, rest.predicates...) + return fl +} + +// AndRegexp adds a field match string predicate. +func (fl *F) AndRegexp(fieldName, v string) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, s: &v}) + return fl +} + +// AndNotRegexp adds a field not match string predicate. +func (fl *F) AndNotRegexp(fieldName, v string) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, s: &v}) + return fl +} + +// AndEqualInt adds a field == int predicate. +func (fl *F) AndEqualInt(fieldName string, v int) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, i: &v}) + return fl +} + +// AndNotEqualInt adds a field != int predicate. +func (fl *F) AndNotEqualInt(fieldName string, v int) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, i: &v}) + return fl +} + +// AndEqualBool adds a field == bool predicate. +func (fl *F) AndEqualBool(fieldName string, v bool) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, b: &v}) + return fl +} + +// AndNotEqualBool adds a field != bool predicate. +func (fl *F) AndNotEqualBool(fieldName string, v bool) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, b: &v}) + return fl +} + +func (fl *F) String() string { + if len(fl.predicates) == 1 { + return fl.predicates[0].String() + } + + var pl []string + for _, p := range fl.predicates { + pl = append(pl, "("+p.String()+")") + } + return strings.Join(pl, " ") +} + +// Match returns true if the F as specifies matches the given object. This +// is used by the Mock implementations to perform filtering and SHOULD NOT be +// used in production code as it is not well-tested to be equivalent to the +// actual compute API. +func (fl *F) Match(obj interface{}) bool { + if fl == nil { + return true + } + for _, p := range fl.predicates { + if !p.match(obj) { + return false + } + } + return true +} + +type filterOp int + +const ( + equals filterOp = iota + notEquals filterOp = iota +) + +// filterPredicate is an individual predicate for a fieldName and value. 
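
A short usage sketch of the F builder above (field names and values are made up; the String() output follows the formats defined by F.String() and filterPredicate.String()):

    f := filter.Regexp("name", "abc.*").AndNotRegexp("name", "abcd")
    // f.String() == `(name eq abc.*) (name ne abcd)`
    //
    // In the mocks, f.Match(obj) compiles each string predicate as an RE2 regexp
    // and resolves snake_case paths against Go struct fields, so a predicate on
    // "nested_field.x" is evaluated against obj.NestedField.X.
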
+type filterPredicate struct { + fieldName string + + op filterOp + s *string + i *int + b *bool +} + +func (fp *filterPredicate) String() string { + var op string + switch fp.op { + case equals: + op = "eq" + case notEquals: + op = "ne" + default: + op = "invalidOp" + } + + var value string + switch { + case fp.s != nil: + // There does not seem to be any sort of escaping as specified in the + // document. This means it's possible to create malformed expressions. + value = *fp.s + case fp.i != nil: + value = fmt.Sprintf("%d", *fp.i) + case fp.b != nil: + value = fmt.Sprintf("%t", *fp.b) + default: + value = "invalidValue" + } + + return fmt.Sprintf("%s %s %s", fp.fieldName, op, value) +} + +func (fp *filterPredicate) match(o interface{}) bool { + v, err := extractValue(fp.fieldName, o) + glog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) + if err != nil { + return false + } + + var match bool + switch x := v.(type) { + case string: + if fp.s == nil { + return false + } + re, err := regexp.Compile(*fp.s) + if err != nil { + glog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) + return false + } + match = re.Match([]byte(x)) + case int: + if fp.i == nil { + return false + } + match = x == *fp.i + case bool: + if fp.b == nil { + return false + } + match = x == *fp.b + } + + switch fp.op { + case equals: + return match + case notEquals: + return !match + } + + return false +} + +// snakeToCamelCase converts from "names_like_this" to "NamesLikeThis" to +// interoperate between proto and Golang naming conventions. +func snakeToCamelCase(s string) string { + parts := strings.Split(s, "_") + var ret string + for _, x := range parts { + ret += strings.Title(x) + } + return ret +} + +// extractValue returns the value of the field named by path in object o if it exists. +func extractValue(path string, o interface{}) (interface{}, error) { + parts := strings.Split(path, ".") + for _, f := range parts { + v := reflect.ValueOf(o) + // Dereference Ptr to handle *struct. + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return nil, errors.New("field is nil") + } + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("cannot get field from non-struct (%T)", o) + } + v = v.FieldByName(snakeToCamelCase(f)) + if !v.IsValid() { + return nil, fmt.Errorf("cannot get field %q as it is not a valid field in %T", f, o) + } + if !v.CanInterface() { + return nil, fmt.Errorf("cannot get field %q in obj of type %T", f, o) + } + o = v.Interface() + } + switch o.(type) { + case string, int, bool: + return o, nil + } + return nil, fmt.Errorf("unhandled object of type %T", o) +} diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go b/pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go new file mode 100644 index 00000000000..46b3c279a47 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filter + +import ( + "reflect" + "testing" +) + +func TestFilterToString(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + f *F + want string + }{ + {Regexp("field1", "abc"), `field1 eq abc`}, + {NotRegexp("field1", "abc"), `field1 ne abc`}, + {EqualInt("field1", 13), "field1 eq 13"}, + {NotEqualInt("field1", 13), "field1 ne 13"}, + {EqualBool("field1", true), "field1 eq true"}, + {NotEqualBool("field1", true), "field1 ne true"}, + {Regexp("field1", "abc").AndRegexp("field2", "def"), `(field1 eq abc) (field2 eq def)`}, + {Regexp("field1", "abc").AndNotEqualInt("field2", 17), `(field1 eq abc) (field2 ne 17)`}, + {Regexp("field1", "abc").And(EqualInt("field2", 17)), `(field1 eq abc) (field2 eq 17)`}, + } { + if tc.f.String() != tc.want { + t.Errorf("filter %#v String() = %q, want %q", tc.f, tc.f.String(), tc.want) + } + } +} + +func TestFilterMatch(t *testing.T) { + t.Parallel() + + type inner struct { + X string + } + type S struct { + S string + I int + B bool + Unhandled struct{} + NestedField *inner + } + + for _, tc := range []struct { + f *F + o interface{} + want bool + }{ + {f: None, o: &S{}, want: true}, + {f: Regexp("s", "abc"), o: &S{}}, + {f: EqualInt("i", 10), o: &S{}}, + {f: EqualBool("b", true), o: &S{}}, + {f: NotRegexp("s", "abc"), o: &S{}, want: true}, + {f: NotEqualInt("i", 10), o: &S{}, want: true}, + {f: NotEqualBool("b", true), o: &S{}, want: true}, + {f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{}}, + {f: Regexp("s", "abc"), o: &S{S: "abc"}, want: true}, + {f: Regexp("s", "a.*"), o: &S{S: "abc"}, want: true}, + {f: Regexp("s", "a((("), o: &S{S: "abc"}}, + {f: NotRegexp("s", "abc"), o: &S{S: "abc"}}, + {f: EqualInt("i", 10), o: &S{I: 11}}, + {f: EqualInt("i", 10), o: &S{I: 10}, want: true}, + {f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{S: "abc"}}, + {f: Regexp("s", "abcd").AndEqualBool("b", true), o: &S{S: "abc"}}, + {f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{S: "abc", B: true}, want: true}, + {f: Regexp("s", "abc").And(EqualBool("b", true)), o: &S{S: "abc", B: true}, want: true}, + {f: Regexp("unhandled", "xyz"), o: &S{}}, + {f: Regexp("nested_field.x", "xyz"), o: &S{}}, + {f: Regexp("nested_field.x", "xyz"), o: &S{NestedField: &inner{"xyz"}}, want: true}, + {f: NotRegexp("nested_field.x", "xyz"), o: &S{NestedField: &inner{"xyz"}}}, + {f: Regexp("nested_field.y", "xyz"), o: &S{NestedField: &inner{"xyz"}}}, + {f: Regexp("nested_field", "xyz"), o: &S{NestedField: &inner{"xyz"}}}, + } { + got := tc.f.Match(tc.o) + if got != tc.want { + t.Errorf("%v: Match(%+v) = %v, want %v", tc.f, tc.o, got, tc.want) + } + } +} + +func TestFilterSnakeToCamelCase(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + s string + want string + }{ + {"", ""}, + {"abc", "Abc"}, + {"_foo", "Foo"}, + {"a_b_c", "ABC"}, + {"a_BC_def", "ABCDef"}, + {"a_Bc_def", "ABcDef"}, + } { + got := snakeToCamelCase(tc.s) + if got != tc.want { + t.Errorf("snakeToCamelCase(%q) = %q, want %q", tc.s, got, tc.want) + } + } +} + +func TestFilterExtractValue(t *testing.T) { + t.Parallel() + + type nest2 struct { + Y string + } + type nest struct { + X string + Nest2 nest2 + } + st := &struct { + S string + I int + F bool + Nest nest + NestPtr *nest + + Unhandled float64 + }{ + "abc", + 13, + true, + nest{"xyz", nest2{"zzz"}}, + &nest{"yyy", nest2{}}, + 0.0, + } + + for _, tc := range []struct { + path string + o interface{} + want interface{} + wantErr bool + }{ + {path: "s", o: st, want: "abc"}, + {path: "i", o: st, want: 13}, + {path: "f", o: 
st, want: true}, + {path: "nest.x", o: st, want: "xyz"}, + {path: "nest_ptr.x", o: st, want: "yyy"}, + // Error cases. + {path: "", o: st, wantErr: true}, + {path: "no_such_field", o: st, wantErr: true}, + {path: "s.invalid_type", o: st, wantErr: true}, + {path: "unhandled", o: st, wantErr: true}, + {path: "nest.x", o: &struct{ Nest *nest }{}, wantErr: true}, + } { + o, err := extractValue(tc.path, tc.o) + gotErr := err != nil + if gotErr != tc.wantErr { + t.Errorf("extractValue(%v, %+v) = %v, %v; gotErr = %v, tc.wantErr = %v", tc.path, tc.o, o, err, gotErr, tc.wantErr) + } + if err != nil { + continue + } + if !reflect.DeepEqual(o, tc.want) { + t.Errorf("extractValue(%v, %+v) = %v, nil; want %v, nil", tc.path, tc.o, o, tc.want) + } + } +} From 8250950d15521f9ffa1f966a5b358bda65197f49 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:37 -0800 Subject: [PATCH 071/264] documentation --- pkg/cloudprovider/providers/gce/cloud/doc.go | 111 +++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/doc.go diff --git a/pkg/cloudprovider/providers/gce/cloud/doc.go b/pkg/cloudprovider/providers/gce/cloud/doc.go new file mode 100644 index 00000000000..d0d7a6cfb19 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/doc.go @@ -0,0 +1,111 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloud implements a more golang friendly interface to the GCE compute +// API. The code in this package is generated automatically via the generator +// implemented in "gen/main.go". The code generator creates the basic CRUD +// actions for the given resource: "Insert", "Get", "List" and "Delete". +// Additional methods by customizing the ServiceInfo object (see below). +// Generated code includes a full mock of the GCE compute API. +// +// Usage +// +// The root of the GCE compute API is the interface "Cloud". Code written using +// Cloud can be used against the actual implementation "GCE" or "MockGCE". +// +// func foo(cloud Cloud) { +// igs, err := cloud.InstanceGroups().List(ctx, "us-central1-b", filter.None) +// ... +// } +// // Run foo against the actual cloud. +// foo(NewGCE(&Service{...})) +// // Run foo with a mock. +// foo(NewMockGCE()) +// +// Rate limiting and routing +// +// The generated code allows for custom policies for operation rate limiting +// and GCE project routing. See RateLimiter and ProjectRouter for more details. +// +// Mocks +// +// Mocks are automatically generated for each type implementing basic logic for +// resource manipulation. This eliminates the boilerplate required to mock GCE +// functionality. Each method will also have a corresponding "xxxHook" +// function generated in the mock structure where unit test code can hook the +// execution of the method. +// +// Mocks for different versions of the same service will share the same set of +// objects, i.e. an alpha object will be visible with beta and GA methods. 
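
The Usage and Mocks sections above suggest a unit-testing pattern along these lines (a sketch only: "my-rule" and the surrounding test are made up, and the package is imported here under the name "cloud"):

    func TestSyncForwardingRule(t *testing.T) {
        ctx := context.Background()
        mock := cloud.NewMockGCE()

        key := meta.GlobalKey("my-rule")
        if err := mock.GlobalForwardingRules().Insert(ctx, *key, &ga.ForwardingRule{}); err != nil {
            t.Fatalf("Insert() = %v", err)
        }
        frs, err := mock.GlobalForwardingRules().List(ctx, filter.None)
        if err != nil || len(frs) != 1 {
            t.Fatalf("List() = %v, %v; want one rule, nil", frs, err)
        }
    }
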
+// Note that translation is done with JSON serialization between the API versions. +// +// Changing service code generation +// +// The list of services to generate is contained in "meta/meta.go". To add a +// service, add an entry to the list "meta.AllServices". An example entry: +// +// &ServiceInfo{ +// Object: "InstanceGroup", // Name of the object type. +// Service: "InstanceGroups", // Name of the service. +// version: meta.VersionAlpha, // API version (one entry per version is needed). +// keyType: Zonal, // What kind of resource this is. +// serviceType: reflect.TypeOf(&alpha.InstanceGroupsService{}), // Associated golang type. +// additionalMethods: []string{ // Additional methods to generate code for. +// "SetNamedPorts", +// }, +// options: // Or'd ("|") together. +// } +// +// Read-only objects +// +// Services such as Regions and Zones do not allow for mutations. Specify +// "ReadOnly" in ServiceInfo.options to omit the mutation methods. +// +// Adding custom methods +// +// Some methods that may not be properly handled by the generated code. To enable +// addition of custom code to the generated mocks, set the "CustomOps" option +// in "meta.ServiceInfo" entry. This will make the generated service interface +// embed a "Ops" interface. This interface MUST be written by hand +// and contain the custom method logic. Corresponding methods must be added to +// the corresponding Mockxxx and GCExxx struct types. +// +// // In "meta/meta.go": +// &ServiceInfo{ +// Object: "InstanceGroup", +// ... +// options: CustomOps, +// } +// +// // In the generated code "gen.go": +// type InstanceGroups interface { +// InstanceGroupsOps // Added by CustomOps option. +// ... +// } +// +// // In hand written file: +// type InstanceGroupsOps interface { +// MyMethod() +// } +// +// func (mock *MockInstanceGroups) MyMethod() { +// // Custom mock implementation. +// } +// +// func (gce *GCEInstanceGroups) MyMethod() { +// // Custom implementation. +// } +package cloud From 75bff35884a52275825741c3cbf1da46ca363d5f Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:58 -0800 Subject: [PATCH 072/264] long running operation support --- pkg/cloudprovider/providers/gce/cloud/op.go | 142 ++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/op.go diff --git a/pkg/cloudprovider/providers/gce/cloud/op.go b/pkg/cloudprovider/providers/gce/cloud/op.go new file mode 100644 index 00000000000..92ee6f3f6f3 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/op.go @@ -0,0 +1,142 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// operation is a GCE operation that can be watied on. +type operation interface { + // isDone queries GCE for the done status. 
This call can block. + isDone(ctx context.Context) (bool, error) + // rateLimitKey returns the rate limit key to use for the given operation. + // This rate limit will govern how fast the server will be polled for + // operation completion status. + rateLimitKey() *RateLimitKey +} + +type gaOperation struct { + s *Service + op *ga.Operation + projectID string +} + +func (o *gaOperation) isDone(ctx context.Context) (bool, error) { + var ( + op *ga.Operation + err error + ) + + switch { + case o.op.Region != "": + op, err = o.s.GA.RegionOperations.Get(o.projectID, o.op.Region, o.op.Name).Context(ctx).Do() + case o.op.Zone != "": + op, err = o.s.GA.ZoneOperations.Get(o.projectID, o.op.Zone, o.op.Name).Context(ctx).Do() + default: + op, err = o.s.GA.GlobalOperations.Get(o.projectID, o.op.Name).Context(ctx).Do() + } + if err != nil { + return false, err + } + return op != nil && op.Status == "DONE", nil +} + +func (o *gaOperation) rateLimitKey() *RateLimitKey { + return &RateLimitKey{ + ProjectID: o.projectID, + Operation: "Get", + Service: "Operations", + Version: meta.VersionGA, + } +} + +type alphaOperation struct { + s *Service + op *alpha.Operation + projectID string +} + +func (o *alphaOperation) isDone(ctx context.Context) (bool, error) { + var ( + op *alpha.Operation + err error + ) + + switch { + case o.op.Region != "": + op, err = o.s.Alpha.RegionOperations.Get(o.projectID, o.op.Region, o.op.Name).Context(ctx).Do() + case o.op.Zone != "": + op, err = o.s.Alpha.ZoneOperations.Get(o.projectID, o.op.Zone, o.op.Name).Context(ctx).Do() + default: + op, err = o.s.Alpha.GlobalOperations.Get(o.projectID, o.op.Name).Context(ctx).Do() + } + if err != nil { + return false, err + } + return op != nil && op.Status == "DONE", nil +} + +func (o *alphaOperation) rateLimitKey() *RateLimitKey { + return &RateLimitKey{ + ProjectID: o.projectID, + Operation: "Get", + Service: "Operations", + Version: meta.VersionAlpha, + } +} + +type betaOperation struct { + s *Service + op *beta.Operation + projectID string +} + +func (o *betaOperation) isDone(ctx context.Context) (bool, error) { + var ( + op *beta.Operation + err error + ) + + switch { + case o.op.Region != "": + op, err = o.s.Beta.RegionOperations.Get(o.projectID, o.op.Region, o.op.Name).Context(ctx).Do() + case o.op.Zone != "": + op, err = o.s.Beta.ZoneOperations.Get(o.projectID, o.op.Zone, o.op.Name).Context(ctx).Do() + default: + op, err = o.s.Beta.GlobalOperations.Get(o.projectID, o.op.Name).Context(ctx).Do() + } + if err != nil { + return false, err + } + return op != nil && op.Status == "DONE", nil +} + +func (o *betaOperation) rateLimitKey() *RateLimitKey { + return &RateLimitKey{ + ProjectID: o.projectID, + Operation: "Get", + Service: "Operations", + Version: meta.VersionBeta, + } +} From 968cce929c500cd8d5e172a9e1e70924e7b427e0 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:35:09 -0800 Subject: [PATCH 073/264] code generation --- .../providers/gce/cloud/gen/main.go | 1140 +++++++++++++++++ 1 file changed, 1140 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen/main.go diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go new file mode 100644 index 00000000000..f8dcd730b9f --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -0,0 +1,1140 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
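
Before turning to the generator, a hypothetical polling loop showing how the operation interface defined in op.go above is meant to be consumed (the real wait logic lives elsewhere in this package and also consults rateLimitKey(); this is only an illustration, assuming a "time" import):

    func waitSketch(ctx context.Context, op operation) error {
        for {
            done, err := op.isDone(ctx)
            if err != nil || done {
                return err
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(time.Second): // fixed interval; real code rate-limits via rateLimitKey()
            }
        }
    }
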
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generator for GCE compute wrapper code. You must regenerate the code after +// modifying this file: +// +// $ go run gen/main.go > gen.go +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "os" + "os/exec" + "text/template" + "time" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + "github.com/golang/glog" +) + +const ( + gofmt = "gofmt" + packageRoot = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + + // readOnly specifies that the given resource is read-only and should not + // have insert() or delete() methods generated for the wrapper. + readOnly = iota +) + +var flags = struct { + gofmt bool + mode string +}{} + +func init() { + flag.BoolVar(&flags.gofmt, "gofmt", true, "run output through gofmt") + flag.StringVar(&flags.mode, "mode", "src", "content to generate: src, test, dummy") +} + +// gofmtContent runs "gofmt" on the given contents. +func gofmtContent(r io.Reader) string { + cmd := exec.Command(gofmt, "-s") + out := &bytes.Buffer{} + cmd.Stdin = r + cmd.Stdout = out + cmdErr := &bytes.Buffer{} + cmd.Stderr = cmdErr + + if err := cmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, cmdErr.String()) + panic(err) + } + return out.String() +} + +// genHeader generate the header for the file. +func genHeader(wr io.Writer) { + const text = `/* +Copyright {{.Year}} The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by "go run gen/main.go > gen.go". Do not edit +// directly. + +package cloud + +import ( + "context" + "fmt" + "net/http" + "sync" + + "google.golang.org/api/googleapi" + "github.com/golang/glog" + + "{{.PackageRoot}}/filter" + "{{.PackageRoot}}/meta" + +` + tmpl := template.Must(template.New("header").Parse(text)) + values := map[string]string{ + "Year": fmt.Sprintf("%v", time.Now().Year()), + "PackageRoot": packageRoot, + } + if err := tmpl.Execute(wr, values); err != nil { + panic(err) + } + + var hasGA, hasAlpha, hasBeta bool + for _, s := range meta.AllServices { + switch s.Version() { + case meta.VersionGA: + hasGA = true + case meta.VersionAlpha: + hasAlpha = true + case meta.VersionBeta: + hasBeta = true + } + } + if hasAlpha { + fmt.Fprintln(wr, ` alpha "google.golang.org/api/compute/v0.alpha"`) + } + if hasBeta { + fmt.Fprintln(wr, ` beta "google.golang.org/api/compute/v0.beta"`) + } + if hasGA { + fmt.Fprintln(wr, ` ga "google.golang.org/api/compute/v1"`) + } + fmt.Fprintf(wr, ")\n\n") +} + +// genStubs generates the interface and wrapper stubs. +func genStubs(wr io.Writer) { + const text = `// Cloud is an interface for the GCE compute API. 
+type Cloud interface { +{{- range .All}} + {{.WrapType}}() {{.WrapType}} +{{- end}} +} + +// NewGCE returns a GCE. +func NewGCE(s *Service) *GCE { + g := &GCE{ + {{- range .All}} + {{.Field}}: &{{.GCEWrapType}}{s}, + {{- end}} + } + return g +} + +// GCE implements Cloud. +var _ Cloud = (*GCE)(nil) + +// GCE is the golang adapter for the compute APIs. +type GCE struct { +{{- range .All}} + {{.Field}} *{{.GCEWrapType}} +{{- end}} +} + +{{range .All}} +func (gce *GCE) {{.WrapType}}() {{.WrapType}} { + return gce.{{.Field}} +} +{{- end}} + +// NewMockGCE returns a new mock for GCE. +func NewMockGCE() *MockGCE { + {{- range .Groups}} + mock{{.Service}}Objs := map[meta.Key]*Mock{{.Service}}Obj{} + {{- end}} + + mock := &MockGCE{ + {{- range .All}} + {{.MockField}}: New{{.MockWrapType}}(mock{{.Service}}Objs), + {{- end}} + } + return mock +} + +// MockGCE implements Cloud. +var _ Cloud = (*MockGCE)(nil) + +// MockGCE is the mock for the compute API. +type MockGCE struct { +{{- range .All}} + {{.MockField}} *{{.MockWrapType}} +{{- end}} +} +{{range .All}} +func (mock *MockGCE) {{.WrapType}}() {{.WrapType}} { + return mock.{{.MockField}} +} +{{end}} + +{{range .Groups}} +// Mock{{.Service}}Obj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type Mock{{.Service}}Obj struct { + Obj interface{} +} +{{- if .HasAlpha}} +// ToAlpha retrieves the given version of the object. +func (m *Mock{{.Service}}Obj) ToAlpha() *{{.Alpha.FQObjectType}} { + if ret, ok := m.Obj.(*{{.Alpha.FQObjectType}}); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &{{.Alpha.FQObjectType}}{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *{{.Alpha.FQObjectType}} via JSON: %v", m.Obj, err) + } + return ret +} +{{- end}} +{{- if .HasBeta}} +// ToBeta retrieves the given version of the object. +func (m *Mock{{.Service}}Obj) ToBeta() *{{.Beta.FQObjectType}} { + if ret, ok := m.Obj.(*{{.Beta.FQObjectType}}); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &{{.Beta.FQObjectType}}{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *{{.Beta.FQObjectType}} via JSON: %v", m.Obj, err) + } + return ret +} +{{- end}} +{{- if .HasGA}} +// ToGA retrieves the given version of the object. +func (m *Mock{{.Service}}Obj) ToGA() *{{.GA.FQObjectType}} { + if ret, ok := m.Obj.(*{{.GA.FQObjectType}}); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &{{.GA.FQObjectType}}{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *{{.GA.FQObjectType}} via JSON: %v", m.Obj, err) + } + return ret +} +{{- end}} +{{- end}} +` + data := struct { + All []*meta.ServiceInfo + Groups map[string]*meta.ServiceGroup + }{meta.AllServices, meta.AllServicesByGroup} + + tmpl := template.Must(template.New("interface").Parse(text)) + if err := tmpl.Execute(wr, data); err != nil { + panic(err) + } +} + +// genTypes generates the type wrappers. +func genTypes(wr io.Writer) { + const text = `// {{.WrapType}} is an interface that allows for mocking of {{.Service}}. +type {{.WrapType}} interface { +{{- if .GenerateCustomOps}} + // {{.WrapTypeOps}} is an interface with additional non-CRUD type methods. 
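
For orientation, the genStubs template above should expand, for the "HealthChecks" group (which has GA and alpha entries in meta.AllServices), to code roughly of this shape (approximate, not copied from gen.go):

    type Cloud interface {
        HealthChecks() HealthChecks
        AlphaHealthChecks() AlphaHealthChecks
        // ... one accessor per ServiceInfo entry ...
    }

    // Both versions share one map of MockHealthChecksObj values; ToGA() and
    // ToAlpha() convert between API versions by JSON copying.
    type MockHealthChecksObj struct {
        Obj interface{}
    }
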
+ // This interface is expected to be implemented by hand (non-autogenerated). + {{.WrapTypeOps}} +{{- end}} +{{- if .GenerateGet}} + Get(ctx context.Context, key meta.Key) (*{{.FQObjectType}}, error) +{{- end -}} +{{- if .GenerateList}} +{{- if .KeyIsGlobal}} + List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) +{{- end -}} +{{- if .KeyIsRegional}} + List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) +{{- end -}} +{{- if .KeyIsZonal}} + List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) +{{- end -}} +{{- end -}} +{{- if .GenerateInsert}} + Insert(ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) error +{{- end -}} +{{- if .GenerateDelete}} + Delete(ctx context.Context, key meta.Key) error +{{- end -}} +{{- if .AggregatedList}} + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) +{{- end}} +{{- with .Methods -}} +{{- range .}} + {{.InterfaceFunc}} +{{- end -}} +{{- end}} +} + +// New{{.MockWrapType}} returns a new mock for {{.Service}}. +func New{{.MockWrapType}}(objs map[meta.Key]*Mock{{.Service}}Obj) *{{.MockWrapType}} { + mock := &{{.MockWrapType}}{ + Objects: objs, + {{- if .GenerateGet}} + GetError: map[meta.Key]error{}, + {{- end -}} + {{- if .GenerateInsert}} + InsertError: map[meta.Key]error{}, + {{- end -}} + {{- if .GenerateDelete}} + DeleteError: map[meta.Key]error{}, + {{- end}} + } + return mock +} + +// {{.MockWrapType}} is the mock for {{.Service}}. +type {{.MockWrapType}} struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*Mock{{.Service}}Obj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + {{- if .GenerateGet}} + GetError map[meta.Key]error + {{- end -}} + {{- if .GenerateList}} + ListError *error + {{- end -}} + {{- if .GenerateInsert}} + InsertError map[meta.Key]error + {{- end -}} + {{- if .GenerateDelete}} + DeleteError map[meta.Key]error + {{- end -}} + {{- if .AggregatedList}} + AggregatedListError *error + {{- end}} + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ {{- if .GenerateGet}} + GetHook func(m *{{.MockWrapType}}, ctx context.Context, key meta.Key) (bool, *{{.FQObjectType}}, error) + {{- end -}} + {{- if .GenerateList}} + {{- if .KeyIsGlobal}} + ListHook func(m *{{.MockWrapType}}, ctx context.Context, fl *filter.F) (bool, []*{{.FQObjectType}}, error) + {{- end -}} + {{- if .KeyIsRegional}} + ListHook func(m *{{.MockWrapType}}, ctx context.Context, region string, fl *filter.F) (bool, []*{{.FQObjectType}}, error) + {{- end -}} + {{- if .KeyIsZonal}} + ListHook func(m *{{.MockWrapType}}, ctx context.Context, zone string, fl *filter.F) (bool, []*{{.FQObjectType}}, error) + {{- end}} + {{- end -}} + {{- if .GenerateInsert}} + InsertHook func(m *{{.MockWrapType}}, ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) (bool, error) + {{- end -}} + {{- if .GenerateDelete}} + DeleteHook func(m *{{.MockWrapType}}, ctx context.Context, key meta.Key) (bool, error) + {{- end -}} + {{- if .AggregatedList}} + AggregatedListHook func(m *{{.MockWrapType}}, ctx context.Context, fl *filter.F) (bool, map[string][]*{{.FQObjectType}}, error) + {{- end}} + +{{- with .Methods -}} +{{- range .}} + {{.MockHook}} +{{- end -}} +{{- end}} + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +{{- if .GenerateGet}} +// Get returns the object from the mock. +func (m *{{.MockWrapType}}) Get(ctx context.Context, key meta.Key) (*{{.FQObjectType}}, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, %v", ctx, key, obj ,err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.To{{.VersionTitle}}() + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key), + } + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} +{{- end}} + +{{- if .GenerateList}} +{{if .KeyIsGlobal -}} +// List all of the objects in the mock. +func (m *{{.MockWrapType}}) List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsRegional -}} +// List all of the objects in the mock in the given region. +func (m *{{.MockWrapType}}) List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsZonal -}} +// List all of the objects in the mock in the given zone. 
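
The xxxHook fields described above let a test intercept individual mock calls. A sketch of injecting a failure into Get (type and field names follow the template; the injected error is arbitrary):

    mock := cloud.NewMockGCE()
    mock.MockGlobalForwardingRules.GetHook = func(m *cloud.MockGlobalForwardingRules, ctx context.Context, key meta.Key) (bool, *ga.ForwardingRule, error) {
        if key.Name == "flaky" {
            return true, nil, &googleapi.Error{Code: http.StatusInternalServerError}
        }
        return false, nil, nil // fall through to the standard mock behavior
    }
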
+func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end}} + if m.ListHook != nil { + {{if .KeyIsGlobal -}} + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + {{- end -}} + {{- if .KeyIsRegional -}} + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + {{- end -}} + {{- if .KeyIsZonal -}} + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + {{- end}} + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + {{if .KeyIsGlobal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = nil, %v", ctx, fl, err) + {{- end -}} + {{- if .KeyIsRegional -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + {{- end -}} + {{- if .KeyIsZonal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + {{- end}} + + return nil, *m.ListError + } + + var objs []*{{.FQObjectType}} +{{- if .KeyIsGlobal}} + for _, obj := range m.Objects { +{{- else}} + for key, obj := range m.Objects { +{{- end -}} +{{- if .KeyIsRegional}} + if key.Region != region { + continue + } +{{- end -}} +{{- if .KeyIsZonal}} + if key.Zone != zone { + continue + } +{{- end}} + if ! fl.Match(obj.To{{.VersionTitle}}()) { + continue + } + objs = append(objs, obj.To{{.VersionTitle}}()) + } + + {{if .KeyIsGlobal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + {{- end -}} + {{- if .KeyIsRegional -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + {{- end -}} + {{- if .KeyIsZonal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + {{- end}} + return objs, nil +} +{{- end}} + +{{- if .GenerateInsert}} +// Insert is a mock for inserting/creating a new object. +func (m *{{.MockWrapType}}) Insert(ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("{{.MockWrapType}} %v exists", key), + } + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.Version{{.VersionTitle}}, "mock-project", "{{.Resource}}", key) + } + + m.Objects[key] = &Mock{{.Service}}Obj{obj} + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} +{{- end}} + +{{- if .GenerateDelete}} +// Delete is a mock for deleting the object. 
+func (m *{{.MockWrapType}}) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key), + } + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = nil", ctx, key) + return nil +} +{{- end}} + +{{- if .AggregatedList}} +// AggregatedList is a mock for AggregatedList. +func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(m, ctx, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*{{.FQObjectType}}{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.To{{.VersionTitle}}().SelfLink) + {{- if .KeyIsRegional}} + location := res.Key.Region + {{- end -}} + {{- if .KeyIsZonal}} + location := res.Key.Zone + {{- end}} + if err != nil { + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if ! fl.Match(obj.To{{.VersionTitle}}()) { + continue + } + objs[location] = append(objs[location], obj.To{{.VersionTitle}}()) + } + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} +{{- end}} + +// Obj wraps the object for use in the mock. +func (m *{{.MockWrapType}}) Obj(o *{{.FQObjectType}}) *Mock{{.Service}}Obj { + return &Mock{{.Service}}Obj{o} +} + +{{with .Methods -}} +{{- range .}} +// {{.Name}} is a mock for the corresponding method. +func (m *{{.MockWrapType}}) {{.FcnArgs}} { +{{- if eq .ReturnType "Operation"}} + if m.{{.MockHookName}} != nil { + return m.{{.MockHookName}}(m, ctx, key {{.CallArgs}}) + } + return nil +{{- else}} + if m.{{.MockHookName}} != nil { + return m.{{.MockHookName}}(m, ctx, key {{.CallArgs}}) + } + return nil, fmt.Errorf("{{.MockHookName}} must be set") +{{- end}} +} +{{end -}} +{{- end}} +// {{.GCEWrapType}} is a simplifying adapter for the GCE {{.Service}}. +type {{.GCEWrapType}} struct { + s *Service +} + +{{- if .GenerateGet}} +// Get the {{.Object}} named by key. 
+func (g *{{.GCEWrapType}}) Get(ctx context.Context, key meta.Key) (*{{.FQObjectType}}, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Get(projectID, key.Name) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Get(projectID, key.Region, key.Name) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Get(projectID, key.Zone, key.Name) +{{- end}} + call.Context(ctx) + return call.Do() +} +{{- end}} + +{{- if .GenerateList}} +// List all {{.Object}} objects. +{{- if .KeyIsGlobal}} +func (g *{{.GCEWrapType}}) List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsRegional}} +func (g *{{.GCEWrapType}}) List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsZonal}} +func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end}} +projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") +rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, region) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, zone) +{{- end}} + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*{{.FQObjectType}} + f := func(l *{{.ObjectListType}}) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} +{{- end}} + +{{- if .GenerateInsert}} +// Insert {{.Object}} with key of value obj. +func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Insert(projectID, obj) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Insert(projectID, key.Region, obj) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Insert(projectID, key.Zone, obj) +{{- end}} + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} +{{- end}} + +{{- if .GenerateDelete}} +// Delete the {{.Object}} referenced by key. 
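
A rough usage sketch of the generated GCE wrappers above: each call resolves the project through ProjectRouter and waits on RateLimiter before issuing the request, and mutating calls block on operation completion. Resource names are made up:

    svc := cloud.NewGCE(&cloud.Service{ /* compute clients plus ProjectRouter and RateLimiter */ })

    hc, err := svc.HealthChecks().Get(ctx, *meta.GlobalKey("my-hc"))
    if err != nil {
        // handle not-found and other API errors
    }
    _ = hc

    // Aggregated listing is only generated for services with the AggregatedList
    // option, e.g. the alpha NetworkEndpointGroups entry; the result maps
    // zone -> []*alpha.NetworkEndpointGroup.
    negs, err := svc.AlphaNetworkEndpointGroups().AggregatedList(ctx, filter.None)
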
+func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Delete(projectID, key.Name) +{{end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Delete(projectID, key.Region, key.Name) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Delete(projectID, key.Zone, key.Name) +{{- end}} + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} +{{end -}} + +{{- if .AggregatedList}} +// AggregatedList lists all resources of the given type across all locations. +func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + + call := g.s.{{.VersionTitle}}.{{.Service}}.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*{{.FQObjectType}}{} + f := func(l *{{.ObjectAggregatedListType}}) error { + for k, v := range l.Items { + all[k] = append(all[k], v.{{.AggregatedListField}}...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} +{{- end}} + +{{- with .Methods -}} +{{- range .}} +// {{.Name}} is a method on {{.GCEWrapType}}. +func (g *{{.GCEWrapType}}) {{.FcnArgs}} { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "{{.Name}}", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + {{- if eq .ReturnType "Operation"}} + return err + {{- else}} + return nil, err + {{- end}} + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.{{.Name}}(projectID, key.Name {{.CallArgs}}) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.{{.Name}}(projectID, key.Region, key.Name {{.CallArgs}}) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.{{.Name}}(projectID, key.Zone, key.Name {{.CallArgs}}) +{{- end}} + call.Context(ctx) +{{- if eq .ReturnType "Operation"}} + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +{{- else}} + return call.Do() +{{- end}} +} +{{end -}} +{{- end}} +` + tmpl := template.Must(template.New("interface").Parse(text)) + for _, s := range meta.AllServices { + if err := tmpl.Execute(wr, s); err != nil { + panic(err) + } + } +} + +func genUnitTestHeader(wr io.Writer) { + const text = `/* +Copyright {{.Year}} The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by "go run gen/main.go -mode test > gen_test.go". Do not edit +// directly. + +package cloud + +import ( + "context" + "reflect" + "testing" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "{{.PackageRoot}}/filter" + "{{.PackageRoot}}/meta" +) + +const location = "location" +` + tmpl := template.Must(template.New("header").Parse(text)) + values := map[string]string{ + "Year": fmt.Sprintf("%v", time.Now().Year()), + "PackageRoot": packageRoot, + } + if err := tmpl.Execute(wr, values); err != nil { + panic(err) + } +} + +func genUnitTestServices(wr io.Writer) { + const text = ` +func Test{{.Service}}Group(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key +{{- if .HasAlpha}} + keyAlpha := meta.{{.Alpha.MakeKey "key-alpha" "location"}} + key = keyAlpha +{{- end}} +{{- if .HasBeta}} + keyBeta := meta.{{.Beta.MakeKey "key-beta" "location"}} + key = keyBeta +{{- end}} +{{- if .HasGA}} + keyGA := meta.{{.GA.MakeKey "key-ga" "location"}} + key = keyGA +{{- end}} + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. +{{- if .HasAlpha}}{{- if .Alpha.GenerateGet}} + if _, err := mock.Alpha{{.Service}}().Get(ctx, *key); err == nil { + t.Errorf("Alpha{{.Service}}().Get(%v, %v) = _, nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateGet}} + if _, err := mock.Beta{{.Service}}().Get(ctx, *key); err == nil { + t.Errorf("Beta{{.Service}}().Get(%v, %v) = _, nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateGet}} + if _, err := mock.{{.Service}}().Get(ctx, *key); err == nil { + t.Errorf("{{.Service}}().Get(%v, %v) = _, nil; want error", ctx, key) + } +{{- end}}{{- end}} + + // Insert. +{{- if .HasAlpha}}{{- if .Alpha.GenerateInsert}} + { + obj := &alpha.{{.Alpha.Object}}{} + if err := mock.Alpha{{.Service}}().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("Alpha{{.Service}}().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateInsert}} + { + obj := &beta.{{.Beta.Object}}{} + if err := mock.Beta{{.Service}}().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("Beta{{.Service}}().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateInsert}} + { + obj := &ga.{{.GA.Object}}{} + if err := mock.{{.Service}}().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("{{.Service}}().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } +{{- end}}{{- end}} + + // Get across versions. 
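+	// An object inserted through one version should be readable through every
+	// other version, because the per-version mocks share the same underlying
+	// objects map (see NewMockGCE).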
+{{- if .HasAlpha}}{{- if .Alpha.GenerateInsert}}
+	if obj, err := mock.Alpha{{.Service}}().Get(ctx, *key); err != nil {
+		t.Errorf("Alpha{{.Service}}().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
+	}
+{{- end}}{{- end}}
+{{- if .HasBeta}}{{- if .Beta.GenerateInsert}}
+	if obj, err := mock.Beta{{.Service}}().Get(ctx, *key); err != nil {
+		t.Errorf("Beta{{.Service}}().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
+	}
+{{- end}}{{- end}}
+{{- if .HasGA}}{{- if .GA.GenerateInsert}}
+	if obj, err := mock.{{.Service}}().Get(ctx, *key); err != nil {
+		t.Errorf("{{.Service}}().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
+	}
+{{- end}}{{- end}}
+
+	// List.
+{{- if .HasAlpha}}
+	mock.MockAlpha{{.Service}}.Objects[*keyAlpha] = mock.MockAlpha{{.Service}}.Obj(&alpha.{{.Alpha.Object}}{Name: keyAlpha.Name})
+{{- end}}
+{{- if .HasBeta}}
+	mock.MockBeta{{.Service}}.Objects[*keyBeta] = mock.MockBeta{{.Service}}.Obj(&beta.{{.Beta.Object}}{Name: keyBeta.Name})
+{{- end}}
+{{- if .HasGA}}
+	mock.Mock{{.Service}}.Objects[*keyGA] = mock.Mock{{.Service}}.Obj(&ga.{{.GA.Object}}{Name: keyGA.Name})
+{{- end}}
+	want := map[string]bool{
+{{- if .HasAlpha}}
+		"key-alpha": true,
+{{- end}}
+{{- if .HasBeta}}
+		"key-beta": true,
+{{- end}}
+{{- if .HasGA}}
+		"key-ga": true,
+{{- end}}
+	}
+	_ = want // ignore unused variables.
+
+{{- if .HasAlpha}}{{- if .Alpha.GenerateList}}
+	{
+		{{- if .Alpha.KeyIsGlobal }}
+		objs, err := mock.Alpha{{.Service}}().List(ctx, filter.None)
+		{{- else}}
+		objs, err := mock.Alpha{{.Service}}().List(ctx, location, filter.None)
+		{{- end}}
+		if err != nil {
+			t.Errorf("Alpha{{.Service}}().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
+		} else {
+			got := map[string]bool{}
+			for _, obj := range objs {
+				got[obj.Name] = true
+			}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("Alpha{{.Service}}().List(); got %+v, want %+v", got, want)
+			}
+		}
+	}
+{{- end}}{{- end}}
+{{- if .HasBeta}}{{- if .Beta.GenerateList}}
+	{
+		{{- if .Beta.KeyIsGlobal }}
+		objs, err := mock.Beta{{.Service}}().List(ctx, filter.None)
+		{{- else}}
+		objs, err := mock.Beta{{.Service}}().List(ctx, location, filter.None)
+		{{- end}}
+		if err != nil {
+			t.Errorf("Beta{{.Service}}().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
+		} else {
+			got := map[string]bool{}
+			for _, obj := range objs {
+				got[obj.Name] = true
+			}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("Beta{{.Service}}().List(); got %+v, want %+v", got, want)
+			}
+		}
+	}
+{{- end}}{{- end}}
+{{- if .HasGA}}{{- if .GA.GenerateList}}
+	{
+		{{- if .GA.KeyIsGlobal }}
+		objs, err := mock.{{.Service}}().List(ctx, filter.None)
+		{{- else}}
+		objs, err := mock.{{.Service}}().List(ctx, location, filter.None)
+		{{- end}}
+		if err != nil {
+			t.Errorf("{{.Service}}().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
+		} else {
+			got := map[string]bool{}
+			for _, obj := range objs {
+				got[obj.Name] = true
+			}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("{{.Service}}().List(); got %+v, want %+v", got, want)
+			}
+		}
+	}
+{{- end}}{{- end}}
+
+	// Delete across versions.
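+	// Remove the objects created above through each version's interface; the
+	// "Delete not found" checks that follow then verify that deleting a
+	// missing key reports an error.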
+{{- if .HasAlpha}}{{- if .Alpha.GenerateDelete}} + if err := mock.Alpha{{.Service}}().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("Alpha{{.Service}}().Delete(%v, %v) = %v; want nil", ctx, key, err) + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateDelete}} + if err := mock.Beta{{.Service}}().Delete(ctx, *keyBeta); err != nil { + t.Errorf("Beta{{.Service}}().Delete(%v, %v) = %v; want nil", ctx, key, err) + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateDelete}} + if err := mock.{{.Service}}().Delete(ctx, *keyGA); err != nil { + t.Errorf("{{.Service}}().Delete(%v, %v) = %v; want nil", ctx, key, err) + } +{{- end}}{{- end}} + + // Delete not found. +{{- if .HasAlpha}}{{- if .Alpha.GenerateDelete}} + if err := mock.Alpha{{.Service}}().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("Alpha{{.Service}}().Delete(%v, %v) = nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateDelete}} + if err := mock.Beta{{.Service}}().Delete(ctx, *keyBeta); err == nil { + t.Errorf("Beta{{.Service}}().Delete(%v, %v) = nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateDelete}} + if err := mock.{{.Service}}().Delete(ctx, *keyGA); err == nil { + t.Errorf("{{.Service}}().Delete(%v, %v) = nil; want error", ctx, key) + } +{{- end}}{{- end}} +} +` + tmpl := template.Must(template.New("unittest").Parse(text)) + for _, s := range meta.AllServicesByGroup { + if err := tmpl.Execute(wr, s); err != nil { + panic(err) + } + } +} + +func main() { + flag.Parse() + + out := &bytes.Buffer{} + + switch flags.mode { + case "src": + genHeader(out) + genStubs(out) + genTypes(out) + case "test": + genUnitTestHeader(out) + genUnitTestServices(out) + default: + glog.Fatalf("Invalid -mode: %q", flags.mode) + } + + if flags.gofmt { + fmt.Print(gofmtContent(out)) + } else { + fmt.Print(out.String()) + } +} From 329e0b1cb57178196f8854bc67573bc4d2813454 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:35:41 -0800 Subject: [PATCH 074/264] support interfaces for the generated code --- .../providers/gce/cloud/project.go | 44 +++++++++++ .../providers/gce/cloud/ratelimit.go | 67 ++++++++++++++++ .../providers/gce/cloud/service.go | 79 +++++++++++++++++++ 3 files changed, 190 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/project.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/ratelimit.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/service.go diff --git a/pkg/cloudprovider/providers/gce/cloud/project.go b/pkg/cloudprovider/providers/gce/cloud/project.go new file mode 100644 index 00000000000..74299e4a23e --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/project.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// ProjectRouter routes service calls to the appropriate GCE project. 
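+//
+// As an illustrative sketch (the project ID below is a placeholder), a
+// Service can be wired with the single-project router and the no-op rate
+// limiter defined in this package:
+//
+//	svc := &Service{
+//		ProjectRouter: &SingleProjectRouter{ID: "my-project"},
+//		RateLimiter:   &NopRateLimiter{},
+//	}
+//	projectID := svc.ProjectRouter.ProjectID(context.Background(), "ga", "Addresses")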
+type ProjectRouter interface { + // ProjectID returns the project ID (non-numeric) to be used for a call + // to an API (version,service). Example tuples: ("ga", "ForwardingRules"), + // ("alpha", "GlobalAddresses"). + // + // This allows for plumbing different service calls to the appropriate + // project, for instance, networking services to a separate project + // than instance management. + ProjectID(ctx context.Context, version meta.Version, service string) string +} + +// SingleProjectRouter routes all service calls to the same project ID. +type SingleProjectRouter struct { + ID string +} + +func (r *SingleProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string { + return r.ID +} diff --git a/pkg/cloudprovider/providers/gce/cloud/ratelimit.go b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go new file mode 100644 index 00000000000..948f1d36d89 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "time" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// RateLimitKey is a key identifying the operation to be rate limited. The rate limit +// queue will be determined based on the contents of RateKey. +type RateLimitKey struct { + // ProjectID is the non-numeric ID of the project. + ProjectID string + // Operation is the specific method being invoked (e.g. "Get", "List"). + Operation string + // Version is the API version of the call. + Version meta.Version + // Service is the service being invoked (e.g. "Firewalls", "BackendServices") + Service string +} + +// RateLimiter is the interface for a rate limiting policy. +type RateLimiter interface { + // Accept uses the RateLimitKey to derive a sleep time for the calling + // goroutine. This call will block until the operation is ready for + // execution. + // + // Accept returns an error if the given context ctx was canceled + // while waiting for acceptance into the queue. + Accept(ctx context.Context, key *RateLimitKey) error +} + +// NopRateLimiter is a rate limiter that performs no rate limiting. +type NopRateLimiter struct { +} + +func (*NopRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error { + // Rate limit polling of the Operation status to avoid hammering GCE + // for the status of an operation. + const pollTime = time.Duration(1) * time.Second + if key.Operation == "Get" && key.Service == "Operations" { + select { + case <-time.NewTimer(pollTime).C: + break + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} diff --git a/pkg/cloudprovider/providers/gce/cloud/service.go b/pkg/cloudprovider/providers/gce/cloud/service.go new file mode 100644 index 00000000000..8a6c0a6cf95 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/service.go @@ -0,0 +1,79 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "fmt" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" +) + +// Service is the top-level adapter for all of the different compute API +// versions. +type Service struct { + GA *ga.Service + Alpha *alpha.Service + Beta *beta.Service + ProjectRouter ProjectRouter + RateLimiter RateLimiter +} + +// wrapOperation wraps a GCE anyOP in a version generic operation type. +func (g *Service) wrapOperation(anyOp interface{}) (operation, error) { + switch o := anyOp.(type) { + case *ga.Operation: + r, err := ParseResourceURL(o.SelfLink) + if err != nil { + return nil, err + } + return &gaOperation{g, o, r.ProjectID}, nil + case *alpha.Operation: + r, err := ParseResourceURL(o.SelfLink) + if err != nil { + return nil, err + } + return &alphaOperation{g, o, r.ProjectID}, nil + case *beta.Operation: + r, err := ParseResourceURL(o.SelfLink) + if err != nil { + return nil, err + } + return &betaOperation{g, o, r.ProjectID}, nil + default: + return nil, fmt.Errorf("invalid type %T", anyOp) + } +} + +// WaitForCompletion of a long running operation. This will poll the state of +// GCE for the completion status of the given operation. genericOp can be one +// of alpha, beta, ga Operation types. +func (g *Service) WaitForCompletion(ctx context.Context, genericOp interface{}) error { + op, err := g.wrapOperation(genericOp) + if err != nil { + return err + } + for done, err := op.isDone(ctx); !done; done, err = op.isDone(ctx) { + if err != nil { + return err + } + g.RateLimiter.Accept(ctx, op.rateLimitKey()) + } + return nil +} From e230bd967b3cab2a264cbf59b03660dcd24a130c Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:35:58 -0800 Subject: [PATCH 075/264] Generated code (see gen/main.go for the source) --- pkg/cloudprovider/providers/gce/cloud/gen.go | 10351 ++++++++++++++++ .../providers/gce/cloud/gen_test.go | 1749 +++ 2 files changed, 12100 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen_test.go diff --git a/pkg/cloudprovider/providers/gce/cloud/gen.go b/pkg/cloudprovider/providers/gce/cloud/gen.go new file mode 100644 index 00000000000..ef7a2c62eaf --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -0,0 +1,10351 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was generated by "go run gen/main.go > gen.go". Do not edit +// directly. + +package cloud + +import ( + "context" + "fmt" + "net/http" + "sync" + + "github.com/golang/glog" + "google.golang.org/api/googleapi" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" +) + +// Cloud is an interface for the GCE compute API. +type Cloud interface { + Addresses() Addresses + AlphaAddresses() AlphaAddresses + BetaAddresses() BetaAddresses + GlobalAddresses() GlobalAddresses + BackendServices() BackendServices + AlphaBackendServices() AlphaBackendServices + AlphaRegionBackendServices() AlphaRegionBackendServices + Disks() Disks + AlphaDisks() AlphaDisks + AlphaRegionDisks() AlphaRegionDisks + Firewalls() Firewalls + ForwardingRules() ForwardingRules + AlphaForwardingRules() AlphaForwardingRules + GlobalForwardingRules() GlobalForwardingRules + HealthChecks() HealthChecks + AlphaHealthChecks() AlphaHealthChecks + HttpHealthChecks() HttpHealthChecks + HttpsHealthChecks() HttpsHealthChecks + InstanceGroups() InstanceGroups + Instances() Instances + BetaInstances() BetaInstances + AlphaInstances() AlphaInstances + AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups + Projects() Projects + Regions() Regions + Routes() Routes + SslCertificates() SslCertificates + TargetHttpProxies() TargetHttpProxies + TargetHttpsProxies() TargetHttpsProxies + TargetPools() TargetPools + UrlMaps() UrlMaps + Zones() Zones +} + +// NewGCE returns a GCE. +func NewGCE(s *Service) *GCE { + g := &GCE{ + gceAddresses: &GCEAddresses{s}, + gceAlphaAddresses: &GCEAlphaAddresses{s}, + gceBetaAddresses: &GCEBetaAddresses{s}, + gceGlobalAddresses: &GCEGlobalAddresses{s}, + gceBackendServices: &GCEBackendServices{s}, + gceAlphaBackendServices: &GCEAlphaBackendServices{s}, + gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s}, + gceDisks: &GCEDisks{s}, + gceAlphaDisks: &GCEAlphaDisks{s}, + gceAlphaRegionDisks: &GCEAlphaRegionDisks{s}, + gceFirewalls: &GCEFirewalls{s}, + gceForwardingRules: &GCEForwardingRules{s}, + gceAlphaForwardingRules: &GCEAlphaForwardingRules{s}, + gceGlobalForwardingRules: &GCEGlobalForwardingRules{s}, + gceHealthChecks: &GCEHealthChecks{s}, + gceAlphaHealthChecks: &GCEAlphaHealthChecks{s}, + gceHttpHealthChecks: &GCEHttpHealthChecks{s}, + gceHttpsHealthChecks: &GCEHttpsHealthChecks{s}, + gceInstanceGroups: &GCEInstanceGroups{s}, + gceInstances: &GCEInstances{s}, + gceBetaInstances: &GCEBetaInstances{s}, + gceAlphaInstances: &GCEAlphaInstances{s}, + gceAlphaNetworkEndpointGroups: &GCEAlphaNetworkEndpointGroups{s}, + gceProjects: &GCEProjects{s}, + gceRegions: &GCERegions{s}, + gceRoutes: &GCERoutes{s}, + gceSslCertificates: &GCESslCertificates{s}, + gceTargetHttpProxies: &GCETargetHttpProxies{s}, + gceTargetHttpsProxies: &GCETargetHttpsProxies{s}, + gceTargetPools: &GCETargetPools{s}, + gceUrlMaps: &GCEUrlMaps{s}, + gceZones: &GCEZones{s}, + } + return g +} + +// GCE implements Cloud. +var _ Cloud = (*GCE)(nil) + +// GCE is the golang adapter for the compute APIs. 
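+// Use NewGCE to construct one from a *Service that has been populated with
+// the per-version compute clients, a ProjectRouter and a RateLimiter.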
+type GCE struct { + gceAddresses *GCEAddresses + gceAlphaAddresses *GCEAlphaAddresses + gceBetaAddresses *GCEBetaAddresses + gceGlobalAddresses *GCEGlobalAddresses + gceBackendServices *GCEBackendServices + gceAlphaBackendServices *GCEAlphaBackendServices + gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices + gceDisks *GCEDisks + gceAlphaDisks *GCEAlphaDisks + gceAlphaRegionDisks *GCEAlphaRegionDisks + gceFirewalls *GCEFirewalls + gceForwardingRules *GCEForwardingRules + gceAlphaForwardingRules *GCEAlphaForwardingRules + gceGlobalForwardingRules *GCEGlobalForwardingRules + gceHealthChecks *GCEHealthChecks + gceAlphaHealthChecks *GCEAlphaHealthChecks + gceHttpHealthChecks *GCEHttpHealthChecks + gceHttpsHealthChecks *GCEHttpsHealthChecks + gceInstanceGroups *GCEInstanceGroups + gceInstances *GCEInstances + gceBetaInstances *GCEBetaInstances + gceAlphaInstances *GCEAlphaInstances + gceAlphaNetworkEndpointGroups *GCEAlphaNetworkEndpointGroups + gceProjects *GCEProjects + gceRegions *GCERegions + gceRoutes *GCERoutes + gceSslCertificates *GCESslCertificates + gceTargetHttpProxies *GCETargetHttpProxies + gceTargetHttpsProxies *GCETargetHttpsProxies + gceTargetPools *GCETargetPools + gceUrlMaps *GCEUrlMaps + gceZones *GCEZones +} + +func (gce *GCE) Addresses() Addresses { + return gce.gceAddresses +} +func (gce *GCE) AlphaAddresses() AlphaAddresses { + return gce.gceAlphaAddresses +} +func (gce *GCE) BetaAddresses() BetaAddresses { + return gce.gceBetaAddresses +} +func (gce *GCE) GlobalAddresses() GlobalAddresses { + return gce.gceGlobalAddresses +} +func (gce *GCE) BackendServices() BackendServices { + return gce.gceBackendServices +} +func (gce *GCE) AlphaBackendServices() AlphaBackendServices { + return gce.gceAlphaBackendServices +} +func (gce *GCE) AlphaRegionBackendServices() AlphaRegionBackendServices { + return gce.gceAlphaRegionBackendServices +} +func (gce *GCE) Disks() Disks { + return gce.gceDisks +} +func (gce *GCE) AlphaDisks() AlphaDisks { + return gce.gceAlphaDisks +} +func (gce *GCE) AlphaRegionDisks() AlphaRegionDisks { + return gce.gceAlphaRegionDisks +} +func (gce *GCE) Firewalls() Firewalls { + return gce.gceFirewalls +} +func (gce *GCE) ForwardingRules() ForwardingRules { + return gce.gceForwardingRules +} +func (gce *GCE) AlphaForwardingRules() AlphaForwardingRules { + return gce.gceAlphaForwardingRules +} +func (gce *GCE) GlobalForwardingRules() GlobalForwardingRules { + return gce.gceGlobalForwardingRules +} +func (gce *GCE) HealthChecks() HealthChecks { + return gce.gceHealthChecks +} +func (gce *GCE) AlphaHealthChecks() AlphaHealthChecks { + return gce.gceAlphaHealthChecks +} +func (gce *GCE) HttpHealthChecks() HttpHealthChecks { + return gce.gceHttpHealthChecks +} +func (gce *GCE) HttpsHealthChecks() HttpsHealthChecks { + return gce.gceHttpsHealthChecks +} +func (gce *GCE) InstanceGroups() InstanceGroups { + return gce.gceInstanceGroups +} +func (gce *GCE) Instances() Instances { + return gce.gceInstances +} +func (gce *GCE) BetaInstances() BetaInstances { + return gce.gceBetaInstances +} +func (gce *GCE) AlphaInstances() AlphaInstances { + return gce.gceAlphaInstances +} +func (gce *GCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { + return gce.gceAlphaNetworkEndpointGroups +} +func (gce *GCE) Projects() Projects { + return gce.gceProjects +} +func (gce *GCE) Regions() Regions { + return gce.gceRegions +} +func (gce *GCE) Routes() Routes { + return gce.gceRoutes +} +func (gce *GCE) SslCertificates() SslCertificates { + return 
gce.gceSslCertificates +} +func (gce *GCE) TargetHttpProxies() TargetHttpProxies { + return gce.gceTargetHttpProxies +} +func (gce *GCE) TargetHttpsProxies() TargetHttpsProxies { + return gce.gceTargetHttpsProxies +} +func (gce *GCE) TargetPools() TargetPools { + return gce.gceTargetPools +} +func (gce *GCE) UrlMaps() UrlMaps { + return gce.gceUrlMaps +} +func (gce *GCE) Zones() Zones { + return gce.gceZones +} + +// NewMockGCE returns a new mock for GCE. +func NewMockGCE() *MockGCE { + mockAddressesObjs := map[meta.Key]*MockAddressesObj{} + mockBackendServicesObjs := map[meta.Key]*MockBackendServicesObj{} + mockDisksObjs := map[meta.Key]*MockDisksObj{} + mockFirewallsObjs := map[meta.Key]*MockFirewallsObj{} + mockForwardingRulesObjs := map[meta.Key]*MockForwardingRulesObj{} + mockGlobalAddressesObjs := map[meta.Key]*MockGlobalAddressesObj{} + mockGlobalForwardingRulesObjs := map[meta.Key]*MockGlobalForwardingRulesObj{} + mockHealthChecksObjs := map[meta.Key]*MockHealthChecksObj{} + mockHttpHealthChecksObjs := map[meta.Key]*MockHttpHealthChecksObj{} + mockHttpsHealthChecksObjs := map[meta.Key]*MockHttpsHealthChecksObj{} + mockInstanceGroupsObjs := map[meta.Key]*MockInstanceGroupsObj{} + mockInstancesObjs := map[meta.Key]*MockInstancesObj{} + mockNetworkEndpointGroupsObjs := map[meta.Key]*MockNetworkEndpointGroupsObj{} + mockProjectsObjs := map[meta.Key]*MockProjectsObj{} + mockRegionBackendServicesObjs := map[meta.Key]*MockRegionBackendServicesObj{} + mockRegionDisksObjs := map[meta.Key]*MockRegionDisksObj{} + mockRegionsObjs := map[meta.Key]*MockRegionsObj{} + mockRoutesObjs := map[meta.Key]*MockRoutesObj{} + mockSslCertificatesObjs := map[meta.Key]*MockSslCertificatesObj{} + mockTargetHttpProxiesObjs := map[meta.Key]*MockTargetHttpProxiesObj{} + mockTargetHttpsProxiesObjs := map[meta.Key]*MockTargetHttpsProxiesObj{} + mockTargetPoolsObjs := map[meta.Key]*MockTargetPoolsObj{} + mockUrlMapsObjs := map[meta.Key]*MockUrlMapsObj{} + mockZonesObjs := map[meta.Key]*MockZonesObj{} + + mock := &MockGCE{ + MockAddresses: NewMockAddresses(mockAddressesObjs), + MockAlphaAddresses: NewMockAlphaAddresses(mockAddressesObjs), + MockBetaAddresses: NewMockBetaAddresses(mockAddressesObjs), + MockGlobalAddresses: NewMockGlobalAddresses(mockGlobalAddressesObjs), + MockBackendServices: NewMockBackendServices(mockBackendServicesObjs), + MockAlphaBackendServices: NewMockAlphaBackendServices(mockBackendServicesObjs), + MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(mockRegionBackendServicesObjs), + MockDisks: NewMockDisks(mockDisksObjs), + MockAlphaDisks: NewMockAlphaDisks(mockDisksObjs), + MockAlphaRegionDisks: NewMockAlphaRegionDisks(mockRegionDisksObjs), + MockFirewalls: NewMockFirewalls(mockFirewallsObjs), + MockForwardingRules: NewMockForwardingRules(mockForwardingRulesObjs), + MockAlphaForwardingRules: NewMockAlphaForwardingRules(mockForwardingRulesObjs), + MockGlobalForwardingRules: NewMockGlobalForwardingRules(mockGlobalForwardingRulesObjs), + MockHealthChecks: NewMockHealthChecks(mockHealthChecksObjs), + MockAlphaHealthChecks: NewMockAlphaHealthChecks(mockHealthChecksObjs), + MockHttpHealthChecks: NewMockHttpHealthChecks(mockHttpHealthChecksObjs), + MockHttpsHealthChecks: NewMockHttpsHealthChecks(mockHttpsHealthChecksObjs), + MockInstanceGroups: NewMockInstanceGroups(mockInstanceGroupsObjs), + MockInstances: NewMockInstances(mockInstancesObjs), + MockBetaInstances: NewMockBetaInstances(mockInstancesObjs), + MockAlphaInstances: NewMockAlphaInstances(mockInstancesObjs), + 
MockAlphaNetworkEndpointGroups: NewMockAlphaNetworkEndpointGroups(mockNetworkEndpointGroupsObjs), + MockProjects: NewMockProjects(mockProjectsObjs), + MockRegions: NewMockRegions(mockRegionsObjs), + MockRoutes: NewMockRoutes(mockRoutesObjs), + MockSslCertificates: NewMockSslCertificates(mockSslCertificatesObjs), + MockTargetHttpProxies: NewMockTargetHttpProxies(mockTargetHttpProxiesObjs), + MockTargetHttpsProxies: NewMockTargetHttpsProxies(mockTargetHttpsProxiesObjs), + MockTargetPools: NewMockTargetPools(mockTargetPoolsObjs), + MockUrlMaps: NewMockUrlMaps(mockUrlMapsObjs), + MockZones: NewMockZones(mockZonesObjs), + } + return mock +} + +// MockGCE implements Cloud. +var _ Cloud = (*MockGCE)(nil) + +// MockGCE is the mock for the compute API. +type MockGCE struct { + MockAddresses *MockAddresses + MockAlphaAddresses *MockAlphaAddresses + MockBetaAddresses *MockBetaAddresses + MockGlobalAddresses *MockGlobalAddresses + MockBackendServices *MockBackendServices + MockAlphaBackendServices *MockAlphaBackendServices + MockAlphaRegionBackendServices *MockAlphaRegionBackendServices + MockDisks *MockDisks + MockAlphaDisks *MockAlphaDisks + MockAlphaRegionDisks *MockAlphaRegionDisks + MockFirewalls *MockFirewalls + MockForwardingRules *MockForwardingRules + MockAlphaForwardingRules *MockAlphaForwardingRules + MockGlobalForwardingRules *MockGlobalForwardingRules + MockHealthChecks *MockHealthChecks + MockAlphaHealthChecks *MockAlphaHealthChecks + MockHttpHealthChecks *MockHttpHealthChecks + MockHttpsHealthChecks *MockHttpsHealthChecks + MockInstanceGroups *MockInstanceGroups + MockInstances *MockInstances + MockBetaInstances *MockBetaInstances + MockAlphaInstances *MockAlphaInstances + MockAlphaNetworkEndpointGroups *MockAlphaNetworkEndpointGroups + MockProjects *MockProjects + MockRegions *MockRegions + MockRoutes *MockRoutes + MockSslCertificates *MockSslCertificates + MockTargetHttpProxies *MockTargetHttpProxies + MockTargetHttpsProxies *MockTargetHttpsProxies + MockTargetPools *MockTargetPools + MockUrlMaps *MockUrlMaps + MockZones *MockZones +} + +func (mock *MockGCE) Addresses() Addresses { + return mock.MockAddresses +} + +func (mock *MockGCE) AlphaAddresses() AlphaAddresses { + return mock.MockAlphaAddresses +} + +func (mock *MockGCE) BetaAddresses() BetaAddresses { + return mock.MockBetaAddresses +} + +func (mock *MockGCE) GlobalAddresses() GlobalAddresses { + return mock.MockGlobalAddresses +} + +func (mock *MockGCE) BackendServices() BackendServices { + return mock.MockBackendServices +} + +func (mock *MockGCE) AlphaBackendServices() AlphaBackendServices { + return mock.MockAlphaBackendServices +} + +func (mock *MockGCE) AlphaRegionBackendServices() AlphaRegionBackendServices { + return mock.MockAlphaRegionBackendServices +} + +func (mock *MockGCE) Disks() Disks { + return mock.MockDisks +} + +func (mock *MockGCE) AlphaDisks() AlphaDisks { + return mock.MockAlphaDisks +} + +func (mock *MockGCE) AlphaRegionDisks() AlphaRegionDisks { + return mock.MockAlphaRegionDisks +} + +func (mock *MockGCE) Firewalls() Firewalls { + return mock.MockFirewalls +} + +func (mock *MockGCE) ForwardingRules() ForwardingRules { + return mock.MockForwardingRules +} + +func (mock *MockGCE) AlphaForwardingRules() AlphaForwardingRules { + return mock.MockAlphaForwardingRules +} + +func (mock *MockGCE) GlobalForwardingRules() GlobalForwardingRules { + return mock.MockGlobalForwardingRules +} + +func (mock *MockGCE) HealthChecks() HealthChecks { + return mock.MockHealthChecks +} + +func (mock *MockGCE) 
AlphaHealthChecks() AlphaHealthChecks { + return mock.MockAlphaHealthChecks +} + +func (mock *MockGCE) HttpHealthChecks() HttpHealthChecks { + return mock.MockHttpHealthChecks +} + +func (mock *MockGCE) HttpsHealthChecks() HttpsHealthChecks { + return mock.MockHttpsHealthChecks +} + +func (mock *MockGCE) InstanceGroups() InstanceGroups { + return mock.MockInstanceGroups +} + +func (mock *MockGCE) Instances() Instances { + return mock.MockInstances +} + +func (mock *MockGCE) BetaInstances() BetaInstances { + return mock.MockBetaInstances +} + +func (mock *MockGCE) AlphaInstances() AlphaInstances { + return mock.MockAlphaInstances +} + +func (mock *MockGCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { + return mock.MockAlphaNetworkEndpointGroups +} + +func (mock *MockGCE) Projects() Projects { + return mock.MockProjects +} + +func (mock *MockGCE) Regions() Regions { + return mock.MockRegions +} + +func (mock *MockGCE) Routes() Routes { + return mock.MockRoutes +} + +func (mock *MockGCE) SslCertificates() SslCertificates { + return mock.MockSslCertificates +} + +func (mock *MockGCE) TargetHttpProxies() TargetHttpProxies { + return mock.MockTargetHttpProxies +} + +func (mock *MockGCE) TargetHttpsProxies() TargetHttpsProxies { + return mock.MockTargetHttpsProxies +} + +func (mock *MockGCE) TargetPools() TargetPools { + return mock.MockTargetPools +} + +func (mock *MockGCE) UrlMaps() UrlMaps { + return mock.MockUrlMaps +} + +func (mock *MockGCE) Zones() Zones { + return mock.MockZones +} + +// MockAddressesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockAddressesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockAddressesObj) ToAlpha() *alpha.Address { + if ret, ok := m.Obj.(*alpha.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockAddressesObj) ToBeta() *beta.Address { + if ret, ok := m.Obj.(*beta.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockAddressesObj) ToGA() *ga.Address { + if ret, ok := m.Obj.(*ga.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// MockBackendServicesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockBackendServicesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. 
+func (m *MockBackendServicesObj) ToAlpha() *alpha.BackendService { + if ret, ok := m.Obj.(*alpha.BackendService); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.BackendService{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockBackendServicesObj) ToGA() *ga.BackendService { + if ret, ok := m.Obj.(*ga.BackendService); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.BackendService{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) + } + return ret +} + +// MockDisksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockDisksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockDisksObj) ToAlpha() *alpha.Disk { + if ret, ok := m.Obj.(*alpha.Disk); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Disk{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Disk via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockDisksObj) ToGA() *ga.Disk { + if ret, ok := m.Obj.(*ga.Disk); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Disk{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) + } + return ret +} + +// MockFirewallsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockFirewallsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockFirewallsObj) ToGA() *ga.Firewall { + if ret, ok := m.Obj.(*ga.Firewall); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Firewall{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) + } + return ret +} + +// MockForwardingRulesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockForwardingRulesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockForwardingRulesObj) ToAlpha() *alpha.ForwardingRule { + if ret, ok := m.Obj.(*alpha.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. 
+func (m *MockForwardingRulesObj) ToGA() *ga.ForwardingRule { + if ret, ok := m.Obj.(*ga.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + +// MockGlobalAddressesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockGlobalAddressesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockGlobalAddressesObj) ToGA() *ga.Address { + if ret, ok := m.Obj.(*ga.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// MockGlobalForwardingRulesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockGlobalForwardingRulesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockGlobalForwardingRulesObj) ToGA() *ga.ForwardingRule { + if ret, ok := m.Obj.(*ga.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + +// MockHealthChecksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockHealthChecksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockHealthChecksObj) ToAlpha() *alpha.HealthCheck { + if ret, ok := m.Obj.(*alpha.HealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.HealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockHealthChecksObj) ToGA() *ga.HealthCheck { + if ret, ok := m.Obj.(*ga.HealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.HealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// MockHttpHealthChecksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockHttpHealthChecksObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockHttpHealthChecksObj) ToGA() *ga.HttpHealthCheck { + if ret, ok := m.Obj.(*ga.HttpHealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. 
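+	// Fields that have no counterpart in the requested version are dropped by
+	// the JSON round trip.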
+ ret := &ga.HttpHealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// MockHttpsHealthChecksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockHttpsHealthChecksObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockHttpsHealthChecksObj) ToGA() *ga.HttpsHealthCheck { + if ret, ok := m.Obj.(*ga.HttpsHealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.HttpsHealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// MockInstanceGroupsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockInstanceGroupsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockInstanceGroupsObj) ToGA() *ga.InstanceGroup { + if ret, ok := m.Obj.(*ga.InstanceGroup); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.InstanceGroup{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err) + } + return ret +} + +// MockInstancesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockInstancesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockInstancesObj) ToAlpha() *alpha.Instance { + if ret, ok := m.Obj.(*alpha.Instance); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Instance{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockInstancesObj) ToBeta() *beta.Instance { + if ret, ok := m.Obj.(*beta.Instance); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.Instance{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockInstancesObj) ToGA() *ga.Instance { + if ret, ok := m.Obj.(*ga.Instance); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Instance{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err) + } + return ret +} + +// MockNetworkEndpointGroupsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockNetworkEndpointGroupsObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. 
+func (m *MockNetworkEndpointGroupsObj) ToAlpha() *alpha.NetworkEndpointGroup { + if ret, ok := m.Obj.(*alpha.NetworkEndpointGroup); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.NetworkEndpointGroup{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err) + } + return ret +} + +// MockProjectsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockProjectsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockProjectsObj) ToGA() *ga.Project { + if ret, ok := m.Obj.(*ga.Project); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Project{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionBackendServicesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionBackendServicesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockRegionBackendServicesObj) ToAlpha() *alpha.BackendService { + if ret, ok := m.Obj.(*alpha.BackendService); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.BackendService{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionDisksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionDisksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockRegionDisksObj) ToAlpha() *alpha.Disk { + if ret, ok := m.Obj.(*alpha.Disk); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Disk{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Disk via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockRegionsObj) ToGA() *ga.Region { + if ret, ok := m.Obj.(*ga.Region); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Region{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRoutesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRoutesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. 
+func (m *MockRoutesObj) ToGA() *ga.Route { + if ret, ok := m.Obj.(*ga.Route); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Route{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) + } + return ret +} + +// MockSslCertificatesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockSslCertificatesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockSslCertificatesObj) ToGA() *ga.SslCertificate { + if ret, ok := m.Obj.(*ga.SslCertificate); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.SslCertificate{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err) + } + return ret +} + +// MockTargetHttpProxiesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockTargetHttpProxiesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockTargetHttpProxiesObj) ToGA() *ga.TargetHttpProxy { + if ret, ok := m.Obj.(*ga.TargetHttpProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.TargetHttpProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// MockTargetHttpsProxiesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockTargetHttpsProxiesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockTargetHttpsProxiesObj) ToGA() *ga.TargetHttpsProxy { + if ret, ok := m.Obj.(*ga.TargetHttpsProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.TargetHttpsProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// MockTargetPoolsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockTargetPoolsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockTargetPoolsObj) ToGA() *ga.TargetPool { + if ret, ok := m.Obj.(*ga.TargetPool); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.TargetPool{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err) + } + return ret +} + +// MockUrlMapsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. 
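+// This allows for multiple API versions to co-exist and
+// share the same "view" of the objects in the backend.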
+type MockUrlMapsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockUrlMapsObj) ToGA() *ga.UrlMap { + if ret, ok := m.Obj.(*ga.UrlMap); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.UrlMap{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err) + } + return ret +} + +// MockZonesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockZonesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockZonesObj) ToGA() *ga.Zone { + if ret, ok := m.Obj.(*ga.Zone); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Zone{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err) + } + return ret +} + +// Addresses is an interface that allows for mocking of Addresses. +type Addresses interface { + Get(ctx context.Context, key meta.Key) (*ga.Address, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAddresses returns a new mock for Addresses. +func NewMockAddresses(objs map[meta.Key]*MockAddressesObj) *MockAddresses { + mock := &MockAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAddresses is the mock for Addresses. +type MockAddresses struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockAddressesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAddresses, ctx context.Context, key meta.Key) (bool, *ga.Address, error) + ListHook func(m *MockAddresses, ctx context.Context, region string, fl *filter.F) (bool, []*ga.Address, error) + InsertHook func(m *MockAddresses, ctx context.Context, key meta.Key, obj *ga.Address) (bool, error) + DeleteHook func(m *MockAddresses, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
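+// GetHook, if set, runs first; otherwise GetError and then the shared Objects
+// map are consulted, and a googleapi 404 error is returned for a missing key.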
+func (m *MockAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAddresses %v not found", key), + } + glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Address + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAddresses %v exists", key), + } + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockAddressesObj{obj} + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
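The error maps and hooks declared above are the two ways a test steers the mock from outside. A sketch of wiring them up in a test (the key name, region, and error text are illustrative; meta.RegionalKey is assumed to be the regional key constructor from the meta package):

func TestMockAddressesSteering(t *testing.T) {
	mock := NewMockAddresses(map[meta.Key]*MockAddressesObj{})
	key := *meta.RegionalKey("addr-1", "us-central1")

	// Seed the mock through the normal Insert path.
	if err := mock.Insert(context.Background(), key, &ga.Address{Address: "10.0.0.1"}); err != nil {
		t.Fatal(err)
	}

	// Inject an error for this key; Get now fails without consulting Objects.
	mock.GetError[key] = errors.New("injected outage")
	if _, err := mock.Get(context.Background(), key); err == nil {
		t.Fatal("expected injected error")
	}

	// Or intercept Get entirely; returning false falls through to the
	// standard mock behavior.
	mock.GetHook = func(m *MockAddresses, ctx context.Context, k meta.Key) (bool, *ga.Address, error) {
		return k.Name == "addr-1", &ga.Address{Name: "addr-1"}, nil
	}
}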
+func (m *MockAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAddresses %v not found", key), + } + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAddresses) Obj(o *ga.Address) *MockAddressesObj { + return &MockAddressesObj{o} +} + +// GCEAddresses is a simplifying adapter for the GCE Addresses. +type GCEAddresses struct { + s *Service +} + +// Get the Address named by key. +func (g *GCEAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Addresses.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Addresses.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Address + f := func(l *ga.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. +func (g *GCEAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Addresses.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. +func (g *GCEAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Addresses.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaAddresses is an interface that allows for mocking of Addresses. 
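Because MockAddresses and GCEAddresses both satisfy the Addresses interface, callers can be written against the interface and exercised with either implementation. A hypothetical helper (not part of this change) might look like:

// ensureAddress creates the address only if Get does not find it. It runs
// unchanged against GCEAddresses in production and MockAddresses in tests.
// A production-grade version would inspect the googleapi.Error code and only
// treat http.StatusNotFound as "missing".
func ensureAddress(ctx context.Context, svc Addresses, key meta.Key, ip string) error {
	if _, err := svc.Get(ctx, key); err == nil {
		return nil // already present
	}
	return svc.Insert(ctx, key, &ga.Address{Address: ip})
}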
+type AlphaAddresses interface { + Get(ctx context.Context, key meta.Key) (*alpha.Address, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAlphaAddresses returns a new mock for Addresses. +func NewMockAlphaAddresses(objs map[meta.Key]*MockAddressesObj) *MockAlphaAddresses { + mock := &MockAlphaAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaAddresses is the mock for Addresses. +type MockAlphaAddresses struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockAddressesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaAddresses, ctx context.Context, key meta.Key) (bool, *alpha.Address, error) + ListHook func(m *MockAlphaAddresses, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.Address, error) + InsertHook func(m *MockAlphaAddresses, ctx context.Context, key meta.Key, obj *alpha.Address) (bool, error) + DeleteHook func(m *MockAlphaAddresses, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaAddresses) Get(ctx context.Context, key meta.Key) (*alpha.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), + } + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. 
+func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Address + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaAddresses) Insert(ctx context.Context, key meta.Key, obj *alpha.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaAddresses %v exists", key), + } + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockAddressesObj{obj} + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), + } + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaAddresses) Obj(o *alpha.Address) *MockAddressesObj { + return &MockAddressesObj{o} +} + +// GCEAlphaAddresses is a simplifying adapter for the GCE Addresses. +type GCEAlphaAddresses struct { + s *Service +} + +// Get the Address named by key. 
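Since MockAlphaAddresses stores its entries in the same MockAddressesObj wrapper and the same map type as the GA mock, the two mocks can be handed one shared map and will present consistent state across API versions, converting on access via ToGA/ToAlpha. A sketch (the address value is illustrative):

objs := map[meta.Key]*MockAddressesObj{}
gaMock := NewMockAddresses(objs)
alphaMock := NewMockAlphaAddresses(objs)

key := *meta.RegionalKey("addr-1", "us-central1")
// Insert through the alpha mock...
_ = alphaMock.Insert(context.Background(), key, &alpha.Address{Address: "10.0.0.2"})

// ...and the GA mock sees the same object, converted by ToGA via copyViaJSON.
got, _ := gaMock.Get(context.Background(), key)
fmt.Println(got.Address) // "10.0.0.2"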
+func (g *GCEAlphaAddresses) Get(ctx context.Context, key meta.Key) (*alpha.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Addresses.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Addresses.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Address + f := func(l *alpha.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. +func (g *GCEAlphaAddresses) Insert(ctx context.Context, key meta.Key, obj *alpha.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.Addresses.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. +func (g *GCEAlphaAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Addresses.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// BetaAddresses is an interface that allows for mocking of Addresses. +type BetaAddresses interface { + Get(ctx context.Context, key meta.Key) (*beta.Address, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) + Insert(ctx context.Context, key meta.Key, obj *beta.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockBetaAddresses returns a new mock for Addresses. +func NewMockBetaAddresses(objs map[meta.Key]*MockAddressesObj) *MockBetaAddresses { + mock := &MockBetaAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaAddresses is the mock for Addresses. +type MockBetaAddresses struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockAddressesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. 
+ GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockBetaAddresses, ctx context.Context, key meta.Key) (bool, *beta.Address, error) + ListHook func(m *MockBetaAddresses, ctx context.Context, region string, fl *filter.F) (bool, []*beta.Address, error) + InsertHook func(m *MockBetaAddresses, ctx context.Context, key meta.Key, obj *beta.Address) (bool, error) + DeleteHook func(m *MockBetaAddresses, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaAddresses) Get(ctx context.Context, key meta.Key) (*beta.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToBeta() + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaAddresses %v not found", key), + } + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.Address + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockBetaAddresses) Insert(ctx context.Context, key meta.Key, obj *beta.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaAddresses %v exists", key), + } + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionBeta, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockAddressesObj{obj} + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaAddresses %v not found", key), + } + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaAddresses) Obj(o *beta.Address) *MockAddressesObj { + return &MockAddressesObj{o} +} + +// GCEBetaAddresses is a simplifying adapter for the GCE Addresses. +type GCEBetaAddresses struct { + s *Service +} + +// Get the Address named by key. +func (g *GCEBetaAddresses) Get(ctx context.Context, key meta.Key) (*beta.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Addresses.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Addresses.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.Address + f := func(l *beta.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. 
+func (g *GCEBetaAddresses) Insert(ctx context.Context, key meta.Key, obj *beta.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Beta.Addresses.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. +func (g *GCEBetaAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Addresses.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// GlobalAddresses is an interface that allows for mocking of GlobalAddresses. +type GlobalAddresses interface { + Get(ctx context.Context, key meta.Key) (*ga.Address, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockGlobalAddresses returns a new mock for GlobalAddresses. +func NewMockGlobalAddresses(objs map[meta.Key]*MockGlobalAddressesObj) *MockGlobalAddresses { + mock := &MockGlobalAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockGlobalAddresses is the mock for GlobalAddresses. +type MockGlobalAddresses struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockGlobalAddressesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockGlobalAddresses, ctx context.Context, key meta.Key) (bool, *ga.Address, error) + ListHook func(m *MockGlobalAddresses, ctx context.Context, fl *filter.F) (bool, []*ga.Address, error) + InsertHook func(m *MockGlobalAddresses, ctx context.Context, key meta.Key, obj *ga.Address) (bool, error) + DeleteHook func(m *MockGlobalAddresses, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockGlobalAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), + } + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Address + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockGlobalAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockGlobalAddresses %v exists", key), + } + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockGlobalAddressesObj{obj} + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockGlobalAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), + } + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockGlobalAddresses) Obj(o *ga.Address) *MockGlobalAddressesObj { + return &MockGlobalAddressesObj{o} +} + +// GCEGlobalAddresses is a simplifying adapter for the GCE GlobalAddresses. +type GCEGlobalAddresses struct { + s *Service +} + +// Get the Address named by key. +func (g *GCEGlobalAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalAddresses.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalAddresses.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Address + f := func(l *ga.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. +func (g *GCEGlobalAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.GlobalAddresses.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. 
+func (g *GCEGlobalAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.GlobalAddresses.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// BackendServices is an interface that allows for mocking of BackendServices. +type BackendServices interface { + Get(ctx context.Context, key meta.Key) (*ga.BackendService, error) + List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) + Insert(ctx context.Context, key meta.Key, obj *ga.BackendService) error + Delete(ctx context.Context, key meta.Key) error + GetHealth(context.Context, meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) + Update(context.Context, meta.Key, *ga.BackendService) error +} + +// NewMockBackendServices returns a new mock for BackendServices. +func NewMockBackendServices(objs map[meta.Key]*MockBackendServicesObj) *MockBackendServices { + mock := &MockBackendServices{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBackendServices is the mock for BackendServices. +type MockBackendServices struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockBackendServicesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockBackendServices, ctx context.Context, key meta.Key) (bool, *ga.BackendService, error) + ListHook func(m *MockBackendServices, ctx context.Context, fl *filter.F) (bool, []*ga.BackendService, error) + InsertHook func(m *MockBackendServices, ctx context.Context, key meta.Key, obj *ga.BackendService) (bool, error) + DeleteHook func(m *MockBackendServices, ctx context.Context, key meta.Key) (bool, error) + GetHealthHook func(*MockBackendServices, context.Context, meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) + UpdateHook func(*MockBackendServices, context.Context, meta.Key, *ga.BackendService) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
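Every GCEXxx adapter method above follows the same preamble: resolve the project through ProjectRouter, build a RateLimitKey, and call RateLimiter.Accept before issuing the API call; mutating calls then block in WaitForCompletion until the returned operation finishes. A minimal rate limiter that admits every call could look like this (the type name is hypothetical; only the Accept signature is inferred from the calls above):

// acceptAllLimiter satisfies the RateLimiter dependency used by the adapters.
// Accept is consulted once per API call and may block or return an error to
// shed load; this version never throttles.
type acceptAllLimiter struct{}

func (acceptAllLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	// Still honor context cancellation so a cancelled caller is not admitted.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}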
+func (m *MockBackendServices) Get(ctx context.Context, key meta.Key) (*ga.BackendService, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBackendServices %v not found", key), + } + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.BackendService + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBackendServices) Insert(ctx context.Context, key meta.Key, obj *ga.BackendService) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBackendServices %v exists", key), + } + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "backendServices", key) + } + + m.Objects[key] = &MockBackendServicesObj{obj} + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockBackendServices) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBackendServices %v not found", key), + } + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBackendServices) Obj(o *ga.BackendService) *MockBackendServicesObj { + return &MockBackendServicesObj{o} +} + +// GetHealth is a mock for the corresponding method. +func (m *MockBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { + if m.GetHealthHook != nil { + return m.GetHealthHook(m, ctx, key, arg0) + } + return nil, fmt.Errorf("GetHealthHook must be set") +} + +// Update is a mock for the corresponding method. +func (m *MockBackendServices) Update(ctx context.Context, key meta.Key, arg0 *ga.BackendService) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEBackendServices is a simplifying adapter for the GCE BackendServices. +type GCEBackendServices struct { + s *Service +} + +// Get the BackendService named by key. +func (g *GCEBackendServices) Get(ctx context.Context, key meta.Key) (*ga.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.BackendServices.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all BackendService objects. +func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.BackendServices.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.BackendService + f := func(l *ga.BackendServiceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert BackendService with key of value obj. 
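Unlike Get, List, Insert, and Delete, the mocked GetHealth has no default behavior: it returns an error unless GetHealthHook is set. A test exercising health checks therefore supplies the hook, for example (the returned health status is illustrative):

mock := NewMockBackendServices(map[meta.Key]*MockBackendServicesObj{})
mock.GetHealthHook = func(m *MockBackendServices, ctx context.Context, key meta.Key, ref *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) {
	return &ga.BackendServiceGroupHealth{
		HealthStatus: []*ga.HealthStatus{{HealthState: "HEALTHY"}},
	}, nil
}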
+func (g *GCEBackendServices) Insert(ctx context.Context, key meta.Key, obj *ga.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.BackendServices.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the BackendService referenced by key. +func (g *GCEBackendServices) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.BackendServices.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// GetHealth is a method on GCEBackendServices. +func (g *GCEBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "GetHealth", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.BackendServices.GetHealth(projectID, key.Name, arg0) + call.Context(ctx) + return call.Do() +} + +// Update is a method on GCEBackendServices. +func (g *GCEBackendServices) Update(ctx context.Context, key meta.Key, arg0 *ga.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.BackendServices.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaBackendServices is an interface that allows for mocking of BackendServices. +type AlphaBackendServices interface { + Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *alpha.BackendService) error +} + +// NewMockAlphaBackendServices returns a new mock for BackendServices. +func NewMockAlphaBackendServices(objs map[meta.Key]*MockBackendServicesObj) *MockAlphaBackendServices { + mock := &MockAlphaBackendServices{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaBackendServices is the mock for BackendServices. +type MockAlphaBackendServices struct { + Lock sync.Mutex + + // Objects maintained by the mock. 
+ Objects map[meta.Key]*MockBackendServicesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaBackendServices, ctx context.Context, key meta.Key) (bool, *alpha.BackendService, error) + ListHook func(m *MockAlphaBackendServices, ctx context.Context, fl *filter.F) (bool, []*alpha.BackendService, error) + InsertHook func(m *MockAlphaBackendServices, ctx context.Context, key meta.Key, obj *alpha.BackendService) (bool, error) + DeleteHook func(m *MockAlphaBackendServices, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockAlphaBackendServices, context.Context, meta.Key, *alpha.BackendService) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), + } + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.BackendService + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaBackendServices %v exists", key), + } + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "backendServices", key) + } + + m.Objects[key] = &MockBackendServicesObj{obj} + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaBackendServices) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), + } + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaBackendServices) Obj(o *alpha.BackendService) *MockBackendServicesObj { + return &MockBackendServicesObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockAlphaBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEAlphaBackendServices is a simplifying adapter for the GCE BackendServices. +type GCEAlphaBackendServices struct { + s *Service +} + +// Get the BackendService named by key. +func (g *GCEAlphaBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.BackendServices.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all BackendService objects. 
+func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.BackendServices.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.BackendService + f := func(l *alpha.BackendServiceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert BackendService with key of value obj. +func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.BackendServices.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the BackendService referenced by key. +func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.BackendServices.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEAlphaBackendServices. +func (g *GCEAlphaBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("alpha"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.BackendServices.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaRegionBackendServices is an interface that allows for mocking of RegionBackendServices. +type AlphaRegionBackendServices interface { + Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error + Delete(ctx context.Context, key meta.Key) error + GetHealth(context.Context, meta.Key, *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) + Update(context.Context, meta.Key, *alpha.BackendService) error +} + +// NewMockAlphaRegionBackendServices returns a new mock for RegionBackendServices. 
+func NewMockAlphaRegionBackendServices(objs map[meta.Key]*MockRegionBackendServicesObj) *MockAlphaRegionBackendServices { + mock := &MockAlphaRegionBackendServices{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaRegionBackendServices is the mock for RegionBackendServices. +type MockAlphaRegionBackendServices struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionBackendServicesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaRegionBackendServices, ctx context.Context, key meta.Key) (bool, *alpha.BackendService, error) + ListHook func(m *MockAlphaRegionBackendServices, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.BackendService, error) + InsertHook func(m *MockAlphaRegionBackendServices, ctx context.Context, key meta.Key, obj *alpha.BackendService) (bool, error) + DeleteHook func(m *MockAlphaRegionBackendServices, ctx context.Context, key meta.Key) (bool, error) + GetHealthHook func(*MockAlphaRegionBackendServices, context.Context, meta.Key, *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) + UpdateHook func(*MockAlphaRegionBackendServices, context.Context, meta.Key, *alpha.BackendService) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), + } + glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. 
+func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.BackendService + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaRegionBackendServices %v exists", key), + } + glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "backendServices", key) + } + + m.Objects[key] = &MockRegionBackendServicesObj{obj} + glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), + } + glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaRegionBackendServices) Obj(o *alpha.BackendService) *MockRegionBackendServicesObj { + return &MockRegionBackendServicesObj{o} +} + +// GetHealth is a mock for the corresponding method. 
+func (m *MockAlphaRegionBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { + if m.GetHealthHook != nil { + return m.GetHealthHook(m, ctx, key, arg0) + } + return nil, fmt.Errorf("GetHealthHook must be set") +} + +// Update is a mock for the corresponding method. +func (m *MockAlphaRegionBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEAlphaRegionBackendServices is a simplifying adapter for the GCE RegionBackendServices. +type GCEAlphaRegionBackendServices struct { + s *Service +} + +// Get the BackendService named by key. +func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.RegionBackendServices.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all BackendService objects. +func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.RegionBackendServices.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.BackendService + f := func(l *alpha.BackendServiceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert BackendService with key of value obj. +func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.RegionBackendServices.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the BackendService referenced by key. +func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.RegionBackendServices.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// GetHealth is a method on GCEAlphaRegionBackendServices. 
+func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "GetHealth", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + return call.Do() +} + +// Update is a method on GCEAlphaRegionBackendServices. +func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Disks is an interface that allows for mocking of Disks. +type Disks interface { + Get(ctx context.Context, key meta.Key) (*ga.Disk, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Disk) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockDisks returns a new mock for Disks. +func NewMockDisks(objs map[meta.Key]*MockDisksObj) *MockDisks { + mock := &MockDisks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockDisks is the mock for Disks. +type MockDisks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockDisksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockDisks, ctx context.Context, key meta.Key) (bool, *ga.Disk, error) + ListHook func(m *MockDisks, ctx context.Context, zone string, fl *filter.F) (bool, []*ga.Disk, error) + InsertHook func(m *MockDisks, ctx context.Context, key meta.Key, obj *ga.Disk) (bool, error) + DeleteHook func(m *MockDisks, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
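+// A test can force failures without a hook by seeding the per-key error maps,
+// or take over a call entirely via GetHook. Illustrative sketch (not generated
+// code; the key and error below are hypothetical):
+//
+//	mock := NewMockDisks(map[meta.Key]*MockDisksObj{})
+//	key := meta.Key{Name: "disk-1", Zone: "us-central1-a"}
+//	mock.GetError[key] = &googleapi.Error{Code: http.StatusForbidden, Message: "injected"}
+//	_, err := mock.Get(context.Background(), key) // returns the injected error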
+func (m *MockDisks) Get(ctx context.Context, key meta.Key) (*ga.Disk, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockDisks %v not found", key), + } + glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Disk + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockDisks) Insert(ctx context.Context, key meta.Key, obj *ga.Disk) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockDisks %v exists", key), + } + glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "disks", key) + } + + m.Objects[key] = &MockDisksObj{obj} + glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockDisks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockDisks %v not found", key), + } + glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockDisks) Obj(o *ga.Disk) *MockDisksObj { + return &MockDisksObj{o} +} + +// GCEDisks is a simplifying adapter for the GCE Disks. +type GCEDisks struct { + s *Service +} + +// Get the Disk named by key. +func (g *GCEDisks) Get(ctx context.Context, key meta.Key) (*ga.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Disks.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Disk objects. +func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Disks.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Disk + f := func(l *ga.DiskList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Disk with key of value obj. +func (g *GCEDisks) Insert(ctx context.Context, key meta.Key, obj *ga.Disk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Disks.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Disk referenced by key. +func (g *GCEDisks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Disks.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaDisks is an interface that allows for mocking of Disks. +type AlphaDisks interface { + Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAlphaDisks returns a new mock for Disks. +func NewMockAlphaDisks(objs map[meta.Key]*MockDisksObj) *MockAlphaDisks { + mock := &MockAlphaDisks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaDisks is the mock for Disks. +type MockAlphaDisks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockDisksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. 
+ GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaDisks, ctx context.Context, key meta.Key) (bool, *alpha.Disk, error) + ListHook func(m *MockAlphaDisks, ctx context.Context, zone string, fl *filter.F) (bool, []*alpha.Disk, error) + InsertHook func(m *MockAlphaDisks, ctx context.Context, key meta.Key, obj *alpha.Disk) (bool, error) + DeleteHook func(m *MockAlphaDisks, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaDisks %v not found", key), + } + glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockAlphaDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Disk + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
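+// Inserting the same key twice mirrors the real API: the second call fails with
+// an HTTP 409 conflict. Illustrative sketch (not generated code; the key is
+// hypothetical):
+//
+//	mock := NewMockAlphaDisks(map[meta.Key]*MockDisksObj{})
+//	key := meta.Key{Name: "disk-1", Zone: "us-central1-b"}
+//	_ = mock.Insert(context.Background(), key, &alpha.Disk{})    // stored in mock.Objects
+//	err := mock.Insert(context.Background(), key, &alpha.Disk{}) // googleapi.Error with Code 409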
+func (m *MockAlphaDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaDisks %v exists", key), + } + glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "disks", key) + } + + m.Objects[key] = &MockDisksObj{obj} + glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaDisks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaDisks %v not found", key), + } + glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaDisks) Obj(o *alpha.Disk) *MockDisksObj { + return &MockDisksObj{o} +} + +// GCEAlphaDisks is a simplifying adapter for the GCE Disks. +type GCEAlphaDisks struct { + s *Service +} + +// Get the Disk named by key. +func (g *GCEAlphaDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Disks.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Disk objects. +func (g *GCEAlphaDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Disks.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Disk + f := func(l *alpha.DiskList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Disk with key of value obj. 
+func (g *GCEAlphaDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.Disks.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Disk referenced by key. +func (g *GCEAlphaDisks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Disks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Disks.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaRegionDisks is an interface that allows for mocking of RegionDisks. +type AlphaRegionDisks interface { + Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAlphaRegionDisks returns a new mock for RegionDisks. +func NewMockAlphaRegionDisks(objs map[meta.Key]*MockRegionDisksObj) *MockAlphaRegionDisks { + mock := &MockAlphaRegionDisks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaRegionDisks is the mock for RegionDisks. +type MockAlphaRegionDisks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionDisksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaRegionDisks, ctx context.Context, key meta.Key) (bool, *alpha.Disk, error) + ListHook func(m *MockAlphaRegionDisks, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.Disk, error) + InsertHook func(m *MockAlphaRegionDisks, ctx context.Context, key meta.Key, obj *alpha.Disk) (bool, error) + DeleteHook func(m *MockAlphaRegionDisks, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
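+// For regional resources the meta.Key carries a Region rather than a Zone, so
+// lookups are region-qualified and List only returns objects whose key.Region
+// matches. Illustrative sketch (not generated code; names are hypothetical, and
+// filter.None is assumed to match everything):
+//
+//	mock := NewMockAlphaRegionDisks(map[meta.Key]*MockRegionDisksObj{})
+//	key := meta.Key{Name: "disk-1", Region: "us-central1"}
+//	_ = mock.Insert(context.Background(), key, &alpha.Disk{})
+//	disks, _ := mock.List(context.Background(), "us-central1", filter.None) // one item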
+func (m *MockAlphaRegionDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionDisks %v not found", key), + } + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Disk + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaRegionDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaRegionDisks %v exists", key), + } + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "disks", key) + } + + m.Objects[key] = &MockRegionDisksObj{obj} + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaRegionDisks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionDisks %v not found", key), + } + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaRegionDisks) Obj(o *alpha.Disk) *MockRegionDisksObj { + return &MockRegionDisksObj{o} +} + +// GCEAlphaRegionDisks is a simplifying adapter for the GCE RegionDisks. +type GCEAlphaRegionDisks struct { + s *Service +} + +// Get the Disk named by key. +func (g *GCEAlphaRegionDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.RegionDisks.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Disk objects. +func (g *GCEAlphaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.RegionDisks.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Disk + f := func(l *alpha.DiskList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Disk with key of value obj. +func (g *GCEAlphaRegionDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.RegionDisks.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Disk referenced by key. 
+func (g *GCEAlphaRegionDisks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.RegionDisks.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Firewalls is an interface that allows for mocking of Firewalls. +type Firewalls interface { + Get(ctx context.Context, key meta.Key) (*ga.Firewall, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Firewall) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *ga.Firewall) error +} + +// NewMockFirewalls returns a new mock for Firewalls. +func NewMockFirewalls(objs map[meta.Key]*MockFirewallsObj) *MockFirewalls { + mock := &MockFirewalls{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockFirewalls is the mock for Firewalls. +type MockFirewalls struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockFirewallsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockFirewalls, ctx context.Context, key meta.Key) (bool, *ga.Firewall, error) + ListHook func(m *MockFirewalls, ctx context.Context, fl *filter.F) (bool, []*ga.Firewall, error) + InsertHook func(m *MockFirewalls, ctx context.Context, key meta.Key, obj *ga.Firewall) (bool, error) + DeleteHook func(m *MockFirewalls, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockFirewalls, context.Context, meta.Key, *ga.Firewall) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockFirewalls) Get(ctx context.Context, key meta.Key) (*ga.Firewall, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockFirewalls %v not found", key), + } + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
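+// A mock can also be seeded directly through its exported Objects map (or via
+// the constructor argument); Obj wraps a typed object for storage. Illustrative
+// sketch (not generated code; the firewall below is hypothetical):
+//
+//	mock := NewMockFirewalls(map[meta.Key]*MockFirewallsObj{})
+//	fw := &ga.Firewall{Name: "allow-health-checks"}
+//	mock.Objects[meta.Key{Name: fw.Name}] = mock.Obj(fw)
+//	fws, _ := mock.List(context.Background(), filter.None) // includes "allow-health-checks"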
+func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Firewall + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockFirewalls) Insert(ctx context.Context, key meta.Key, obj *ga.Firewall) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockFirewalls %v exists", key), + } + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "firewalls", key) + } + + m.Objects[key] = &MockFirewallsObj{obj} + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockFirewalls) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockFirewalls %v not found", key), + } + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockFirewalls) Obj(o *ga.Firewall) *MockFirewallsObj { + return &MockFirewallsObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockFirewalls) Update(ctx context.Context, key meta.Key, arg0 *ga.Firewall) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEFirewalls is a simplifying adapter for the GCE Firewalls. +type GCEFirewalls struct { + s *Service +} + +// Get the Firewall named by key. 
+func (g *GCEFirewalls) Get(ctx context.Context, key meta.Key) (*ga.Firewall, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Firewalls.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Firewall objects. +func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Firewalls.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Firewall + f := func(l *ga.FirewallList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Firewall with key of value obj. +func (g *GCEFirewalls) Insert(ctx context.Context, key meta.Key, obj *ga.Firewall) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Firewalls.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Firewall referenced by key. +func (g *GCEFirewalls) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Firewalls.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEFirewalls. +func (g *GCEFirewalls) Update(ctx context.Context, key meta.Key, arg0 *ga.Firewall) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Firewalls.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// ForwardingRules is an interface that allows for mocking of ForwardingRules. +type ForwardingRules interface { + Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) + Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockForwardingRules returns a new mock for ForwardingRules. 
+func NewMockForwardingRules(objs map[meta.Key]*MockForwardingRulesObj) *MockForwardingRules { + mock := &MockForwardingRules{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockForwardingRules is the mock for ForwardingRules. +type MockForwardingRules struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockForwardingRulesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockForwardingRules, ctx context.Context, key meta.Key) (bool, *ga.ForwardingRule, error) + ListHook func(m *MockForwardingRules, ctx context.Context, region string, fl *filter.F) (bool, []*ga.ForwardingRule, error) + InsertHook func(m *MockForwardingRules, ctx context.Context, key meta.Key, obj *ga.ForwardingRule) (bool, error) + DeleteHook func(m *MockForwardingRules, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockForwardingRules %v not found", key), + } + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.ForwardingRule + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
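+// As in the real API, the object's Name is taken from the key, and the mock
+// fills in a synthetic SelfLink when none is set. Illustrative sketch (not
+// generated code; names are hypothetical):
+//
+//	mock := NewMockForwardingRules(map[meta.Key]*MockForwardingRulesObj{})
+//	key := meta.Key{Name: "fr-1", Region: "us-east1"}
+//	_ = mock.Insert(context.Background(), key, &ga.ForwardingRule{})
+//	fr, _ := mock.Get(context.Background(), key) // fr.Name == "fr-1", fr.SelfLink is non-empty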
+func (m *MockForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockForwardingRules %v exists", key), + } + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "forwardingRules", key) + } + + m.Objects[key] = &MockForwardingRulesObj{obj} + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockForwardingRules) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockForwardingRules %v not found", key), + } + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockForwardingRules) Obj(o *ga.ForwardingRule) *MockForwardingRulesObj { + return &MockForwardingRulesObj{o} +} + +// GCEForwardingRules is a simplifying adapter for the GCE ForwardingRules. +type GCEForwardingRules struct { + s *Service +} + +// Get the ForwardingRule named by key. +func (g *GCEForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.ForwardingRules.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all ForwardingRule objects. +func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.ForwardingRules.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.ForwardingRule + f := func(l *ga.ForwardingRuleList) error { + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert ForwardingRule with key of value obj. +func (g *GCEForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.ForwardingRules.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the ForwardingRule referenced by key. +func (g *GCEForwardingRules) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.ForwardingRules.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaForwardingRules is an interface that allows for mocking of ForwardingRules. +type AlphaForwardingRules interface { + Get(ctx context.Context, key meta.Key) (*alpha.ForwardingRule, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAlphaForwardingRules returns a new mock for ForwardingRules. +func NewMockAlphaForwardingRules(objs map[meta.Key]*MockForwardingRulesObj) *MockAlphaForwardingRules { + mock := &MockAlphaForwardingRules{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaForwardingRules is the mock for ForwardingRules. +type MockAlphaForwardingRules struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockForwardingRulesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaForwardingRules, ctx context.Context, key meta.Key) (bool, *alpha.ForwardingRule, error) + ListHook func(m *MockAlphaForwardingRules, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.ForwardingRule, error) + InsertHook func(m *MockAlphaForwardingRules, ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) (bool, error) + DeleteHook func(m *MockAlphaForwardingRules, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockAlphaForwardingRules) Get(ctx context.Context, key meta.Key) (*alpha.ForwardingRule, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), + } + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.ForwardingRule + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaForwardingRules %v exists", key), + } + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "forwardingRules", key) + } + + m.Objects[key] = &MockForwardingRulesObj{obj} + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), + } + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaForwardingRules) Obj(o *alpha.ForwardingRule) *MockForwardingRulesObj { + return &MockForwardingRulesObj{o} +} + +// GCEAlphaForwardingRules is a simplifying adapter for the GCE ForwardingRules. +type GCEAlphaForwardingRules struct { + s *Service +} + +// Get the ForwardingRule named by key. +func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key meta.Key) (*alpha.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.ForwardingRules.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all ForwardingRule objects. +func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.ForwardingRules.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.ForwardingRule + f := func(l *alpha.ForwardingRuleList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert ForwardingRule with key of value obj. +func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.ForwardingRules.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the ForwardingRule referenced by key. 
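+// Like every generated GCE* adapter method, this resolves the project with
+// ProjectRouter.ProjectID, reserves quota with RateLimiter.Accept using a
+// per-operation RateLimitKey, attaches the context via call.Context, and,
+// because it mutates state, blocks on WaitForCompletion until the returned
+// operation finishes.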
+func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.ForwardingRules.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// GlobalForwardingRules is an interface that allows for mocking of GlobalForwardingRules. +type GlobalForwardingRules interface { + Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) + List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) + Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error + Delete(ctx context.Context, key meta.Key) error + SetTarget(context.Context, meta.Key, *ga.TargetReference) error +} + +// NewMockGlobalForwardingRules returns a new mock for GlobalForwardingRules. +func NewMockGlobalForwardingRules(objs map[meta.Key]*MockGlobalForwardingRulesObj) *MockGlobalForwardingRules { + mock := &MockGlobalForwardingRules{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockGlobalForwardingRules is the mock for GlobalForwardingRules. +type MockGlobalForwardingRules struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockGlobalForwardingRulesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockGlobalForwardingRules, ctx context.Context, key meta.Key) (bool, *ga.ForwardingRule, error) + ListHook func(m *MockGlobalForwardingRules, ctx context.Context, fl *filter.F) (bool, []*ga.ForwardingRule, error) + InsertHook func(m *MockGlobalForwardingRules, ctx context.Context, key meta.Key, obj *ga.ForwardingRule) (bool, error) + DeleteHook func(m *MockGlobalForwardingRules, ctx context.Context, key meta.Key) (bool, error) + SetTargetHook func(*MockGlobalForwardingRules, context.Context, meta.Key, *ga.TargetReference) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockGlobalForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), + } + glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.ForwardingRule + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockGlobalForwardingRules %v exists", key), + } + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "forwardingRules", key) + } + + m.Objects[key] = &MockGlobalForwardingRulesObj{obj} + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), + } + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockGlobalForwardingRules) Obj(o *ga.ForwardingRule) *MockGlobalForwardingRulesObj { + return &MockGlobalForwardingRulesObj{o} +} + +// SetTarget is a mock for the corresponding method. +func (m *MockGlobalForwardingRules) SetTarget(ctx context.Context, key meta.Key, arg0 *ga.TargetReference) error { + if m.SetTargetHook != nil { + return m.SetTargetHook(m, ctx, key, arg0) + } + return nil +} + +// GCEGlobalForwardingRules is a simplifying adapter for the GCE GlobalForwardingRules. +type GCEGlobalForwardingRules struct { + s *Service +} + +// Get the ForwardingRule named by key. +func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalForwardingRules.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all ForwardingRule objects. +func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalForwardingRules.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.ForwardingRule + f := func(l *ga.ForwardingRuleList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert ForwardingRule with key of value obj. +func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.GlobalForwardingRules.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the ForwardingRule referenced by key. 
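SetTarget has no default mock behavior beyond delegating to SetTargetHook when it is set, so a test that cares about retargeting has to supply the hook itself. A small sketch, under the same same-package assumption as above, that records every target reference the code under test passes in:

func recordSetTargets(mock *MockGlobalForwardingRules) *[]*ga.TargetReference {
	var got []*ga.TargetReference
	mock.SetTargetHook = func(m *MockGlobalForwardingRules, ctx context.Context, key meta.Key, ref *ga.TargetReference) error {
		// Record the reference so the test can assert on it later.
		got = append(got, ref)
		return nil
	}
	return &got
}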
+func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.GlobalForwardingRules.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetTarget is a method on GCEGlobalForwardingRules. +func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key meta.Key, arg0 *ga.TargetReference) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetTarget", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// HealthChecks is an interface that allows for mocking of HealthChecks. +type HealthChecks interface { + Get(ctx context.Context, key meta.Key) (*ga.HealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) + Insert(ctx context.Context, key meta.Key, obj *ga.HealthCheck) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *ga.HealthCheck) error +} + +// NewMockHealthChecks returns a new mock for HealthChecks. +func NewMockHealthChecks(objs map[meta.Key]*MockHealthChecksObj) *MockHealthChecks { + mock := &MockHealthChecks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockHealthChecks is the mock for HealthChecks. +type MockHealthChecks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockHealthChecksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockHealthChecks, ctx context.Context, key meta.Key) (bool, *ga.HealthCheck, error) + ListHook func(m *MockHealthChecks, ctx context.Context, fl *filter.F) (bool, []*ga.HealthCheck, error) + InsertHook func(m *MockHealthChecks, ctx context.Context, key meta.Key, obj *ga.HealthCheck) (bool, error) + DeleteHook func(m *MockHealthChecks, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockHealthChecks, context.Context, meta.Key, *ga.HealthCheck) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
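Because MockHealthChecks and GCEHealthChecks both satisfy the HealthChecks interface defined above, code under test can be written against the interface and exercised with either implementation. A hedged sketch; ensureHealthCheck is a hypothetical helper and its not-found handling is deliberately simplified:

func ensureHealthCheck(ctx context.Context, hcs HealthChecks, key meta.Key, desired *ga.HealthCheck) error {
	if _, err := hcs.Get(ctx, key); err == nil {
		// Already present; a real helper would diff and call Update here.
		return nil
	}
	// Treat any Get error as "not found" for the purposes of this sketch.
	return hcs.Insert(ctx, key, desired)
}

In a unit test the same helper runs against NewMockHealthChecks(...); in production it runs against the GCEHealthChecks adapter.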
+func (m *MockHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.HealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockHealthChecks %v exists", key), + } + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "healthChecks", key) + } + + m.Objects[key] = &MockHealthChecksObj{obj} + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockHealthChecks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockHealthChecks) Obj(o *ga.HealthCheck) *MockHealthChecksObj { + return &MockHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEHealthChecks is a simplifying adapter for the GCE HealthChecks. +type GCEHealthChecks struct { + s *Service +} + +// Get the HealthCheck named by key. +func (g *GCEHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HealthChecks.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all HealthCheck objects. +func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HealthChecks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.HealthCheck + f := func(l *ga.HealthCheckList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert HealthCheck with key of value obj. +func (g *GCEHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.HealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the HealthCheck referenced by key. 
+func (g *GCEHealthChecks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEHealthChecks. +func (g *GCEHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HealthChecks.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaHealthChecks is an interface that allows for mocking of HealthChecks. +type AlphaHealthChecks interface { + Get(ctx context.Context, key meta.Key) (*alpha.HealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.HealthCheck) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *alpha.HealthCheck) error +} + +// NewMockAlphaHealthChecks returns a new mock for HealthChecks. +func NewMockAlphaHealthChecks(objs map[meta.Key]*MockHealthChecksObj) *MockAlphaHealthChecks { + mock := &MockAlphaHealthChecks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaHealthChecks is the mock for HealthChecks. +type MockAlphaHealthChecks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockHealthChecksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaHealthChecks, ctx context.Context, key meta.Key) (bool, *alpha.HealthCheck, error) + ListHook func(m *MockAlphaHealthChecks, ctx context.Context, fl *filter.F) (bool, []*alpha.HealthCheck, error) + InsertHook func(m *MockAlphaHealthChecks, ctx context.Context, key meta.Key, obj *alpha.HealthCheck) (bool, error) + DeleteHook func(m *MockAlphaHealthChecks, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockAlphaHealthChecks, context.Context, meta.Key, *alpha.HealthCheck) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
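Every GCE* adapter method builds a RateLimitKey and asks the Service's RateLimiter to Accept it before issuing the API call. Assuming RateLimiter is an interface whose only method is the Accept call used above (the interface itself is not shown in this hunk), a test-oriented implementation that simply counts calls per key might look like the sketch below; the type name is illustrative:

type countingRateLimiter struct {
	mu    sync.Mutex
	calls map[RateLimitKey]int
}

func (rl *countingRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	rl.mu.Lock()
	defer rl.mu.Unlock()
	if rl.calls == nil {
		rl.calls = map[RateLimitKey]int{}
	}
	// Track how many calls were made per (project, service, operation, version).
	rl.calls[*key]++
	return nil
}

A test could then assert that, for example, a single Delete produced exactly one "Delete" operation for the expected service and version.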
+func (m *MockAlphaHealthChecks) Get(ctx context.Context, key meta.Key) (*alpha.HealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), + } + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.HealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key meta.Key, obj *alpha.HealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaHealthChecks %v exists", key), + } + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "healthChecks", key) + } + + m.Objects[key] = &MockHealthChecksObj{obj} + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
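The constructor takes the backing map directly, and Obj wraps a typed object into the stored form, so a test can pre-seed state instead of driving everything through Insert. A minimal sketch under the same same-package assumption; the key name is hypothetical:

func seededAlphaHealthChecks() *MockAlphaHealthChecks {
	objs := map[meta.Key]*MockHealthChecksObj{}
	mock := NewMockAlphaHealthChecks(objs)
	key := meta.Key{Name: "hc-seeded"} // hypothetical name
	// Seed the backing map directly; Get(ctx, key) will return this object.
	objs[key] = mock.Obj(&alpha.HealthCheck{Name: key.Name})
	return mock
}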
+func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), + } + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaHealthChecks) Obj(o *alpha.HealthCheck) *MockHealthChecksObj { + return &MockHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockAlphaHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *alpha.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEAlphaHealthChecks is a simplifying adapter for the GCE HealthChecks. +type GCEAlphaHealthChecks struct { + s *Service +} + +// Get the HealthCheck named by key. +func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key meta.Key) (*alpha.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.HealthChecks.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all HealthCheck objects. +func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.HealthChecks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.HealthCheck + f := func(l *alpha.HealthCheckList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert HealthCheck with key of value obj. +func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key meta.Key, obj *alpha.HealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.HealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the HealthCheck referenced by key. 
+func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.HealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEAlphaHealthChecks. +func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *alpha.HealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.HealthChecks.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// HttpHealthChecks is an interface that allows for mocking of HttpHealthChecks. +type HttpHealthChecks interface { + Get(ctx context.Context, key meta.Key) (*ga.HttpHealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) + Insert(ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *ga.HttpHealthCheck) error +} + +// NewMockHttpHealthChecks returns a new mock for HttpHealthChecks. +func NewMockHttpHealthChecks(objs map[meta.Key]*MockHttpHealthChecksObj) *MockHttpHealthChecks { + mock := &MockHttpHealthChecks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockHttpHealthChecks is the mock for HttpHealthChecks. +type MockHttpHealthChecks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockHttpHealthChecksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockHttpHealthChecks, ctx context.Context, key meta.Key) (bool, *ga.HttpHealthCheck, error) + ListHook func(m *MockHttpHealthChecks, ctx context.Context, fl *filter.F) (bool, []*ga.HttpHealthCheck, error) + InsertHook func(m *MockHttpHealthChecks, ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) (bool, error) + DeleteHook func(m *MockHttpHealthChecks, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockHttpHealthChecks, context.Context, meta.Key, *ga.HttpHealthCheck) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockHttpHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpHealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.HttpHealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockHttpHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockHttpHealthChecks %v exists", key), + } + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "httpHealthChecks", key) + } + + m.Objects[key] = &MockHttpHealthChecksObj{obj} + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
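Insert fills in Name and a mock SelfLink on the stored object and rejects duplicate keys with a 409, which mirrors the real API closely enough for most controller tests. A sketch of both behaviors, again assuming it sits in the generated package; the key name is hypothetical:

func insertTwice(ctx context.Context) error {
	mock := NewMockHttpHealthChecks(map[meta.Key]*MockHttpHealthChecksObj{})
	key := meta.Key{Name: "http-hc"} // hypothetical name
	hc := &ga.HttpHealthCheck{}
	if err := mock.Insert(ctx, key, hc); err != nil {
		return err
	}
	// The mock populated Name and SelfLink on the inserted object.
	_ = hc.Name     // == "http-hc"
	_ = hc.SelfLink // non-empty mock self link
	err := mock.Insert(ctx, key, &ga.HttpHealthCheck{})
	if gerr, ok := err.(*googleapi.Error); !ok || gerr.Code != http.StatusConflict {
		return fmt.Errorf("expected 409 on duplicate insert, got %v", err)
	}
	return nil
}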
+func (m *MockHttpHealthChecks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockHttpHealthChecks) Obj(o *ga.HttpHealthCheck) *MockHttpHealthChecksObj { + return &MockHttpHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockHttpHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpHealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEHttpHealthChecks is a simplifying adapter for the GCE HttpHealthChecks. +type GCEHttpHealthChecks struct { + s *Service +} + +// Get the HttpHealthCheck named by key. +func (g *GCEHttpHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpHealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HttpHealthChecks.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all HttpHealthCheck objects. +func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HttpHealthChecks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.HttpHealthCheck + f := func(l *ga.HttpHealthCheckList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert HttpHealthCheck with key of value obj. +func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.HttpHealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the HttpHealthCheck referenced by key. 
+func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HttpHealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEHttpHealthChecks. +func (g *GCEHttpHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpHealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HttpHealthChecks.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// HttpsHealthChecks is an interface that allows for mocking of HttpsHealthChecks. +type HttpsHealthChecks interface { + Get(ctx context.Context, key meta.Key) (*ga.HttpsHealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) + Insert(ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *ga.HttpsHealthCheck) error +} + +// NewMockHttpsHealthChecks returns a new mock for HttpsHealthChecks. +func NewMockHttpsHealthChecks(objs map[meta.Key]*MockHttpsHealthChecksObj) *MockHttpsHealthChecks { + mock := &MockHttpsHealthChecks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockHttpsHealthChecks is the mock for HttpsHealthChecks. +type MockHttpsHealthChecks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockHttpsHealthChecksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockHttpsHealthChecks, ctx context.Context, key meta.Key) (bool, *ga.HttpsHealthCheck, error) + ListHook func(m *MockHttpsHealthChecks, ctx context.Context, fl *filter.F) (bool, []*ga.HttpsHealthCheck, error) + InsertHook func(m *MockHttpsHealthChecks, ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) (bool, error) + DeleteHook func(m *MockHttpsHealthChecks, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockHttpsHealthChecks, context.Context, meta.Key, *ga.HttpsHealthCheck) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockHttpsHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpsHealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.HttpsHealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockHttpsHealthChecks %v exists", key), + } + glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "httpsHealthChecks", key) + } + + m.Objects[key] = &MockHttpsHealthChecksObj{obj} + glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
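The per-key error maps and ListError make it easy to simulate API failures without writing hooks. A sketch that injects a 503 for Delete on one key and a generic List failure (same-package assumption; the key name is hypothetical):

func failingHttpsHealthChecks() *MockHttpsHealthChecks {
	mock := NewMockHttpsHealthChecks(map[meta.Key]*MockHttpsHealthChecksObj{})
	key := meta.Key{Name: "https-hc"} // hypothetical name
	// Any Delete of this key now fails with a 503.
	mock.DeleteError[key] = &googleapi.Error{Code: http.StatusServiceUnavailable}
	// ListError is a *error, so take the address of an error variable.
	listErr := fmt.Errorf("injected list failure")
	mock.ListError = &listErr
	return mock
}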
+func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockHttpsHealthChecks) Obj(o *ga.HttpsHealthCheck) *MockHttpsHealthChecksObj { + return &MockHttpsHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockHttpsHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpsHealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEHttpsHealthChecks is a simplifying adapter for the GCE HttpsHealthChecks. +type GCEHttpsHealthChecks struct { + s *Service +} + +// Get the HttpsHealthCheck named by key. +func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpsHealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HttpsHealthChecks.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all HttpsHealthCheck objects. +func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HttpsHealthChecks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.HttpsHealthCheck + f := func(l *ga.HttpsHealthCheckList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert HttpsHealthCheck with key of value obj. +func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.HttpsHealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the HttpsHealthCheck referenced by key. 
+func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HttpsHealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEHttpsHealthChecks. +func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpsHealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HttpsHealthChecks.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// InstanceGroups is an interface that allows for mocking of InstanceGroups. +type InstanceGroups interface { + Get(ctx context.Context, key meta.Key) (*ga.InstanceGroup, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) + Insert(ctx context.Context, key meta.Key, obj *ga.InstanceGroup) error + Delete(ctx context.Context, key meta.Key) error + AddInstances(context.Context, meta.Key, *ga.InstanceGroupsAddInstancesRequest) error + ListInstances(context.Context, meta.Key, *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error) + RemoveInstances(context.Context, meta.Key, *ga.InstanceGroupsRemoveInstancesRequest) error + SetNamedPorts(context.Context, meta.Key, *ga.InstanceGroupsSetNamedPortsRequest) error +} + +// NewMockInstanceGroups returns a new mock for InstanceGroups. +func NewMockInstanceGroups(objs map[meta.Key]*MockInstanceGroupsObj) *MockInstanceGroups { + mock := &MockInstanceGroups{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockInstanceGroups is the mock for InstanceGroups. +type MockInstanceGroups struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstanceGroupsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(m *MockInstanceGroups, ctx context.Context, key meta.Key) (bool, *ga.InstanceGroup, error) + ListHook func(m *MockInstanceGroups, ctx context.Context, zone string, fl *filter.F) (bool, []*ga.InstanceGroup, error) + InsertHook func(m *MockInstanceGroups, ctx context.Context, key meta.Key, obj *ga.InstanceGroup) (bool, error) + DeleteHook func(m *MockInstanceGroups, ctx context.Context, key meta.Key) (bool, error) + AddInstancesHook func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsAddInstancesRequest) error + ListInstancesHook func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error) + RemoveInstancesHook func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsRemoveInstancesRequest) error + SetNamedPortsHook func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsSetNamedPortsRequest) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockInstanceGroups) Get(ctx context.Context, key meta.Key) (*ga.InstanceGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstanceGroups %v not found", key), + } + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.InstanceGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockInstanceGroups) Insert(ctx context.Context, key meta.Key, obj *ga.InstanceGroup) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockInstanceGroups %v exists", key), + } + glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "instanceGroups", key) + } + + m.Objects[key] = &MockInstanceGroupsObj{obj} + glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockInstanceGroups) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstanceGroups %v not found", key), + } + glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockInstanceGroups) Obj(o *ga.InstanceGroup) *MockInstanceGroupsObj { + return &MockInstanceGroupsObj{o} +} + +// AddInstances is a mock for the corresponding method. +func (m *MockInstanceGroups) AddInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { + if m.AddInstancesHook != nil { + return m.AddInstancesHook(m, ctx, key, arg0) + } + return nil +} + +// ListInstances is a mock for the corresponding method. +func (m *MockInstanceGroups) ListInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error) { + if m.ListInstancesHook != nil { + return m.ListInstancesHook(m, ctx, key, arg0) + } + return nil, fmt.Errorf("ListInstancesHook must be set") +} + +// RemoveInstances is a mock for the corresponding method. +func (m *MockInstanceGroups) RemoveInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { + if m.RemoveInstancesHook != nil { + return m.RemoveInstancesHook(m, ctx, key, arg0) + } + return nil +} + +// SetNamedPorts is a mock for the corresponding method. +func (m *MockInstanceGroups) SetNamedPorts(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { + if m.SetNamedPortsHook != nil { + return m.SetNamedPortsHook(m, ctx, key, arg0) + } + return nil +} + +// GCEInstanceGroups is a simplifying adapter for the GCE InstanceGroups. +type GCEInstanceGroups struct { + s *Service +} + +// Get the InstanceGroup named by key. 
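Keys for zonal resources carry the zone, and the mock's List only returns objects whose key matches the requested zone; ListInstances, by contrast, has no default behavior and returns an error unless ListInstancesHook is set. A combined sketch under the same same-package assumption; the zone and group names are hypothetical:

func zonalInstanceGroupsMock(ctx context.Context) (*MockInstanceGroups, error) {
	mock := NewMockInstanceGroups(map[meta.Key]*MockInstanceGroupsObj{})
	keyA := meta.Key{Name: "ig-a", Zone: "us-central1-a"} // hypothetical
	keyB := meta.Key{Name: "ig-b", Zone: "us-central1-b"} // hypothetical
	if err := mock.Insert(ctx, keyA, &ga.InstanceGroup{}); err != nil {
		return nil, err
	}
	if err := mock.Insert(ctx, keyB, &ga.InstanceGroup{}); err != nil {
		return nil, err
	}
	// Only ig-a comes back: List is scoped to the requested zone.
	if igs, err := mock.List(ctx, "us-central1-a", filter.None); err != nil || len(igs) != 1 {
		return nil, fmt.Errorf("unexpected zonal list result: %v, %v", igs, err)
	}
	// ListInstances fails unless the hook is provided.
	mock.ListInstancesHook = func(m *MockInstanceGroups, ctx context.Context, key meta.Key, req *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error) {
		return &ga.InstanceGroupsListInstances{}, nil
	}
	return mock, nil
}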
+func (g *GCEInstanceGroups) Get(ctx context.Context, key meta.Key) (*ga.InstanceGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.InstanceGroups.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all InstanceGroup objects. +func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.InstanceGroups.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.InstanceGroup + f := func(l *ga.InstanceGroupList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert InstanceGroup with key of value obj. +func (g *GCEInstanceGroups) Insert(ctx context.Context, key meta.Key, obj *ga.InstanceGroup) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.InstanceGroups.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the InstanceGroup referenced by key. +func (g *GCEInstanceGroups) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.InstanceGroups.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AddInstances is a method on GCEInstanceGroups. +func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AddInstances", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.InstanceGroups.AddInstances(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// ListInstances is a method on GCEInstanceGroups. 
+func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "ListInstances", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.InstanceGroups.ListInstances(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + return call.Do() +} + +// RemoveInstances is a method on GCEInstanceGroups. +func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "RemoveInstances", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.InstanceGroups.RemoveInstances(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetNamedPorts is a method on GCEInstanceGroups. +func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetNamedPorts", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.InstanceGroups.SetNamedPorts(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Instances is an interface that allows for mocking of Instances. +type Instances interface { + Get(ctx context.Context, key meta.Key) (*ga.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Instance) error + Delete(ctx context.Context, key meta.Key) error + AttachDisk(context.Context, meta.Key, *ga.AttachedDisk) error + DetachDisk(context.Context, meta.Key, string) error +} + +// NewMockInstances returns a new mock for Instances. +func NewMockInstances(objs map[meta.Key]*MockInstancesObj) *MockInstances { + mock := &MockInstances{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockInstances is the mock for Instances. +type MockInstances struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(m *MockInstances, ctx context.Context, key meta.Key) (bool, *ga.Instance, error) + ListHook func(m *MockInstances, ctx context.Context, zone string, fl *filter.F) (bool, []*ga.Instance, error) + InsertHook func(m *MockInstances, ctx context.Context, key meta.Key, obj *ga.Instance) (bool, error) + DeleteHook func(m *MockInstances, ctx context.Context, key meta.Key) (bool, error) + AttachDiskHook func(*MockInstances, context.Context, meta.Key, *ga.AttachedDisk) error + DetachDiskHook func(*MockInstances, context.Context, meta.Key, string) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockInstances) Get(ctx context.Context, key meta.Key) (*ga.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstances %v not found", key), + } + glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
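The per-key error maps declared above let a test force failures without writing a hook. A short sketch (the helper name and injected error are illustrative; the mock API is exactly as generated):

func exampleInjectedGetError(ctx context.Context) error {
	mock := NewMockInstances(map[meta.Key]*MockInstancesObj{})
	key := meta.Key{Name: "node-1", Zone: "us-central1-b"}
	// Any error stored in GetError for this key is returned instead of a normal lookup.
	mock.GetError[key] = &googleapi.Error{Code: http.StatusInternalServerError, Message: "injected"}
	_, err := mock.Get(ctx, key) // returns the injected error
	return err
}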
+func (m *MockInstances) Insert(ctx context.Context, key meta.Key, obj *ga.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockInstances %v exists", key), + } + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "instances", key) + } + + m.Objects[key] = &MockInstancesObj{obj} + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockInstances) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstances %v not found", key), + } + glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockInstances) Obj(o *ga.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *ga.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// GCEInstances is a simplifying adapter for the GCE Instances. +type GCEInstances struct { + s *Service +} + +// Get the Instance named by key. +func (g *GCEInstances) Get(ctx context.Context, key meta.Key) (*ga.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Instance objects. 
+func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Instance + f := func(l *ga.InstanceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Instance with key of value obj. +func (g *GCEInstances) Insert(ctx context.Context, key meta.Key, obj *ga.Instance) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Instance referenced by key. +func (g *GCEInstances) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AttachDisk is a method on GCEInstances. +func (g *GCEInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *ga.AttachedDisk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachDisk is a method on GCEInstances. +func (g *GCEInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// BetaInstances is an interface that allows for mocking of Instances. 
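Because *MockInstances and *GCEInstances both implement the Instances interface declared above, callers can be written once and exercised against either the mock or the real adapter. A hedged sketch (the helper name and its retry-free logic are illustrative):

func ensureInstance(ctx context.Context, c Instances, key meta.Key, want *ga.Instance) error {
	_, err := c.Get(ctx, key)
	if err == nil {
		return nil // already present
	}
	if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != http.StatusNotFound {
		return err // unexpected failure
	}
	return c.Insert(ctx, key, want)
}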
+type BetaInstances interface { + Get(ctx context.Context, key meta.Key) (*beta.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) + Insert(ctx context.Context, key meta.Key, obj *beta.Instance) error + Delete(ctx context.Context, key meta.Key) error + AttachDisk(context.Context, meta.Key, *beta.AttachedDisk) error + DetachDisk(context.Context, meta.Key, string) error +} + +// NewMockBetaInstances returns a new mock for Instances. +func NewMockBetaInstances(objs map[meta.Key]*MockInstancesObj) *MockBetaInstances { + mock := &MockBetaInstances{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaInstances is the mock for Instances. +type MockBetaInstances struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockBetaInstances, ctx context.Context, key meta.Key) (bool, *beta.Instance, error) + ListHook func(m *MockBetaInstances, ctx context.Context, zone string, fl *filter.F) (bool, []*beta.Instance, error) + InsertHook func(m *MockBetaInstances, ctx context.Context, key meta.Key, obj *beta.Instance) (bool, error) + DeleteHook func(m *MockBetaInstances, ctx context.Context, key meta.Key) (bool, error) + AttachDiskHook func(*MockBetaInstances, context.Context, meta.Key, *beta.AttachedDisk) error + DetachDiskHook func(*MockBetaInstances, context.Context, meta.Key, string) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaInstances) Get(ctx context.Context, key meta.Key) (*beta.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToBeta() + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaInstances %v not found", key), + } + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. 
+func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaInstances) Insert(ctx context.Context, key meta.Key, obj *beta.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaInstances %v exists", key), + } + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionBeta, "mock-project", "instances", key) + } + + m.Objects[key] = &MockInstancesObj{obj} + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaInstances) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaInstances %v not found", key), + } + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaInstances) Obj(o *beta.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockBetaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *beta.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockBetaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// GCEBetaInstances is a simplifying adapter for the GCE Instances. 
+type GCEBetaInstances struct { + s *Service +} + +// Get the Instance named by key. +func (g *GCEBetaInstances) Get(ctx context.Context, key meta.Key) (*beta.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Instance objects. +func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.Instance + f := func(l *beta.InstanceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Instance with key of value obj. +func (g *GCEBetaInstances) Insert(ctx context.Context, key meta.Key, obj *beta.Instance) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Beta.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Instance referenced by key. +func (g *GCEBetaInstances) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AttachDisk is a method on GCEBetaInstances. +func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *beta.AttachedDisk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachDisk is a method on GCEBetaInstances. 
+func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaInstances is an interface that allows for mocking of Instances. +type AlphaInstances interface { + Get(ctx context.Context, key meta.Key) (*alpha.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.Instance) error + Delete(ctx context.Context, key meta.Key) error + AttachDisk(context.Context, meta.Key, *alpha.AttachedDisk) error + DetachDisk(context.Context, meta.Key, string) error + UpdateNetworkInterface(context.Context, meta.Key, string, *alpha.NetworkInterface) error +} + +// NewMockAlphaInstances returns a new mock for Instances. +func NewMockAlphaInstances(objs map[meta.Key]*MockInstancesObj) *MockAlphaInstances { + mock := &MockAlphaInstances{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaInstances is the mock for Instances. +type MockAlphaInstances struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaInstances, ctx context.Context, key meta.Key) (bool, *alpha.Instance, error) + ListHook func(m *MockAlphaInstances, ctx context.Context, zone string, fl *filter.F) (bool, []*alpha.Instance, error) + InsertHook func(m *MockAlphaInstances, ctx context.Context, key meta.Key, obj *alpha.Instance) (bool, error) + DeleteHook func(m *MockAlphaInstances, ctx context.Context, key meta.Key) (bool, error) + AttachDiskHook func(*MockAlphaInstances, context.Context, meta.Key, *alpha.AttachedDisk) error + DetachDiskHook func(*MockAlphaInstances, context.Context, meta.Key, string) error + UpdateNetworkInterfaceHook func(*MockAlphaInstances, context.Context, meta.Key, string, *alpha.NetworkInterface) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockAlphaInstances) Get(ctx context.Context, key meta.Key) (*alpha.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + } + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaInstances) Insert(ctx context.Context, key meta.Key, obj *alpha.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaInstances %v exists", key), + } + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "instances", key) + } + + m.Objects[key] = &MockInstancesObj{obj} + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
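Since the GA, beta, and alpha instance mocks all store *MockInstancesObj keyed by meta.Key, a single backing map can be shared between them: an object inserted through one API version becomes visible through the others via the ToGA/ToBeta/ToAlpha conversions used above. A small illustrative sketch:

func exampleSharedVersions(ctx context.Context) error {
	objs := map[meta.Key]*MockInstancesObj{}
	mockGA := NewMockInstances(objs)
	mockAlpha := NewMockAlphaInstances(objs)

	key := meta.Key{Name: "node-1", Zone: "us-central1-b"}
	if err := mockAlpha.Insert(ctx, key, &alpha.Instance{}); err != nil {
		return err
	}
	// The object inserted via the alpha mock is visible through the GA mock.
	_, err := mockGA.Get(ctx, key)
	return err
}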
+func (m *MockAlphaInstances) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + } + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaInstances) Obj(o *alpha.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockAlphaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *alpha.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockAlphaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// UpdateNetworkInterface is a mock for the corresponding method. +func (m *MockAlphaInstances) UpdateNetworkInterface(ctx context.Context, key meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { + if m.UpdateNetworkInterfaceHook != nil { + return m.UpdateNetworkInterfaceHook(m, ctx, key, arg0, arg1) + } + return nil +} + +// GCEAlphaInstances is a simplifying adapter for the GCE Instances. +type GCEAlphaInstances struct { + s *Service +} + +// Get the Instance named by key. +func (g *GCEAlphaInstances) Get(ctx context.Context, key meta.Key) (*alpha.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Instance objects. +func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Instance + f := func(l *alpha.InstanceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Instance with key of value obj. 
+func (g *GCEAlphaInstances) Insert(ctx context.Context, key meta.Key, obj *alpha.Instance) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Instance referenced by key. +func (g *GCEAlphaInstances) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AttachDisk is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *alpha.AttachedDisk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachDisk is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// UpdateNetworkInterface is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "UpdateNetworkInterface", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaNetworkEndpointGroups is an interface that allows for mocking of NetworkEndpointGroups. 
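Every adapter method above calls g.s.RateLimiter.Accept with a per-operation RateLimitKey before issuing the API request, so a custom limiter can throttle or audit all GCE calls in one place. The sketch below assumes the RateLimiter contract is exactly the Accept call used above; the type name and interface shape are inferred, not quoted from this patch:

// loggingRateLimiter admits every call and only records which operations were attempted.
// Illustrative only; a real limiter might keep a token bucket per RateLimitKey.
type loggingRateLimiter struct{}

func (loggingRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	glog.V(4).Infof("RateLimiter.Accept(%v, %+v)", ctx, key)
	return nil
}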
+type AlphaNetworkEndpointGroups interface { + Get(ctx context.Context, key meta.Key) (*alpha.NetworkEndpointGroup, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) error + Delete(ctx context.Context, key meta.Key) error + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) + AttachNetworkEndpoints(context.Context, meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error + DetachNetworkEndpoints(context.Context, meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error +} + +// NewMockAlphaNetworkEndpointGroups returns a new mock for NetworkEndpointGroups. +func NewMockAlphaNetworkEndpointGroups(objs map[meta.Key]*MockNetworkEndpointGroupsObj) *MockAlphaNetworkEndpointGroups { + mock := &MockAlphaNetworkEndpointGroups{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaNetworkEndpointGroups is the mock for NetworkEndpointGroups. +type MockAlphaNetworkEndpointGroups struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockNetworkEndpointGroupsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + AggregatedListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, key meta.Key) (bool, *alpha.NetworkEndpointGroup, error) + ListHook func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, zone string, fl *filter.F) (bool, []*alpha.NetworkEndpointGroup, error) + InsertHook func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) (bool, error) + DeleteHook func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, key meta.Key) (bool, error) + AggregatedListHook func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, fl *filter.F) (bool, map[string][]*alpha.NetworkEndpointGroup, error) + AttachNetworkEndpointsHook func(*MockAlphaNetworkEndpointGroups, context.Context, meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error + DetachNetworkEndpointsHook func(*MockAlphaNetworkEndpointGroups, context.Context, meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key meta.Key) (*alpha.NetworkEndpointGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), + } + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.NetworkEndpointGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v exists", key), + } + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "networkEndpointGroups", key) + } + + m.Objects[key] = &MockNetworkEndpointGroupsObj{obj} + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key meta.Key) error {
+	if m.DeleteHook != nil {
+		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
+			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+			return err
+		}
+	}
+
+	m.Lock.Lock()
+	defer m.Lock.Unlock()
+
+	if err, ok := m.DeleteError[key]; ok {
+		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+		return err
+	}
+	if _, ok := m.Objects[key]; !ok {
+		err := &googleapi.Error{
+			Code:    http.StatusNotFound,
+			Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key),
+		}
+		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+		return err
+	}
+
+	delete(m.Objects, key)
+	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key)
+	return nil
+}
+
+// AggregatedList is a mock for AggregatedList.
+func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) {
+	if m.AggregatedListHook != nil {
+		if intercept, objs, err := m.AggregatedListHook(m, ctx, fl); intercept {
+			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
+			return objs, err
+		}
+	}
+
+	m.Lock.Lock()
+	defer m.Lock.Unlock()
+
+	if m.AggregatedListError != nil {
+		err := *m.AggregatedListError
+		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
+		return nil, err
+	}
+
+	objs := map[string][]*alpha.NetworkEndpointGroup{}
+	for _, obj := range m.Objects {
+		res, err := ParseResourceURL(obj.ToAlpha().SelfLink)
+		if err != nil {
+			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
+			return nil, err
+		}
+		location := res.Key.Zone
+		if !fl.Match(obj.ToAlpha()) {
+			continue
+		}
+		objs[location] = append(objs[location], obj.ToAlpha())
+	}
+	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs))
+	return objs, nil
+}
+
+// Obj wraps the object for use in the mock.
+func (m *MockAlphaNetworkEndpointGroups) Obj(o *alpha.NetworkEndpointGroup) *MockNetworkEndpointGroupsObj {
+	return &MockNetworkEndpointGroupsObj{o}
+}
+
+// AttachNetworkEndpoints is a mock for the corresponding method.
+func (m *MockAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error {
+	if m.AttachNetworkEndpointsHook != nil {
+		return m.AttachNetworkEndpointsHook(m, ctx, key, arg0)
+	}
+	return nil
+}
+
+// DetachNetworkEndpoints is a mock for the corresponding method.
+func (m *MockAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error {
+	if m.DetachNetworkEndpointsHook != nil {
+		return m.DetachNetworkEndpointsHook(m, ctx, key, arg0)
+	}
+	return nil
+}
+
+// GCEAlphaNetworkEndpointGroups is a simplifying adapter for the GCE NetworkEndpointGroups.
+type GCEAlphaNetworkEndpointGroups struct {
+	s *Service
+}
+
+// Get the NetworkEndpointGroup named by key. 
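Note that Insert fills in a parseable SelfLink when one is not supplied, and the mock AggregatedList above groups stored objects by the zone parsed back out of that SelfLink. Roughly as below; the helper name is illustrative and the exact map key ("us-central1-a") assumes the SelfLink parses back to that zone:

func exampleAggregatedList(ctx context.Context) error {
	mock := NewMockAlphaNetworkEndpointGroups(map[meta.Key]*MockNetworkEndpointGroupsObj{})
	key := meta.Key{Name: "neg-1", Zone: "us-central1-a"}
	if err := mock.Insert(ctx, key, &alpha.NetworkEndpointGroup{}); err != nil {
		return err
	}
	byZone, err := mock.AggregatedList(ctx, filter.None)
	if err != nil {
		return err
	}
	_ = byZone["us-central1-a"] // the inserted NEG is grouped under its zone
	return nil
}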
+func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key meta.Key) (*alpha.NetworkEndpointGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all NetworkEndpointGroup objects. +func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.NetworkEndpointGroups.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.NetworkEndpointGroup + f := func(l *alpha.NetworkEndpointGroupList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert NetworkEndpointGroup with key of value obj. +func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.NetworkEndpointGroups.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the NetworkEndpointGroup referenced by key. +func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AggregatedList lists all resources of the given type across all locations. 
+func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + + call := g.s.Alpha.NetworkEndpointGroups.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*alpha.NetworkEndpointGroup{} + f := func(l *alpha.NetworkEndpointGroupAggregatedList) error { + for k, v := range l.Items { + all[k] = append(all[k], v.NetworkEndpointGroups...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// AttachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. +func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachNetworkEndpoints", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. +func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachNetworkEndpoints", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Projects is an interface that allows for mocking of Projects. +type Projects interface { + // ProjectsOps is an interface with additional non-CRUD type methods. + // This interface is expected to be implemented by hand (non-autogenerated). + ProjectsOps +} + +// NewMockProjects returns a new mock for Projects. +func NewMockProjects(objs map[meta.Key]*MockProjectsObj) *MockProjects { + mock := &MockProjects{ + Objects: objs, + } + return mock +} + +// MockProjects is the mock for Projects. +type MockProjects struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockProjectsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Obj wraps the object for use in the mock. +func (m *MockProjects) Obj(o *ga.Project) *MockProjectsObj { + return &MockProjectsObj{o} +} + +// GCEProjects is a simplifying adapter for the GCE Projects. +type GCEProjects struct { + s *Service +} + +// Regions is an interface that allows for mocking of Regions. +type Regions interface { + Get(ctx context.Context, key meta.Key) (*ga.Region, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) +} + +// NewMockRegions returns a new mock for Regions. +func NewMockRegions(objs map[meta.Key]*MockRegionsObj) *MockRegions { + mock := &MockRegions{ + Objects: objs, + GetError: map[meta.Key]error{}, + } + return mock +} + +// MockRegions is the mock for Regions. +type MockRegions struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockRegions, ctx context.Context, key meta.Key) (bool, *ga.Region, error) + ListHook func(m *MockRegions, ctx context.Context, fl *filter.F) (bool, []*ga.Region, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockRegions) Get(ctx context.Context, key meta.Key) (*ga.Region, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockRegions %v not found", key), + } + glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Region + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. 
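Hooks can also replace the default behavior entirely: returning true from a hook short-circuits the generated logic, as MockRegions.Get above shows. An illustrative sketch (the hook body and fabricated Region are made up for the example):

func exampleRegionHook(ctx context.Context) (*ga.Region, error) {
	mock := NewMockRegions(map[meta.Key]*MockRegionsObj{})
	mock.GetHook = func(m *MockRegions, ctx context.Context, key meta.Key) (bool, *ga.Region, error) {
		// true means "intercepted": the mock returns this result without consulting Objects.
		return true, &ga.Region{Name: key.Name}, nil
	}
	return mock.Get(ctx, meta.Key{Name: "us-central1"})
}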
+func (m *MockRegions) Obj(o *ga.Region) *MockRegionsObj { + return &MockRegionsObj{o} +} + +// GCERegions is a simplifying adapter for the GCE Regions. +type GCERegions struct { + s *Service +} + +// Get the Region named by key. +func (g *GCERegions) Get(ctx context.Context, key meta.Key) (*ga.Region, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Regions", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Regions.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Region objects. +func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Regions", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Regions.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Region + f := func(l *ga.RegionList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Routes is an interface that allows for mocking of Routes. +type Routes interface { + Get(ctx context.Context, key meta.Key) (*ga.Route, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Route) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockRoutes returns a new mock for Routes. +func NewMockRoutes(objs map[meta.Key]*MockRoutesObj) *MockRoutes { + mock := &MockRoutes{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockRoutes is the mock for Routes. +type MockRoutes struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRoutesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockRoutes, ctx context.Context, key meta.Key) (bool, *ga.Route, error) + ListHook func(m *MockRoutes, ctx context.Context, fl *filter.F) (bool, []*ga.Route, error) + InsertHook func(m *MockRoutes, ctx context.Context, key meta.Key, obj *ga.Route) (bool, error) + DeleteHook func(m *MockRoutes, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockRoutes) Get(ctx context.Context, key meta.Key) (*ga.Route, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockRoutes %v not found", key), + } + glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Route + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockRoutes) Insert(ctx context.Context, key meta.Key, obj *ga.Route) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockRoutes %v exists", key), + } + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "routes", key) + } + + m.Objects[key] = &MockRoutesObj{obj} + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockRoutes) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockRoutes %v not found", key), + } + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockRoutes) Obj(o *ga.Route) *MockRoutesObj { + return &MockRoutesObj{o} +} + +// GCERoutes is a simplifying adapter for the GCE Routes. +type GCERoutes struct { + s *Service +} + +// Get the Route named by key. +func (g *GCERoutes) Get(ctx context.Context, key meta.Key) (*ga.Route, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Routes", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Routes.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Route objects. +func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Routes", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Routes.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Route + f := func(l *ga.RouteList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Route with key of value obj. +func (g *GCERoutes) Insert(ctx context.Context, key meta.Key, obj *ga.Route) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Routes", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Routes.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Route referenced by key. +func (g *GCERoutes) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Routes", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Routes.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SslCertificates is an interface that allows for mocking of SslCertificates. +type SslCertificates interface { + Get(ctx context.Context, key meta.Key) (*ga.SslCertificate, error) + List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) + Insert(ctx context.Context, key meta.Key, obj *ga.SslCertificate) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockSslCertificates returns a new mock for SslCertificates. +func NewMockSslCertificates(objs map[meta.Key]*MockSslCertificatesObj) *MockSslCertificates { + mock := &MockSslCertificates{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockSslCertificates is the mock for SslCertificates. +type MockSslCertificates struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockSslCertificatesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. 
+ GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockSslCertificates, ctx context.Context, key meta.Key) (bool, *ga.SslCertificate, error) + ListHook func(m *MockSslCertificates, ctx context.Context, fl *filter.F) (bool, []*ga.SslCertificate, error) + InsertHook func(m *MockSslCertificates, ctx context.Context, key meta.Key, obj *ga.SslCertificate) (bool, error) + DeleteHook func(m *MockSslCertificates, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockSslCertificates) Get(ctx context.Context, key meta.Key) (*ga.SslCertificate, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSslCertificates %v not found", key), + } + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.SslCertificate + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockSslCertificates) Insert(ctx context.Context, key meta.Key, obj *ga.SslCertificate) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockSslCertificates %v exists", key), + } + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "sslCertificates", key) + } + + m.Objects[key] = &MockSslCertificatesObj{obj} + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockSslCertificates) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSslCertificates %v not found", key), + } + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockSslCertificates) Obj(o *ga.SslCertificate) *MockSslCertificatesObj { + return &MockSslCertificatesObj{o} +} + +// GCESslCertificates is a simplifying adapter for the GCE SslCertificates. +type GCESslCertificates struct { + s *Service +} + +// Get the SslCertificate named by key. +func (g *GCESslCertificates) Get(ctx context.Context, key meta.Key) (*ga.SslCertificate, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.SslCertificates.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all SslCertificate objects. +func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.SslCertificates.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.SslCertificate + f := func(l *ga.SslCertificateList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert SslCertificate with key of value obj. 
+func (g *GCESslCertificates) Insert(ctx context.Context, key meta.Key, obj *ga.SslCertificate) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.SslCertificates.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the SslCertificate referenced by key. +func (g *GCESslCertificates) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.SslCertificates.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// TargetHttpProxies is an interface that allows for mocking of TargetHttpProxies. +type TargetHttpProxies interface { + Get(ctx context.Context, key meta.Key) (*ga.TargetHttpProxy, error) + List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) + Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) error + Delete(ctx context.Context, key meta.Key) error + SetUrlMap(context.Context, meta.Key, *ga.UrlMapReference) error +} + +// NewMockTargetHttpProxies returns a new mock for TargetHttpProxies. +func NewMockTargetHttpProxies(objs map[meta.Key]*MockTargetHttpProxiesObj) *MockTargetHttpProxies { + mock := &MockTargetHttpProxies{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetHttpProxies is the mock for TargetHttpProxies. +type MockTargetHttpProxies struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockTargetHttpProxies, ctx context.Context, key meta.Key) (bool, *ga.TargetHttpProxy, error) + ListHook func(m *MockTargetHttpProxies, ctx context.Context, fl *filter.F) (bool, []*ga.TargetHttpProxy, error) + InsertHook func(m *MockTargetHttpProxies, ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) (bool, error) + DeleteHook func(m *MockTargetHttpProxies, ctx context.Context, key meta.Key) (bool, error) + SetUrlMapHook func(*MockTargetHttpProxies, context.Context, meta.Key, *ga.UrlMapReference) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockTargetHttpProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetHttpProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockTargetHttpProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetHttpProxies %v exists", key), + } + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "targetHttpProxies", key) + } + + m.Objects[key] = &MockTargetHttpProxiesObj{obj} + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockTargetHttpProxies) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockTargetHttpProxies) Obj(o *ga.TargetHttpProxy) *MockTargetHttpProxiesObj { + return &MockTargetHttpProxiesObj{o} +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockTargetHttpProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(m, ctx, key, arg0) + } + return nil +} + +// GCETargetHttpProxies is a simplifying adapter for the GCE TargetHttpProxies. +type GCETargetHttpProxies struct { + s *Service +} + +// Get the TargetHttpProxy named by key. +func (g *GCETargetHttpProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpProxies.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all TargetHttpProxy objects. +func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetHttpProxy + f := func(l *ga.TargetHttpProxyList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert TargetHttpProxy with key of value obj. +func (g *GCETargetHttpProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.TargetHttpProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the TargetHttpProxy referenced by key. 
+func (g *GCETargetHttpProxies) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetUrlMap is a method on GCETargetHttpProxies. +func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// TargetHttpsProxies is an interface that allows for mocking of TargetHttpsProxies. +type TargetHttpsProxies interface { + Get(ctx context.Context, key meta.Key) (*ga.TargetHttpsProxy, error) + List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) + Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) error + Delete(ctx context.Context, key meta.Key) error + SetSslCertificates(context.Context, meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMap(context.Context, meta.Key, *ga.UrlMapReference) error +} + +// NewMockTargetHttpsProxies returns a new mock for TargetHttpsProxies. +func NewMockTargetHttpsProxies(objs map[meta.Key]*MockTargetHttpsProxiesObj) *MockTargetHttpsProxies { + mock := &MockTargetHttpsProxies{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetHttpsProxies is the mock for TargetHttpsProxies. +type MockTargetHttpsProxies struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpsProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(m *MockTargetHttpsProxies, ctx context.Context, key meta.Key) (bool, *ga.TargetHttpsProxy, error) + ListHook func(m *MockTargetHttpsProxies, ctx context.Context, fl *filter.F) (bool, []*ga.TargetHttpsProxy, error) + InsertHook func(m *MockTargetHttpsProxies, ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) (bool, error) + DeleteHook func(m *MockTargetHttpsProxies, ctx context.Context, key meta.Key) (bool, error) + SetSslCertificatesHook func(*MockTargetHttpsProxies, context.Context, meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMapHook func(*MockTargetHttpsProxies, context.Context, meta.Key, *ga.UrlMapReference) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockTargetHttpsProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpsProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetHttpsProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetHttpsProxies %v exists", key), + } + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "targetHttpsProxies", key) + } + + m.Objects[key] = &MockTargetHttpsProxiesObj{obj} + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockTargetHttpsProxies) Obj(o *ga.TargetHttpsProxy) *MockTargetHttpsProxiesObj { + return &MockTargetHttpsProxiesObj{o} +} + +// SetSslCertificates is a mock for the corresponding method. +func (m *MockTargetHttpsProxies) SetSslCertificates(ctx context.Context, key meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { + if m.SetSslCertificatesHook != nil { + return m.SetSslCertificatesHook(m, ctx, key, arg0) + } + return nil +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockTargetHttpsProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(m, ctx, key, arg0) + } + return nil +} + +// GCETargetHttpsProxies is a simplifying adapter for the GCE TargetHttpsProxies. +type GCETargetHttpsProxies struct { + s *Service +} + +// Get the TargetHttpsProxy named by key. +func (g *GCETargetHttpsProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpsProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpsProxies.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all TargetHttpsProxy objects. 
+func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpsProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetHttpsProxy + f := func(l *ga.TargetHttpsProxyList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert TargetHttpsProxy with key of value obj. +func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.TargetHttpsProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the TargetHttpsProxy referenced by key. +func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpsProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetSslCertificates is a method on GCETargetHttpsProxies. +func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetSslCertificates", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetUrlMap is a method on GCETargetHttpsProxies. +func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// TargetPools is an interface that allows for mocking of TargetPools. 
+type TargetPools interface { + Get(ctx context.Context, key meta.Key) (*ga.TargetPool, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) + Insert(ctx context.Context, key meta.Key, obj *ga.TargetPool) error + Delete(ctx context.Context, key meta.Key) error + AddInstance(context.Context, meta.Key, *ga.TargetPoolsAddInstanceRequest) error + RemoveInstance(context.Context, meta.Key, *ga.TargetPoolsRemoveInstanceRequest) error +} + +// NewMockTargetPools returns a new mock for TargetPools. +func NewMockTargetPools(objs map[meta.Key]*MockTargetPoolsObj) *MockTargetPools { + mock := &MockTargetPools{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetPools is the mock for TargetPools. +type MockTargetPools struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetPoolsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockTargetPools, ctx context.Context, key meta.Key) (bool, *ga.TargetPool, error) + ListHook func(m *MockTargetPools, ctx context.Context, region string, fl *filter.F) (bool, []*ga.TargetPool, error) + InsertHook func(m *MockTargetPools, ctx context.Context, key meta.Key, obj *ga.TargetPool) (bool, error) + DeleteHook func(m *MockTargetPools, ctx context.Context, key meta.Key) (bool, error) + AddInstanceHook func(*MockTargetPools, context.Context, meta.Key, *ga.TargetPoolsAddInstanceRequest) error + RemoveInstanceHook func(*MockTargetPools, context.Context, meta.Key, *ga.TargetPoolsRemoveInstanceRequest) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockTargetPools) Get(ctx context.Context, key meta.Key) (*ga.TargetPool, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetPools %v not found", key), + } + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. 
+func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetPool + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockTargetPools) Insert(ctx context.Context, key meta.Key, obj *ga.TargetPool) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetPools %v exists", key), + } + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "targetPools", key) + } + + m.Objects[key] = &MockTargetPoolsObj{obj} + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockTargetPools) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetPools %v not found", key), + } + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockTargetPools) Obj(o *ga.TargetPool) *MockTargetPoolsObj { + return &MockTargetPoolsObj{o} +} + +// AddInstance is a mock for the corresponding method. +func (m *MockTargetPools) AddInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { + if m.AddInstanceHook != nil { + return m.AddInstanceHook(m, ctx, key, arg0) + } + return nil +} + +// RemoveInstance is a mock for the corresponding method. 
+func (m *MockTargetPools) RemoveInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { + if m.RemoveInstanceHook != nil { + return m.RemoveInstanceHook(m, ctx, key, arg0) + } + return nil +} + +// GCETargetPools is a simplifying adapter for the GCE TargetPools. +type GCETargetPools struct { + s *Service +} + +// Get the TargetPool named by key. +func (g *GCETargetPools) Get(ctx context.Context, key meta.Key) (*ga.TargetPool, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetPools.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all TargetPool objects. +func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetPools.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetPool + f := func(l *ga.TargetPoolList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert TargetPool with key of value obj. +func (g *GCETargetPools) Insert(ctx context.Context, key meta.Key, obj *ga.TargetPool) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.TargetPools.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the TargetPool referenced by key. +func (g *GCETargetPools) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetPools.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AddInstance is a method on GCETargetPools. +func (g *GCETargetPools) AddInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AddInstance", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetPools.AddInstance(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// RemoveInstance is a method on GCETargetPools. 
+func (g *GCETargetPools) RemoveInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "RemoveInstance", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetPools.RemoveInstance(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// UrlMaps is an interface that allows for mocking of UrlMaps. +type UrlMaps interface { + Get(ctx context.Context, key meta.Key) (*ga.UrlMap, error) + List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) + Insert(ctx context.Context, key meta.Key, obj *ga.UrlMap) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *ga.UrlMap) error +} + +// NewMockUrlMaps returns a new mock for UrlMaps. +func NewMockUrlMaps(objs map[meta.Key]*MockUrlMapsObj) *MockUrlMaps { + mock := &MockUrlMaps{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockUrlMaps is the mock for UrlMaps. +type MockUrlMaps struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockUrlMapsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockUrlMaps, ctx context.Context, key meta.Key) (bool, *ga.UrlMap, error) + ListHook func(m *MockUrlMaps, ctx context.Context, fl *filter.F) (bool, []*ga.UrlMap, error) + InsertHook func(m *MockUrlMaps, ctx context.Context, key meta.Key, obj *ga.UrlMap) (bool, error) + DeleteHook func(m *MockUrlMaps, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockUrlMaps, context.Context, meta.Key, *ga.UrlMap) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockUrlMaps) Get(ctx context.Context, key meta.Key) (*ga.UrlMap, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockUrlMaps %v not found", key), + } + glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
+func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.UrlMap + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockUrlMaps) Insert(ctx context.Context, key meta.Key, obj *ga.UrlMap) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockUrlMaps %v exists", key), + } + glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "urlMaps", key) + } + + m.Objects[key] = &MockUrlMapsObj{obj} + glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockUrlMaps) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockUrlMaps %v not found", key), + } + glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockUrlMaps) Obj(o *ga.UrlMap) *MockUrlMapsObj { + return &MockUrlMapsObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockUrlMaps) Update(ctx context.Context, key meta.Key, arg0 *ga.UrlMap) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEUrlMaps is a simplifying adapter for the GCE UrlMaps. +type GCEUrlMaps struct { + s *Service +} + +// Get the UrlMap named by key. 
+func (g *GCEUrlMaps) Get(ctx context.Context, key meta.Key) (*ga.UrlMap, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "UrlMaps", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.UrlMaps.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all UrlMap objects. +func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "UrlMaps", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.UrlMaps.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.UrlMap + f := func(l *ga.UrlMapList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert UrlMap with key of value obj. +func (g *GCEUrlMaps) Insert(ctx context.Context, key meta.Key, obj *ga.UrlMap) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "UrlMaps", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.UrlMaps.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the UrlMap referenced by key. +func (g *GCEUrlMaps) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "UrlMaps", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.UrlMaps.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEUrlMaps. +func (g *GCEUrlMaps) Update(ctx context.Context, key meta.Key, arg0 *ga.UrlMap) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "UrlMaps", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.UrlMaps.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Zones is an interface that allows for mocking of Zones. +type Zones interface { + Get(ctx context.Context, key meta.Key) (*ga.Zone, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) +} + +// NewMockZones returns a new mock for Zones. +func NewMockZones(objs map[meta.Key]*MockZonesObj) *MockZones { + mock := &MockZones{ + Objects: objs, + GetError: map[meta.Key]error{}, + } + return mock +} + +// MockZones is the mock for Zones. +type MockZones struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockZonesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. 
+ GetError map[meta.Key]error + ListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockZones, ctx context.Context, key meta.Key) (bool, *ga.Zone, error) + ListHook func(m *MockZones, ctx context.Context, fl *filter.F) (bool, []*ga.Zone, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockZones) Get(ctx context.Context, key meta.Key) (*ga.Zone, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockZones %v not found", key), + } + glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Zone + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. +func (m *MockZones) Obj(o *ga.Zone) *MockZonesObj { + return &MockZonesObj{o} +} + +// GCEZones is a simplifying adapter for the GCE Zones. +type GCEZones struct { + s *Service +} + +// Get the Zone named by key. +func (g *GCEZones) Get(ctx context.Context, key meta.Key) (*ga.Zone, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Zones", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Zones.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Zone objects. +func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Zones", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Zones.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Zone + f := func(l *ga.ZoneList) error { + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} diff --git a/pkg/cloudprovider/providers/gce/cloud/gen_test.go b/pkg/cloudprovider/providers/gce/cloud/gen_test.go new file mode 100644 index 00000000000..cbe5d9938d3 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen_test.go @@ -0,0 +1,1749 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by "go run gen/main.go -mode test > gen_test.go". Do not edit +// directly. + +package cloud + +import ( + "context" + "reflect" + "testing" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +const location = "location" + +func TestDisksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaDisks().Get(ctx, *key); err == nil { + t.Errorf("AlphaDisks().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Disks().Get(ctx, *key); err == nil { + t.Errorf("Disks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Disk{} + if err := mock.AlphaDisks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Disk{} + if err := mock.Disks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Disks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaDisks().Get(ctx, *key); err != nil { + t.Errorf("AlphaDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Disks().Get(ctx, *key); err != nil { + t.Errorf("Disks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaDisks.Objects[*keyAlpha] = mock.MockAlphaDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) + mock.MockDisks.Objects[*keyGA] = mock.MockDisks.Obj(&ga.Disk{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
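+	// The mock is seeded by writing to its Objects map directly; each List
+	// below should then return exactly the names in want, compared as a set
+	// because ordering is not guaranteed.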
+ { + objs, err := mock.AlphaDisks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Disks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Disks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaDisks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Disks().Delete(ctx, *keyGA); err != nil { + t.Errorf("Disks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaDisks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaDisks().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Disks().Delete(ctx, *keyGA); err == nil { + t.Errorf("Disks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestFirewallsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Firewalls().Get(ctx, *key); err == nil { + t.Errorf("Firewalls().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.Firewall{} + if err := mock.Firewalls().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Firewalls().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.Firewalls().Get(ctx, *key); err != nil { + t.Errorf("Firewalls().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockFirewalls.Objects[*keyGA] = mock.MockFirewalls.Obj(&ga.Firewall{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Firewalls().List(ctx, filter.None) + if err != nil { + t.Errorf("Firewalls().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaFirewalls().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.Firewalls().Delete(ctx, *keyGA); err != nil { + t.Errorf("Firewalls().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.Firewalls().Delete(ctx, *keyGA); err == nil { + t.Errorf("Firewalls().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestInstancesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.ZonalKey("key-beta", "location") + key = keyBeta + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
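+	// Nothing has been inserted yet, so every versioned Get below should fail
+	// because the mock's object map is still empty. A test that wanted a Get
+	// to succeed without inserting could install a hook instead; a sketch,
+	// using the MockZones hook signature declared earlier in this package
+	// (the other mocks expose analogous fields):
+	//
+	//	mock.MockZones.GetHook = func(m *MockZones, ctx context.Context, key meta.Key) (bool, *ga.Zone, error) {
+	//		return true, &ga.Zone{Name: key.Name}, nil // intercept and fabricate a result
+	//	}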
+ if _, err := mock.AlphaInstances().Get(ctx, *key); err == nil { + t.Errorf("AlphaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaInstances().Get(ctx, *key); err == nil { + t.Errorf("BetaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Instances().Get(ctx, *key); err == nil { + t.Errorf("Instances().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Instance{} + if err := mock.AlphaInstances().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Instance{} + if err := mock.BetaInstances().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Instance{} + if err := mock.Instances().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Instances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaInstances().Get(ctx, *key); err != nil { + t.Errorf("AlphaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaInstances().Get(ctx, *key); err != nil { + t.Errorf("BetaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Instances().Get(ctx, *key); err != nil { + t.Errorf("Instances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaInstances.Objects[*keyAlpha] = mock.MockAlphaInstances.Obj(&alpha.Instance{Name: keyAlpha.Name}) + mock.MockBetaInstances.Objects[*keyBeta] = mock.MockBetaInstances.Obj(&beta.Instance{Name: keyBeta.Name}) + mock.MockInstances.Objects[*keyGA] = mock.MockInstances.Obj(&ga.Instance{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Instances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Instances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. 
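+	// Each object is deleted through the API version that inserted it, using
+	// that version's key from above.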
+ if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Instances().Delete(ctx, *keyGA); err != nil { + t.Errorf("Instances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Instances().Delete(ctx, *keyGA); err == nil { + t.Errorf("Instances().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestProjectsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + + // Insert. + + // Get across versions. + + // List. + mock.MockProjects.Objects[*keyGA] = mock.MockProjects.Obj(&ga.Project{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + + // Delete across versions. + + // Delete not found. +} + +func TestRoutesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Routes().Get(ctx, *key); err == nil { + t.Errorf("Routes().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.Route{} + if err := mock.Routes().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Routes().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.Routes().Get(ctx, *key); err != nil { + t.Errorf("Routes().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockRoutes.Objects[*keyGA] = mock.MockRoutes.Obj(&ga.Route{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Routes().List(ctx, filter.None) + if err != nil { + t.Errorf("Routes().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.Routes().Delete(ctx, *keyGA); err != nil { + t.Errorf("Routes().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.Routes().Delete(ctx, *keyGA); err == nil { + t.Errorf("Routes().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestTargetPoolsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetPools().Get(ctx, *key); err == nil { + t.Errorf("TargetPools().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. 
+ { + obj := &ga.TargetPool{} + if err := mock.TargetPools().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetPools().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetPools().Get(ctx, *key); err != nil { + t.Errorf("TargetPools().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetPools.Objects[*keyGA] = mock.MockTargetPools.Obj(&ga.TargetPool{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetPools().List(ctx, location, filter.None) + if err != nil { + t.Errorf("TargetPools().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetPools().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetPools().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.TargetPools().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetPools().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestGlobalForwardingRulesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.GlobalForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.ForwardingRule{} + if err := mock.GlobalForwardingRules().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.GlobalForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockGlobalForwardingRules.Objects[*keyGA] = mock.MockGlobalForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.GlobalForwardingRules().List(ctx, filter.None) + if err != nil { + t.Errorf("GlobalForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestNetworkEndpointGroupsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.NetworkEndpointGroup{} + if err := mock.AlphaNetworkEndpointGroups().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaNetworkEndpointGroups().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaNetworkEndpointGroups().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestTargetHttpProxiesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetHttpProxies().Get(ctx, *key); err == nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetHttpProxy{} + if err := mock.TargetHttpProxies().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetHttpProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetHttpProxies().Get(ctx, *key); err != nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetHttpProxies.Objects[*keyGA] = mock.MockTargetHttpProxies.Obj(&ga.TargetHttpProxy{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetHttpProxies().List(ctx, filter.None) + if err != nil { + t.Errorf("TargetHttpProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
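+	// The delete above already removed the object, so repeating it should
+	// report not found.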
+ if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestUrlMapsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.UrlMaps().Get(ctx, *key); err == nil { + t.Errorf("UrlMaps().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.UrlMap{} + if err := mock.UrlMaps().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("UrlMaps().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.UrlMaps().Get(ctx, *key); err != nil { + t.Errorf("UrlMaps().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockUrlMaps.Objects[*keyGA] = mock.MockUrlMaps.Obj(&ga.UrlMap{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.UrlMaps().List(ctx, filter.None) + if err != nil { + t.Errorf("UrlMaps().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.UrlMaps().Delete(ctx, *keyGA); err != nil { + t.Errorf("UrlMaps().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.UrlMaps().Delete(ctx, *keyGA); err == nil { + t.Errorf("UrlMaps().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestZonesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Zones().Get(ctx, *key); err == nil { + t.Errorf("Zones().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. + mock.MockZones.Objects[*keyGA] = mock.MockZones.Obj(&ga.Zone{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Zones().List(ctx, filter.None) + if err != nil { + t.Errorf("Zones().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. +} + +func TestGlobalAddressesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.GlobalAddresses().Get(ctx, *key); err == nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.Address{} + if err := mock.GlobalAddresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. 
+ if obj, err := mock.GlobalAddresses().Get(ctx, *key); err != nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockGlobalAddresses.Objects[*keyGA] = mock.MockGlobalAddresses.Obj(&ga.Address{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.GlobalAddresses().List(ctx, filter.None) + if err != nil { + t.Errorf("GlobalAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.GlobalKey("key-alpha") + key = keyAlpha + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BackendServices().Get(ctx, *key); err == nil { + t.Errorf("BackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.BackendService{} + if err := mock.AlphaBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.BackendService{} + if err := mock.BackendServices().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("BackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BackendServices().Get(ctx, *key); err != nil { + t.Errorf("BackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaBackendServices.Objects[*keyAlpha] = mock.MockAlphaBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + mock.MockBackendServices.Objects[*keyGA] = mock.MockBackendServices.Obj(&ga.BackendService{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaBackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("AlphaBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("BackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err != nil { + t.Errorf("BackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err == nil { + t.Errorf("BackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestForwardingRulesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("AlphaForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.ForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("ForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.ForwardingRule{} + if err := mock.AlphaForwardingRules().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.ForwardingRule{} + if err := mock.ForwardingRules().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("ForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("AlphaForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.ForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("ForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaForwardingRules.Objects[*keyAlpha] = mock.MockAlphaForwardingRules.Obj(&alpha.ForwardingRule{Name: keyAlpha.Name}) + mock.MockForwardingRules.Objects[*keyGA] = mock.MockForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaForwardingRules().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaForwardingRules().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.ForwardingRules().List(ctx, location, filter.None) + if err != nil { + t.Errorf("ForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaForwardingRules().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaForwardingRules().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.ForwardingRules().Delete(ctx, *keyGA); err != nil { + t.Errorf("ForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaForwardingRules().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.ForwardingRules().Delete(ctx, *keyGA); err == nil { + t.Errorf("ForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Regions().Get(ctx, *key); err == nil { + t.Errorf("Regions().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. + mock.MockRegions.Objects[*keyGA] = mock.MockRegions.Obj(&ga.Region{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Regions().List(ctx, filter.None) + if err != nil { + t.Errorf("Regions().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. +} + +func TestAddressesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.RegionalKey("key-beta", "location") + key = keyBeta + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { + t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Addresses().Get(ctx, *key); err == nil { + t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. 
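+	// Addresses is available at all three API versions, so one object is
+	// inserted per version, each under its own regional key.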
+ { + obj := &alpha.Address{} + if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Address{} + if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Address{} + if err := mock.Addresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { + t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Addresses().Get(ctx, *key); err != nil { + t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaAddresses.Objects[*keyAlpha] = mock.MockAlphaAddresses.Obj(&alpha.Address{Name: keyAlpha.Name}) + mock.MockBetaAddresses.Objects[*keyBeta] = mock.MockBetaAddresses.Obj(&beta.Address{Name: keyBeta.Name}) + mock.MockAddresses.Objects[*keyGA] = mock.MockAddresses.Obj(&ga.Address{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Addresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.BackendService{} + if err := mock.AlphaRegionBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionBackendServices.Objects[*keyAlpha] = mock.MockAlphaRegionBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaRegionBackendServices().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionDisksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionDisks().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Disk{} + if err := mock.AlphaRegionDisks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionDisks().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestHealthChecksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.GlobalKey("key-alpha") + key = keyAlpha + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaHealthChecks().Get(ctx, *key); err == nil { + t.Errorf("AlphaHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.HealthChecks().Get(ctx, *key); err == nil { + t.Errorf("HealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.HealthCheck{} + if err := mock.AlphaHealthChecks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.HealthCheck{} + if err := mock.HealthChecks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("HealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaHealthChecks().Get(ctx, *key); err != nil { + t.Errorf("AlphaHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.HealthChecks().Get(ctx, *key); err != nil { + t.Errorf("HealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaHealthChecks.Objects[*keyAlpha] = mock.MockAlphaHealthChecks.Obj(&alpha.HealthCheck{Name: keyAlpha.Name}) + mock.MockHealthChecks.Objects[*keyGA] = mock.MockHealthChecks.Obj(&ga.HealthCheck{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaHealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("AlphaHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.HealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("HealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. 
+ if err := mock.AlphaHealthChecks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaHealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.HealthChecks().Delete(ctx, *keyGA); err != nil { + t.Errorf("HealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaHealthChecks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaHealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.HealthChecks().Delete(ctx, *keyGA); err == nil { + t.Errorf("HealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestHttpHealthChecksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.HttpHealthChecks().Get(ctx, *key); err == nil { + t.Errorf("HttpHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.HttpHealthCheck{} + if err := mock.HttpHealthChecks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("HttpHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.HttpHealthChecks().Get(ctx, *key); err != nil { + t.Errorf("HttpHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockHttpHealthChecks.Objects[*keyGA] = mock.MockHttpHealthChecks.Obj(&ga.HttpHealthCheck{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.HttpHealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("HttpHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHttpHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.HttpHealthChecks().Delete(ctx, *keyGA); err != nil { + t.Errorf("HttpHealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.HttpHealthChecks().Delete(ctx, *keyGA); err == nil { + t.Errorf("HttpHealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestHttpsHealthChecksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.HttpsHealthChecks().Get(ctx, *key); err == nil { + t.Errorf("HttpsHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.HttpsHealthCheck{} + if err := mock.HttpsHealthChecks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("HttpsHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.HttpsHealthChecks().Get(ctx, *key); err != nil { + t.Errorf("HttpsHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockHttpsHealthChecks.Objects[*keyGA] = mock.MockHttpsHealthChecks.Obj(&ga.HttpsHealthCheck{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.HttpsHealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("HttpsHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHttpsHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.HttpsHealthChecks().Delete(ctx, *keyGA); err != nil { + t.Errorf("HttpsHealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.HttpsHealthChecks().Delete(ctx, *keyGA); err == nil { + t.Errorf("HttpsHealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestInstanceGroupsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.InstanceGroups().Get(ctx, *key); err == nil { + t.Errorf("InstanceGroups().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.InstanceGroup{} + if err := mock.InstanceGroups().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("InstanceGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.InstanceGroups().Get(ctx, *key); err != nil { + t.Errorf("InstanceGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockInstanceGroups.Objects[*keyGA] = mock.MockInstanceGroups.Obj(&ga.InstanceGroup{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.InstanceGroups().List(ctx, location, filter.None) + if err != nil { + t.Errorf("InstanceGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstanceGroups().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.InstanceGroups().Delete(ctx, *keyGA); err != nil { + t.Errorf("InstanceGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.InstanceGroups().Delete(ctx, *keyGA); err == nil { + t.Errorf("InstanceGroups().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestSslCertificatesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.SslCertificates().Get(ctx, *key); err == nil { + t.Errorf("SslCertificates().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.SslCertificate{} + if err := mock.SslCertificates().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("SslCertificates().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.SslCertificates().Get(ctx, *key); err != nil { + t.Errorf("SslCertificates().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. 
+ mock.MockSslCertificates.Objects[*keyGA] = mock.MockSslCertificates.Obj(&ga.SslCertificate{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.SslCertificates().List(ctx, filter.None) + if err != nil { + t.Errorf("SslCertificates().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaSslCertificates().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.SslCertificates().Delete(ctx, *keyGA); err != nil { + t.Errorf("SslCertificates().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.SslCertificates().Delete(ctx, *keyGA); err == nil { + t.Errorf("SslCertificates().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestTargetHttpsProxiesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetHttpsProxies().Get(ctx, *key); err == nil { + t.Errorf("TargetHttpsProxies().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetHttpsProxy{} + if err := mock.TargetHttpsProxies().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetHttpsProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetHttpsProxies().Get(ctx, *key); err != nil { + t.Errorf("TargetHttpsProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetHttpsProxies.Objects[*keyGA] = mock.MockTargetHttpsProxies.Obj(&ga.TargetHttpsProxy{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetHttpsProxies().List(ctx, filter.None) + if err != nil { + t.Errorf("TargetHttpsProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetHttpsProxies().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetHttpsProxies().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetHttpsProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.TargetHttpsProxies().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetHttpsProxies().Delete(%v, %v) = nil; want error", ctx, key) + } +} From a0adc1bb19c706ac5d93519c1fa68689c7782720 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:36:12 -0800 Subject: [PATCH 076/264] Special custom code for handling the Projects resource --- .../providers/gce/cloud/gce_projects.go | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/gce_projects.go diff --git a/pkg/cloudprovider/providers/gce/cloud/gce_projects.go b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go new file mode 100644 index 00000000000..adc60927afc --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "fmt" + "net/http" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +// ProjectsOps is the manually implemented methods for the Projects service. +type ProjectsOps interface { + Get(ctx context.Context, projectID string) (*compute.Project, error) + SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error +} + +// MockProjectOpsState is stored in the mock.X field. +type MockProjectOpsState struct { + metadata map[string]*compute.Metadata +} + +func (m *MockProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + if p, ok := m.Objects[*meta.GlobalKey(projectID)]; ok { + return p.ToGA(), nil + } + return nil, &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockProjects %v not found", projectID), + } +} + +func (g *GCEProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Projects", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Projects.Get(projectID) + call.Context(ctx) + return call.Do() +} + +func (m *MockProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, meta *compute.Metadata) error { + if m.X == nil { + m.X = &MockProjectOpsState{metadata: map[string]*compute.Metadata{}} + } + state := m.X.(*MockProjectOpsState) + state.metadata[projectID] = meta + return nil +} + +func (g *GCEProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error { + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetCommonInstanceMetadata", + Version: meta.Version("ga"), + Service: "Projects", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Projects.SetCommonInstanceMetadata(projectID, m) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} From f076f4fa0b0418ee0c99102a9021f1e4b8978b34 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:36:28 -0800 Subject: [PATCH 077/264] Hand written unit test for exercising the mock --- .../providers/gce/cloud/mock_test.go | 150 ++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/mock_test.go diff --git a/pkg/cloudprovider/providers/gce/cloud/mock_test.go b/pkg/cloudprovider/providers/gce/cloud/mock_test.go new file mode 100644 index 00000000000..3d0fb160cc0 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/mock_test.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "reflect" + "testing" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +func TestMocks(t *testing.T) { + t.Parallel() + + // This test uses Addresses, but the logic that is generated is the same for + // other basic objects. + const region = "us-central1" + + ctx := context.Background() + mock := NewMockGCE() + + keyAlpha := meta.RegionalKey("key-alpha", region) + keyBeta := meta.RegionalKey("key-beta", region) + keyGA := meta.RegionalKey("key-ga", region) + key := keyAlpha + + // Get not found. + if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { + t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Addresses().Get(ctx, *key); err == nil { + t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + // Insert. + { + obj := &alpha.Address{} + if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Address{} + if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Address{} + if err := mock.Addresses().Insert(ctx, *keyGA, &ga.Address{Name: "ga"}); err != nil { + t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + // Get across versions. + if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { + t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Addresses().Get(ctx, *key); err != nil { + t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + // List across versions. 
+ want := map[string]bool{"key-alpha": true, "key-beta": true, "key-ga": true} + { + objs, err := mock.AlphaAddresses().List(ctx, region, filter.None) + if err != nil { + t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaAddresses().List(ctx, region, filter.None) + if err != nil { + t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Addresses().List(ctx, region, filter.None) + if err != nil { + t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + // Delete across versions. + if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + // Delete not found. 
+ if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} From 9a7088555904a3f3a9f61d94292fd8f308563602 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:30:56 -0800 Subject: [PATCH 078/264] BUILD --- pkg/cloudprovider/providers/gce/BUILD | 5 +- pkg/cloudprovider/providers/gce/cloud/BUILD | 63 +++++++++++++++++++ .../providers/gce/cloud/filter/BUILD | 30 +++++++++ .../providers/gce/cloud/gen/BUILD | 33 ++++++++++ .../providers/gce/cloud/meta/BUILD | 41 ++++++++++++ 5 files changed, 171 insertions(+), 1 deletion(-) create mode 100644 pkg/cloudprovider/providers/gce/cloud/BUILD create mode 100644 pkg/cloudprovider/providers/gce/cloud/filter/BUILD create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen/BUILD create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/BUILD diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 18205b9fcad..b112c95912c 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -125,6 +125,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/cloudprovider/providers/gce/cloud:all-srcs", + ], tags = ["automanaged"], ) diff --git a/pkg/cloudprovider/providers/gce/cloud/BUILD b/pkg/cloudprovider/providers/gce/cloud/BUILD new file mode 100644 index 00000000000..3df8f7a5e97 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/BUILD @@ -0,0 +1,63 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "gce_projects.go", + "gen.go", + "op.go", + "project.go", + "ratelimit.go", + "service.go", + "utils.go", + ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + "//vendor/google.golang.org/api/googleapi:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "gen_test.go", + "mock_test.go", + "utils_test.go", + ], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", + deps = [ + "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/cloudprovider/providers/gce/cloud/filter:all-srcs", + 
"//pkg/cloudprovider/providers/gce/cloud/gen:all-srcs", + "//pkg/cloudprovider/providers/gce/cloud/meta:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/BUILD b/pkg/cloudprovider/providers/gce/cloud/filter/BUILD new file mode 100644 index 00000000000..c0176ded894 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/filter/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["filter.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/glog:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["filter_test.go"], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/BUILD b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD new file mode 100644 index 00000000000..e196daf2ac8 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = ["main.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + ], +) + +go_binary( + name = "gen", + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/BUILD b/pkg/cloudprovider/providers/gce/cloud/meta/BUILD new file mode 100644 index 00000000000..4bcf3b5f5ba --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "key.go", + "meta.go", + "method.go", + "service.go", + ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["key_test.go"], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = 
["automanaged"], + visibility = ["//visibility:public"], +) From 2aaf8b47b2bea38aa9f0c6082a087fd80960a272 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 5 Jan 2018 11:54:42 -0800 Subject: [PATCH 079/264] Clean up documentation. --- pkg/cloudprovider/providers/gce/cloud/doc.go | 1 + .../providers/gce/cloud/gce_projects.go | 6 +- pkg/cloudprovider/providers/gce/cloud/gen.go | 95 +++++++++++++++++++ .../providers/gce/cloud/gen/main.go | 4 +- .../providers/gce/cloud/meta/method.go | 11 ++- .../providers/gce/cloud/meta/service.go | 4 + .../providers/gce/cloud/project.go | 1 + .../providers/gce/cloud/ratelimit.go | 1 + 8 files changed, 120 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/doc.go b/pkg/cloudprovider/providers/gce/cloud/doc.go index d0d7a6cfb19..a6b121457cd 100644 --- a/pkg/cloudprovider/providers/gce/cloud/doc.go +++ b/pkg/cloudprovider/providers/gce/cloud/doc.go @@ -60,6 +60,7 @@ limitations under the License. // &ServiceInfo{ // Object: "InstanceGroup", // Name of the object type. // Service: "InstanceGroups", // Name of the service. +// Resource: "instanceGroups", // Lowercase resource name (as appears in the URL). // version: meta.VersionAlpha, // API version (one entry per version is needed). // keyType: Zonal, // What kind of resource this is. // serviceType: reflect.TypeOf(&alpha.InstanceGroupsService{}), // Associated golang type. diff --git a/pkg/cloudprovider/providers/gce/cloud/gce_projects.go b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go index adc60927afc..c531881a94a 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gce_projects.go +++ b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go @@ -21,9 +21,9 @@ import ( "fmt" "net/http" - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) // ProjectsOps is the manually implemented methods for the Projects service. @@ -37,6 +37,7 @@ type MockProjectOpsState struct { metadata map[string]*compute.Metadata } +// Get a project by projectID. func (m *MockProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { m.Lock.Lock() defer m.Lock.Unlock() @@ -50,6 +51,7 @@ func (m *MockProjects) Get(ctx context.Context, projectID string) (*compute.Proj } } +// Get a project by projectID. func (g *GCEProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { rk := &RateLimitKey{ ProjectID: projectID, @@ -65,6 +67,7 @@ func (g *GCEProjects) Get(ctx context.Context, projectID string) (*compute.Proje return call.Do() } +// SetCommonInstanceMetadata for a given project. func (m *MockProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, meta *compute.Metadata) error { if m.X == nil { m.X = &MockProjectOpsState{metadata: map[string]*compute.Metadata{}} @@ -74,6 +77,7 @@ func (m *MockProjects) SetCommonInstanceMetadata(ctx context.Context, projectID return nil } +// SetCommonInstanceMetadata for a given project. 
func (g *GCEProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error { rk := &RateLimitKey{ ProjectID: projectID, diff --git a/pkg/cloudprovider/providers/gce/cloud/gen.go b/pkg/cloudprovider/providers/gce/cloud/gen.go index ef7a2c62eaf..33a2b7ba619 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -150,99 +150,162 @@ type GCE struct { gceZones *GCEZones } +// Addresses returns the interface for the ga Addresses. func (gce *GCE) Addresses() Addresses { return gce.gceAddresses } + +// AlphaAddresses returns the interface for the alpha Addresses. func (gce *GCE) AlphaAddresses() AlphaAddresses { return gce.gceAlphaAddresses } + +// BetaAddresses returns the interface for the beta Addresses. func (gce *GCE) BetaAddresses() BetaAddresses { return gce.gceBetaAddresses } + +// GlobalAddresses returns the interface for the ga GlobalAddresses. func (gce *GCE) GlobalAddresses() GlobalAddresses { return gce.gceGlobalAddresses } + +// BackendServices returns the interface for the ga BackendServices. func (gce *GCE) BackendServices() BackendServices { return gce.gceBackendServices } + +// AlphaBackendServices returns the interface for the alpha BackendServices. func (gce *GCE) AlphaBackendServices() AlphaBackendServices { return gce.gceAlphaBackendServices } + +// AlphaRegionBackendServices returns the interface for the alpha RegionBackendServices. func (gce *GCE) AlphaRegionBackendServices() AlphaRegionBackendServices { return gce.gceAlphaRegionBackendServices } + +// Disks returns the interface for the ga Disks. func (gce *GCE) Disks() Disks { return gce.gceDisks } + +// AlphaDisks returns the interface for the alpha Disks. func (gce *GCE) AlphaDisks() AlphaDisks { return gce.gceAlphaDisks } + +// AlphaRegionDisks returns the interface for the alpha RegionDisks. func (gce *GCE) AlphaRegionDisks() AlphaRegionDisks { return gce.gceAlphaRegionDisks } + +// Firewalls returns the interface for the ga Firewalls. func (gce *GCE) Firewalls() Firewalls { return gce.gceFirewalls } + +// ForwardingRules returns the interface for the ga ForwardingRules. func (gce *GCE) ForwardingRules() ForwardingRules { return gce.gceForwardingRules } + +// AlphaForwardingRules returns the interface for the alpha ForwardingRules. func (gce *GCE) AlphaForwardingRules() AlphaForwardingRules { return gce.gceAlphaForwardingRules } + +// GlobalForwardingRules returns the interface for the ga GlobalForwardingRules. func (gce *GCE) GlobalForwardingRules() GlobalForwardingRules { return gce.gceGlobalForwardingRules } + +// HealthChecks returns the interface for the ga HealthChecks. func (gce *GCE) HealthChecks() HealthChecks { return gce.gceHealthChecks } + +// AlphaHealthChecks returns the interface for the alpha HealthChecks. func (gce *GCE) AlphaHealthChecks() AlphaHealthChecks { return gce.gceAlphaHealthChecks } + +// HttpHealthChecks returns the interface for the ga HttpHealthChecks. func (gce *GCE) HttpHealthChecks() HttpHealthChecks { return gce.gceHttpHealthChecks } + +// HttpsHealthChecks returns the interface for the ga HttpsHealthChecks. func (gce *GCE) HttpsHealthChecks() HttpsHealthChecks { return gce.gceHttpsHealthChecks } + +// InstanceGroups returns the interface for the ga InstanceGroups. func (gce *GCE) InstanceGroups() InstanceGroups { return gce.gceInstanceGroups } + +// Instances returns the interface for the ga Instances. 
func (gce *GCE) Instances() Instances { return gce.gceInstances } + +// BetaInstances returns the interface for the beta Instances. func (gce *GCE) BetaInstances() BetaInstances { return gce.gceBetaInstances } + +// AlphaInstances returns the interface for the alpha Instances. func (gce *GCE) AlphaInstances() AlphaInstances { return gce.gceAlphaInstances } + +// AlphaNetworkEndpointGroups returns the interface for the alpha NetworkEndpointGroups. func (gce *GCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { return gce.gceAlphaNetworkEndpointGroups } + +// Projects returns the interface for the ga Projects. func (gce *GCE) Projects() Projects { return gce.gceProjects } + +// Regions returns the interface for the ga Regions. func (gce *GCE) Regions() Regions { return gce.gceRegions } + +// Routes returns the interface for the ga Routes. func (gce *GCE) Routes() Routes { return gce.gceRoutes } + +// SslCertificates returns the interface for the ga SslCertificates. func (gce *GCE) SslCertificates() SslCertificates { return gce.gceSslCertificates } + +// TargetHttpProxies returns the interface for the ga TargetHttpProxies. func (gce *GCE) TargetHttpProxies() TargetHttpProxies { return gce.gceTargetHttpProxies } + +// TargetHttpsProxies returns the interface for the ga TargetHttpsProxies. func (gce *GCE) TargetHttpsProxies() TargetHttpsProxies { return gce.gceTargetHttpsProxies } + +// TargetPools returns the interface for the ga TargetPools. func (gce *GCE) TargetPools() TargetPools { return gce.gceTargetPools } + +// UrlMaps returns the interface for the ga UrlMaps. func (gce *GCE) UrlMaps() UrlMaps { return gce.gceUrlMaps } + +// Zones returns the interface for the ga Zones. func (gce *GCE) Zones() Zones { return gce.gceZones } @@ -350,130 +413,162 @@ type MockGCE struct { MockZones *MockZones } +// Addresses returns the interface for the ga Addresses. func (mock *MockGCE) Addresses() Addresses { return mock.MockAddresses } +// AlphaAddresses returns the interface for the alpha Addresses. func (mock *MockGCE) AlphaAddresses() AlphaAddresses { return mock.MockAlphaAddresses } +// BetaAddresses returns the interface for the beta Addresses. func (mock *MockGCE) BetaAddresses() BetaAddresses { return mock.MockBetaAddresses } +// GlobalAddresses returns the interface for the ga GlobalAddresses. func (mock *MockGCE) GlobalAddresses() GlobalAddresses { return mock.MockGlobalAddresses } +// BackendServices returns the interface for the ga BackendServices. func (mock *MockGCE) BackendServices() BackendServices { return mock.MockBackendServices } +// AlphaBackendServices returns the interface for the alpha BackendServices. func (mock *MockGCE) AlphaBackendServices() AlphaBackendServices { return mock.MockAlphaBackendServices } +// AlphaRegionBackendServices returns the interface for the alpha RegionBackendServices. func (mock *MockGCE) AlphaRegionBackendServices() AlphaRegionBackendServices { return mock.MockAlphaRegionBackendServices } +// Disks returns the interface for the ga Disks. func (mock *MockGCE) Disks() Disks { return mock.MockDisks } +// AlphaDisks returns the interface for the alpha Disks. func (mock *MockGCE) AlphaDisks() AlphaDisks { return mock.MockAlphaDisks } +// AlphaRegionDisks returns the interface for the alpha RegionDisks. func (mock *MockGCE) AlphaRegionDisks() AlphaRegionDisks { return mock.MockAlphaRegionDisks } +// Firewalls returns the interface for the ga Firewalls. 
func (mock *MockGCE) Firewalls() Firewalls { return mock.MockFirewalls } +// ForwardingRules returns the interface for the ga ForwardingRules. func (mock *MockGCE) ForwardingRules() ForwardingRules { return mock.MockForwardingRules } +// AlphaForwardingRules returns the interface for the alpha ForwardingRules. func (mock *MockGCE) AlphaForwardingRules() AlphaForwardingRules { return mock.MockAlphaForwardingRules } +// GlobalForwardingRules returns the interface for the ga GlobalForwardingRules. func (mock *MockGCE) GlobalForwardingRules() GlobalForwardingRules { return mock.MockGlobalForwardingRules } +// HealthChecks returns the interface for the ga HealthChecks. func (mock *MockGCE) HealthChecks() HealthChecks { return mock.MockHealthChecks } +// AlphaHealthChecks returns the interface for the alpha HealthChecks. func (mock *MockGCE) AlphaHealthChecks() AlphaHealthChecks { return mock.MockAlphaHealthChecks } +// HttpHealthChecks returns the interface for the ga HttpHealthChecks. func (mock *MockGCE) HttpHealthChecks() HttpHealthChecks { return mock.MockHttpHealthChecks } +// HttpsHealthChecks returns the interface for the ga HttpsHealthChecks. func (mock *MockGCE) HttpsHealthChecks() HttpsHealthChecks { return mock.MockHttpsHealthChecks } +// InstanceGroups returns the interface for the ga InstanceGroups. func (mock *MockGCE) InstanceGroups() InstanceGroups { return mock.MockInstanceGroups } +// Instances returns the interface for the ga Instances. func (mock *MockGCE) Instances() Instances { return mock.MockInstances } +// BetaInstances returns the interface for the beta Instances. func (mock *MockGCE) BetaInstances() BetaInstances { return mock.MockBetaInstances } +// AlphaInstances returns the interface for the alpha Instances. func (mock *MockGCE) AlphaInstances() AlphaInstances { return mock.MockAlphaInstances } +// AlphaNetworkEndpointGroups returns the interface for the alpha NetworkEndpointGroups. func (mock *MockGCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { return mock.MockAlphaNetworkEndpointGroups } +// Projects returns the interface for the ga Projects. func (mock *MockGCE) Projects() Projects { return mock.MockProjects } +// Regions returns the interface for the ga Regions. func (mock *MockGCE) Regions() Regions { return mock.MockRegions } +// Routes returns the interface for the ga Routes. func (mock *MockGCE) Routes() Routes { return mock.MockRoutes } +// SslCertificates returns the interface for the ga SslCertificates. func (mock *MockGCE) SslCertificates() SslCertificates { return mock.MockSslCertificates } +// TargetHttpProxies returns the interface for the ga TargetHttpProxies. func (mock *MockGCE) TargetHttpProxies() TargetHttpProxies { return mock.MockTargetHttpProxies } +// TargetHttpsProxies returns the interface for the ga TargetHttpsProxies. func (mock *MockGCE) TargetHttpsProxies() TargetHttpsProxies { return mock.MockTargetHttpsProxies } +// TargetPools returns the interface for the ga TargetPools. func (mock *MockGCE) TargetPools() TargetPools { return mock.MockTargetPools } +// UrlMaps returns the interface for the ga UrlMaps. func (mock *MockGCE) UrlMaps() UrlMaps { return mock.MockUrlMaps } +// Zones returns the interface for the ga Zones. 
func (mock *MockGCE) Zones() Zones {
 	return mock.MockZones
 }
diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go
index f8dcd730b9f..7217d35c5d6 100644
--- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go
+++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go
@@ -30,8 +30,8 @@ import (
 	"text/template"
 	"time"
 
-	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
 	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
 )
 
 const (
@@ -167,6 +167,7 @@ type GCE struct {
 }
 
 {{range .All}}
+// {{.WrapType}} returns the interface for the {{.Version}} {{.Service}}.
 func (gce *GCE) {{.WrapType}}() {{.WrapType}} {
 	return gce.{{.Field}}
 }
@@ -196,6 +197,7 @@ type MockGCE struct {
 {{- end}}
 }
 {{range .All}}
+// {{.WrapType}} returns the interface for the {{.Version}} {{.Service}}.
 func (mock *MockGCE) {{.WrapType}}() {{.WrapType}} {
 	return mock.{{.MockField}}
 }
diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/method.go b/pkg/cloudprovider/providers/gce/cloud/meta/method.go
index 5adf065fae4..c3a33d801d3 100644
--- a/pkg/cloudprovider/providers/gce/cloud/meta/method.go
+++ b/pkg/cloudprovider/providers/gce/cloud/meta/method.go
@@ -91,7 +91,7 @@ func newMethod(s *ServiceInfo, m reflect.Method) *Method {
 	return ret
 }
 
-// Method is used to generate the calling code non-standard methods.
+// Method is used to generate the calling code for non-standard methods.
 type Method struct {
 	*ServiceInfo
 	m reflect.Method
@@ -135,6 +135,7 @@ func (mr *Method) args(skip int, nameArgs bool, prefix []string) []string {
 	return append(prefix, a...)
 }
 
+// init the method, performing some rudimentary static checking.
 func (mr *Method) init() {
 	fType := mr.m.Func.Type()
 	if fType.NumIn() < mr.argsSkip() {
@@ -189,10 +190,14 @@ func (mr *Method) init() {
 	}
 }
 
+// Name is the name of the method.
 func (mr *Method) Name() string {
 	return mr.m.Name
 }
 
+// CallArgs is a list of comma separated "argN" used for calling the method.
+// For example, if the method has two additional arguments, this will return
+// "arg0, arg1".
 func (mr *Method) CallArgs() string {
 	var args []string
 	for i := mr.argsSkip(); i < mr.m.Func.Type().NumIn(); i++ {
@@ -204,10 +209,12 @@ func (mr *Method) CallArgs() string {
 	return fmt.Sprintf(", %s", strings.Join(args, ", "))
 }
 
+// MockHookName is the name of the hook function in the mock.
 func (mr *Method) MockHookName() string {
 	return mr.m.Name + "Hook"
 }
 
+// MockHook is the definition of the hook function.
 func (mr *Method) MockHook() string {
 	args := mr.args(mr.argsSkip(), false, []string{
 		fmt.Sprintf("*%s", mr.MockWrapType()),
@@ -220,6 +227,7 @@ func (mr *Method) MockHook() string {
 	return fmt.Sprintf("%v func(%v) (*%v.%v, error)", mr.MockHookName(), strings.Join(args, ", "), mr.Version(), mr.ReturnType)
 }
 
+// FcnArgs is the function signature for the definition of the method.
 func (mr *Method) FcnArgs() string {
 	args := mr.args(mr.argsSkip(), true, []string{
 		"ctx context.Context",
@@ -232,6 +240,7 @@ func (mr *Method) FcnArgs() string {
 	return fmt.Sprintf("%v(%v) (*%v.%v, error)", mr.m.Name, strings.Join(args, ", "), mr.Version(), mr.ReturnType)
 }
 
+// InterfaceFunc is the function declaration of the method in the interface.
func (mr *Method) InterfaceFunc() string {
 	args := mr.args(mr.argsSkip(), false, []string{"context.Context", "meta.Key"})
 	if mr.ReturnType == "Operation" {
diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/service.go b/pkg/cloudprovider/providers/gce/cloud/meta/service.go
index ffa3385075b..b2ba91c8ec5 100644
--- a/pkg/cloudprovider/providers/gce/cloud/meta/service.go
+++ b/pkg/cloudprovider/providers/gce/cloud/meta/service.go
@@ -220,6 +220,7 @@ type ServiceGroup struct {
 	GA    *ServiceInfo
 }
 
+// Service returns any ServiceInfo object belonging to the ServiceGroup.
 func (sg *ServiceGroup) Service() string {
 	switch {
 	case sg.GA != nil:
@@ -233,14 +234,17 @@ func (sg *ServiceGroup) Service() string {
 	}
 }
 
+// HasGA returns true if this object has a GA representation.
 func (sg *ServiceGroup) HasGA() bool {
 	return sg.GA != nil
 }
 
+// HasAlpha returns true if this object has an Alpha representation.
 func (sg *ServiceGroup) HasAlpha() bool {
 	return sg.Alpha != nil
 }
 
+// HasBeta returns true if this object has a Beta representation.
 func (sg *ServiceGroup) HasBeta() bool {
 	return sg.Beta != nil
 }
diff --git a/pkg/cloudprovider/providers/gce/cloud/project.go b/pkg/cloudprovider/providers/gce/cloud/project.go
index 74299e4a23e..231e7cf916a 100644
--- a/pkg/cloudprovider/providers/gce/cloud/project.go
+++ b/pkg/cloudprovider/providers/gce/cloud/project.go
@@ -39,6 +39,7 @@ type SingleProjectRouter struct {
 	ID string
 }
 
+// ProjectID returns the project ID to be used for a call to the API.
 func (r *SingleProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string {
 	return r.ID
 }
diff --git a/pkg/cloudprovider/providers/gce/cloud/ratelimit.go b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go
index 948f1d36d89..e38b8f7de3c 100644
--- a/pkg/cloudprovider/providers/gce/cloud/ratelimit.go
+++ b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go
@@ -51,6 +51,7 @@ type RateLimiter interface {
 type NopRateLimiter struct {
 }
 
+// Accept the operation to be rate limited.
 func (*NopRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
 	// Rate limit polling of the Operation status to avoid hammering GCE
 	// for the status of an operation.
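As a quick orientation before the remaining patches, the sketch below shows how a caller might drive the in-memory mock introduced here: one GA Address is walked through insert, get, list, and delete, mirroring the cycle the generated gen_test.go exercises for each service group. This is only an illustrative sketch against the interfaces visible in these diffs (NewMockGCE, meta.RegionalKey, filter.None); the test name, address name, and region are placeholders and are not part of the patch series.

package cloud

import (
	"context"
	"testing"

	ga "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

// TestAddressLifecycleSketch is an illustrative example, not one of the
// generated tests: it walks one GA Address through insert, get, list and
// delete against the in-memory mock.
func TestAddressLifecycleSketch(t *testing.T) {
	ctx := context.Background()
	mock := NewMockGCE()
	// The name and region below are placeholders for the example.
	key := meta.RegionalKey("example-address", "us-central1")

	// Insert, then read the object back by key.
	if err := mock.Addresses().Insert(ctx, *key, &ga.Address{Name: "example-address"}); err != nil {
		t.Fatalf("Insert() = %v; want nil", err)
	}
	if _, err := mock.Addresses().Get(ctx, *key); err != nil {
		t.Fatalf("Get() = _, %v; want nil", err)
	}

	// List the region; the inserted object should be the only one present.
	if objs, err := mock.Addresses().List(ctx, "us-central1", filter.None); err != nil || len(objs) != 1 {
		t.Fatalf("List() = %v, %v; want exactly one object", objs, err)
	}

	// Delete the object; a second delete should report "not found".
	if err := mock.Addresses().Delete(ctx, *key); err != nil {
		t.Fatalf("Delete() = %v; want nil", err)
	}
	if err := mock.Addresses().Delete(ctx, *key); err == nil {
		t.Fatal("second Delete() = nil; want error")
	}
}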
From adaaed102835e02957d1f3c951ca654cea1a432d Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 5 Jan 2018 11:55:58 -0800 Subject: [PATCH 080/264] Ignore golint failures for bad compute API names --- hack/.golint_failures | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/.golint_failures b/hack/.golint_failures index c7cd6939add..92f25955ec8 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -87,6 +87,7 @@ pkg/cloudprovider pkg/cloudprovider/providers/aws pkg/cloudprovider/providers/fake pkg/cloudprovider/providers/gce +pkg/cloudprovider/providers/gce/cloud pkg/cloudprovider/providers/openstack pkg/cloudprovider/providers/ovirt pkg/cloudprovider/providers/photon From c3e23b1b145455b521fb2d15c5ccf06aac520fb6 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 5 Jan 2018 13:36:21 -0800 Subject: [PATCH 081/264] Fix gofmt --- .../providers/gce/cloud/meta/meta.go | 72 +++++++++---------- .../providers/gce/cloud/utils_test.go | 4 +- 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go index 3f60c00f412..e1f36904d01 100644 --- a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go +++ b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -65,14 +65,14 @@ var AllVersions = []Version{ // AllServices are a list of all the services to generate code for. Keep // this list in lexiographical order by object type. var AllServices = []*ServiceInfo{ - &ServiceInfo{ + { Object: "Address", Service: "Addresses", Resource: "addresses", keyType: Regional, serviceType: reflect.TypeOf(&ga.AddressesService{}), }, - &ServiceInfo{ + { Object: "Address", Service: "Addresses", Resource: "addresses", @@ -80,7 +80,7 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&alpha.AddressesService{}), }, - &ServiceInfo{ + { Object: "Address", Service: "Addresses", Resource: "addresses", @@ -88,14 +88,14 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&beta.AddressesService{}), }, - &ServiceInfo{ + { Object: "Address", Service: "GlobalAddresses", Resource: "addresses", keyType: Global, serviceType: reflect.TypeOf(&ga.GlobalAddressesService{}), }, - &ServiceInfo{ + { Object: "BackendService", Service: "BackendServices", Resource: "backendServices", @@ -106,16 +106,16 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "BackendService", Service: "BackendServices", - Resource: "backendServices", + Resource: "backendServices", version: VersionAlpha, keyType: Global, serviceType: reflect.TypeOf(&alpha.BackendServicesService{}), additionalMethods: []string{"Update"}, }, - &ServiceInfo{ + { Object: "BackendService", Service: "RegionBackendServices", Resource: "backendServices", @@ -127,14 +127,14 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "Disk", Service: "Disks", Resource: "disks", keyType: Zonal, serviceType: reflect.TypeOf(&ga.DisksService{}), }, - &ServiceInfo{ + { Object: "Disk", Service: "Disks", Resource: "disks", @@ -142,7 +142,7 @@ var AllServices = []*ServiceInfo{ keyType: Zonal, serviceType: reflect.TypeOf(&alpha.DisksService{}), }, - &ServiceInfo{ + { Object: "Disk", Service: "RegionDisks", Resource: "disks", @@ -150,7 +150,7 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&alpha.DisksService{}), }, - &ServiceInfo{ + { Object: "Firewall", Service: "Firewalls", Resource: "firewalls", @@ -160,14 +160,14 @@ var AllServices = 
[]*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "ForwardingRule", Service: "ForwardingRules", Resource: "forwardingRules", keyType: Regional, serviceType: reflect.TypeOf(&ga.ForwardingRulesService{}), }, - &ServiceInfo{ + { Object: "ForwardingRule", Service: "ForwardingRules", Resource: "forwardingRules", @@ -175,7 +175,7 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&alpha.ForwardingRulesService{}), }, - &ServiceInfo{ + { Object: "ForwardingRule", Service: "GlobalForwardingRules", Resource: "forwardingRules", @@ -185,7 +185,7 @@ var AllServices = []*ServiceInfo{ "SetTarget", }, }, - &ServiceInfo{ + { Object: "HealthCheck", Service: "HealthChecks", Resource: "healthChecks", @@ -195,7 +195,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "HealthCheck", Service: "HealthChecks", Resource: "healthChecks", @@ -206,7 +206,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "HttpHealthCheck", Service: "HttpHealthChecks", Resource: "httpHealthChecks", @@ -216,7 +216,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "HttpsHealthCheck", Service: "HttpsHealthChecks", Resource: "httpsHealthChecks", @@ -226,7 +226,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "InstanceGroup", Service: "InstanceGroups", Resource: "instanceGroups", @@ -239,7 +239,7 @@ var AllServices = []*ServiceInfo{ "SetNamedPorts", }, }, - &ServiceInfo{ + { Object: "Instance", Service: "Instances", Resource: "instances", @@ -250,7 +250,7 @@ var AllServices = []*ServiceInfo{ "DetachDisk", }, }, - &ServiceInfo{ + { Object: "Instance", Service: "Instances", Resource: "instances", @@ -262,7 +262,7 @@ var AllServices = []*ServiceInfo{ "DetachDisk", }, }, - &ServiceInfo{ + { Object: "Instance", Service: "Instances", Resource: "instances", @@ -275,7 +275,7 @@ var AllServices = []*ServiceInfo{ "UpdateNetworkInterface", }, }, - &ServiceInfo{ + { Object: "NetworkEndpointGroup", Service: "NetworkEndpointGroups", Resource: "networkEndpointGroups", @@ -288,16 +288,16 @@ var AllServices = []*ServiceInfo{ }, options: AggregatedList, }, - &ServiceInfo{ - Object: "Project", - Service: "Projects", + { + Object: "Project", + Service: "Projects", Resource: "projects", - keyType: Global, + keyType: Global, // Generate only the stub with no methods. 
options: NoGet | NoList | NoInsert | NoDelete | CustomOps, serviceType: reflect.TypeOf(&ga.ProjectsService{}), }, - &ServiceInfo{ + { Object: "Region", Service: "Regions", Resource: "regions", @@ -305,21 +305,21 @@ var AllServices = []*ServiceInfo{ options: ReadOnly, serviceType: reflect.TypeOf(&ga.RegionsService{}), }, - &ServiceInfo{ + { Object: "Route", Service: "Routes", Resource: "routes", keyType: Global, serviceType: reflect.TypeOf(&ga.RoutesService{}), }, - &ServiceInfo{ + { Object: "SslCertificate", Service: "SslCertificates", Resource: "sslCertificates", keyType: Global, serviceType: reflect.TypeOf(&ga.SslCertificatesService{}), }, - &ServiceInfo{ + { Object: "TargetHttpProxy", Service: "TargetHttpProxies", Resource: "targetHttpProxies", @@ -329,7 +329,7 @@ var AllServices = []*ServiceInfo{ "SetUrlMap", }, }, - &ServiceInfo{ + { Object: "TargetHttpsProxy", Service: "TargetHttpsProxies", Resource: "targetHttpsProxies", @@ -340,7 +340,7 @@ var AllServices = []*ServiceInfo{ "SetUrlMap", }, }, - &ServiceInfo{ + { Object: "TargetPool", Service: "TargetPools", Resource: "targetPools", @@ -351,7 +351,7 @@ var AllServices = []*ServiceInfo{ "RemoveInstance", }, }, - &ServiceInfo{ + { Object: "UrlMap", Service: "UrlMaps", Resource: "urlMaps", @@ -361,7 +361,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "Zone", Service: "Zones", Resource: "zones", diff --git a/pkg/cloudprovider/providers/gce/cloud/utils_test.go b/pkg/cloudprovider/providers/gce/cloud/utils_test.go index 823c8e73c88..562d0f35ba7 100644 --- a/pkg/cloudprovider/providers/gce/cloud/utils_test.go +++ b/pkg/cloudprovider/providers/gce/cloud/utils_test.go @@ -161,7 +161,7 @@ func TestCopyVisJSON(t *testing.T) { func TestSelfLink(t *testing.T) { t.Parallel() - for _, tc := range []struct{ + for _, tc := range []struct { ver meta.Version project string resource string @@ -189,7 +189,7 @@ func TestSelfLink(t *testing.T) { *meta.GlobalKey("key3"), "https://www.googleapis.com/compute/v1/projects/proj4/urlMaps/key3", }, - }{ + } { if link := SelfLink(tc.ver, tc.project, tc.resource, tc.key); link != tc.want { t.Errorf("SelfLink(%v, %q, %q, %v) = %v, want %q", tc.ver, tc.project, tc.resource, tc.key, link, tc.want) } From 5abf80718e3d5591204905c2b2b6d178f9b12104 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 16:57:15 -0800 Subject: [PATCH 082/264] Remove glog dependency in the generator --- pkg/cloudprovider/providers/gce/cloud/gen/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index 7217d35c5d6..ee48b374362 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -25,12 +25,12 @@ import ( "flag" "fmt" "io" + "log" "os" "os/exec" "text/template" "time" - "github.com/golang/glog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) @@ -1131,7 +1131,7 @@ func main() { genUnitTestHeader(out) genUnitTestServices(out) default: - glog.Fatalf("Invalid -mode: %q", flags.mode) + log.Fatalf("Invalid -mode: %q", flags.mode) } if flags.gofmt { From e609cda0d2ee6e69ed31740349aaad85edf89ec9 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 17:07:10 -0800 Subject: [PATCH 083/264] hack/ scripts to keep the generated code in sync --- hack/update-cloudprovider-gce.sh | 36 +++++++++++++++++++++++ hack/verify-cloudprovider-gce.sh | 50 ++++++++++++++++++++++++++++++++ 2 files changed, 86 
insertions(+) create mode 100755 hack/update-cloudprovider-gce.sh create mode 100755 hack/verify-cloudprovider-gce.sh diff --git a/hack/update-cloudprovider-gce.sh b/hack/update-cloudprovider-gce.sh new file mode 100755 index 00000000000..b7d606c95b4 --- /dev/null +++ b/hack/update-cloudprovider-gce.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" +GENERATOR="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen/main.go" + +GEN_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen.go" +GEN_TEST_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen_test.go" + +kube::golang::setup_env + +TMPFILE=$(mktemp verify-cloudprovider-gce-XXXX) +trap "{ rm -f ${TMPFILE}; }" EXIT + +go run "${GENERATOR}" > ${TMPFILE} +mv "${TMPFILE}" "${GEN_GO}" +go run "${GENERATOR}" -mode test > ${TMPFILE} +mv "${TMPFILE}" "${GEN_TEST_GO}" + +exit 0 diff --git a/hack/verify-cloudprovider-gce.sh b/hack/verify-cloudprovider-gce.sh new file mode 100755 index 00000000000..c7615d36592 --- /dev/null +++ b/hack/verify-cloudprovider-gce.sh @@ -0,0 +1,50 @@ +#!/bin/bash +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" +GENERATOR="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen/main.go" + +GEN_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen.go" +GEN_TEST_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen_test.go" + +kube::golang::setup_env + +TMPFILE=$(mktemp verify-cloudprovider-gce-XXXX) +trap "{ rm -f ${TMPFILE}; }" EXIT + +go run "${GENERATOR}" > ${TMPFILE} +if ! diff "${TMPFILE}" "${GEN_GO}"; then + echo "Generated file ${GEN_GO} needs to be updated (run hack/update-cloudprovider-gce.sh)" + echo + diff -u "${TMPFILE}" "${GEN_GO}" || true + exit 1 +fi + +go run "${GENERATOR}" -mode test > ${TMPFILE} +if ! 
diff "${TMPFILE}" "${GEN_TEST_GO}"; then + echo "Generated file ${GEN_TEST_GO} needs to be updated (run hack/update-cloudprovider-gce.sh)" + echo + diff -u "${TMPFILE}" "${GEN_TEST_GO}" || true + exit 1 +fi + +exit 0 From 8cdfe362671dc6a7b93f8bfec0e813ee66f0604b Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 17:07:22 -0800 Subject: [PATCH 084/264] Update generated code to stable order --- .../providers/gce/cloud/gen/main.go | 10 +- .../providers/gce/cloud/gen_test.go | 1928 ++++++++--------- 2 files changed, 973 insertions(+), 965 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index ee48b374362..ba0dd9cc2f0 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -28,6 +28,7 @@ import ( "log" "os" "os/exec" + "sort" "text/template" "time" @@ -1110,7 +1111,14 @@ func Test{{.Service}}Group(t *testing.T) { } ` tmpl := template.Must(template.New("unittest").Parse(text)) - for _, s := range meta.AllServicesByGroup { + // Sort keys so the output will be stable. + var keys []string + for k, _ := range meta.AllServicesByGroup { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + s := meta.AllServicesByGroup[k] if err := tmpl.Execute(wr, s); err != nil { panic(err) } diff --git a/pkg/cloudprovider/providers/gce/cloud/gen_test.go b/pkg/cloudprovider/providers/gce/cloud/gen_test.go index cbe5d9938d3..ee7cb103753 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen_test.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen_test.go @@ -34,6 +34,238 @@ import ( const location = "location" +func TestAddressesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.RegionalKey("key-beta", "location") + key = keyBeta + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { + t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Addresses().Get(ctx, *key); err == nil { + t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Address{} + if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Address{} + if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Address{} + if err := mock.Addresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. 
+ if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { + t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Addresses().Get(ctx, *key); err != nil { + t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaAddresses.Objects[*keyAlpha] = mock.MockAlphaAddresses.Obj(&alpha.Address{Name: keyAlpha.Name}) + mock.MockBetaAddresses.Objects[*keyBeta] = mock.MockBetaAddresses.Obj(&beta.Address{Name: keyBeta.Name}) + mock.MockAddresses.Objects[*keyGA] = mock.MockAddresses.Obj(&ga.Address{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Addresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.GlobalKey("key-alpha") + key = keyAlpha + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.AlphaBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BackendServices().Get(ctx, *key); err == nil { + t.Errorf("BackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.BackendService{} + if err := mock.AlphaBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.BackendService{} + if err := mock.BackendServices().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("BackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BackendServices().Get(ctx, *key); err != nil { + t.Errorf("BackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaBackendServices.Objects[*keyAlpha] = mock.MockAlphaBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + mock.MockBackendServices.Objects[*keyGA] = mock.MockBackendServices.Obj(&ga.BackendService{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaBackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("AlphaBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("BackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err != nil { + t.Errorf("BackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err == nil { + t.Errorf("BackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + func TestDisksGroup(t *testing.T) { t.Parallel() @@ -194,749 +426,6 @@ func TestFirewallsGroup(t *testing.T) { } } -func TestInstancesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.ZonalKey("key-alpha", "location") - key = keyAlpha - keyBeta := meta.ZonalKey("key-beta", "location") - key = keyBeta - keyGA := meta.ZonalKey("key-ga", "location") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. 
- if _, err := mock.AlphaInstances().Get(ctx, *key); err == nil { - t.Errorf("AlphaInstances().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.BetaInstances().Get(ctx, *key); err == nil { - t.Errorf("BetaInstances().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.Instances().Get(ctx, *key); err == nil { - t.Errorf("Instances().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.Instance{} - if err := mock.AlphaInstances().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &beta.Instance{} - if err := mock.BetaInstances().Insert(ctx, *keyBeta, obj); err != nil { - t.Errorf("BetaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &ga.Instance{} - if err := mock.Instances().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("Instances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaInstances().Get(ctx, *key); err != nil { - t.Errorf("AlphaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.BetaInstances().Get(ctx, *key); err != nil { - t.Errorf("BetaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.Instances().Get(ctx, *key); err != nil { - t.Errorf("Instances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockAlphaInstances.Objects[*keyAlpha] = mock.MockAlphaInstances.Obj(&alpha.Instance{Name: keyAlpha.Name}) - mock.MockBetaInstances.Objects[*keyBeta] = mock.MockBetaInstances.Obj(&beta.Instance{Name: keyBeta.Name}) - mock.MockInstances.Objects[*keyGA] = mock.MockInstances.Obj(&ga.Instance{Name: keyGA.Name}) - want := map[string]bool{ - "key-alpha": true, - "key-beta": true, - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaInstances().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.BetaInstances().List(ctx, location, filter.None) - if err != nil { - t.Errorf("BetaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.Instances().List(ctx, location, filter.None) - if err != nil { - t.Errorf("Instances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. 
- if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.BetaInstances().Delete(ctx, *keyBeta); err != nil { - t.Errorf("BetaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.Instances().Delete(ctx, *keyGA); err != nil { - t.Errorf("Instances().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaInstances().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.BetaInstances().Delete(ctx, *keyBeta); err == nil { - t.Errorf("BetaInstances().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.Instances().Delete(ctx, *keyGA); err == nil { - t.Errorf("Instances().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestProjectsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - - // Insert. - - // Get across versions. - - // List. - mock.MockProjects.Objects[*keyGA] = mock.MockProjects.Obj(&ga.Project{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - - // Delete across versions. - - // Delete not found. -} - -func TestRoutesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.Routes().Get(ctx, *key); err == nil { - t.Errorf("Routes().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &ga.Route{} - if err := mock.Routes().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("Routes().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.Routes().Get(ctx, *key); err != nil { - t.Errorf("Routes().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockRoutes.Objects[*keyGA] = mock.MockRoutes.Obj(&ga.Route{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.Routes().List(ctx, filter.None) - if err != nil { - t.Errorf("Routes().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.Routes().Delete(ctx, *keyGA); err != nil { - t.Errorf("Routes().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.Routes().Delete(ctx, *keyGA); err == nil { - t.Errorf("Routes().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestTargetPoolsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.RegionalKey("key-ga", "location") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.TargetPools().Get(ctx, *key); err == nil { - t.Errorf("TargetPools().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. 
- { - obj := &ga.TargetPool{} - if err := mock.TargetPools().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("TargetPools().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.TargetPools().Get(ctx, *key); err != nil { - t.Errorf("TargetPools().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockTargetPools.Objects[*keyGA] = mock.MockTargetPools.Obj(&ga.TargetPool{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.TargetPools().List(ctx, location, filter.None) - if err != nil { - t.Errorf("TargetPools().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.TargetPools().Delete(ctx, *keyGA); err != nil { - t.Errorf("TargetPools().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.TargetPools().Delete(ctx, *keyGA); err == nil { - t.Errorf("TargetPools().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestGlobalForwardingRulesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.GlobalForwardingRules().Get(ctx, *key); err == nil { - t.Errorf("GlobalForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &ga.ForwardingRule{} - if err := mock.GlobalForwardingRules().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("GlobalForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.GlobalForwardingRules().Get(ctx, *key); err != nil { - t.Errorf("GlobalForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockGlobalForwardingRules.Objects[*keyGA] = mock.MockGlobalForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.GlobalForwardingRules().List(ctx, filter.None) - if err != nil { - t.Errorf("GlobalForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err != nil { - t.Errorf("GlobalForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err == nil { - t.Errorf("GlobalForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestNetworkEndpointGroupsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.ZonalKey("key-alpha", "location") - key = keyAlpha - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. 
- if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err == nil { - t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.NetworkEndpointGroup{} - if err := mock.AlphaNetworkEndpointGroups().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err != nil { - t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name}) - want := map[string]bool{ - "key-alpha": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaNetworkEndpointGroups().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaNetworkEndpointGroups().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestTargetHttpProxiesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.TargetHttpProxies().Get(ctx, *key); err == nil { - t.Errorf("TargetHttpProxies().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &ga.TargetHttpProxy{} - if err := mock.TargetHttpProxies().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("TargetHttpProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.TargetHttpProxies().Get(ctx, *key); err != nil { - t.Errorf("TargetHttpProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockTargetHttpProxies.Objects[*keyGA] = mock.MockTargetHttpProxies.Obj(&ga.TargetHttpProxy{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.TargetHttpProxies().List(ctx, filter.None) - if err != nil { - t.Errorf("TargetHttpProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err != nil { - t.Errorf("TargetHttpProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. 
- if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err == nil { - t.Errorf("TargetHttpProxies().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestUrlMapsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.UrlMaps().Get(ctx, *key); err == nil { - t.Errorf("UrlMaps().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &ga.UrlMap{} - if err := mock.UrlMaps().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("UrlMaps().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.UrlMaps().Get(ctx, *key); err != nil { - t.Errorf("UrlMaps().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockUrlMaps.Objects[*keyGA] = mock.MockUrlMaps.Obj(&ga.UrlMap{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.UrlMaps().List(ctx, filter.None) - if err != nil { - t.Errorf("UrlMaps().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.UrlMaps().Delete(ctx, *keyGA); err != nil { - t.Errorf("UrlMaps().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.UrlMaps().Delete(ctx, *keyGA); err == nil { - t.Errorf("UrlMaps().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestZonesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.Zones().Get(ctx, *key); err == nil { - t.Errorf("Zones().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - - // Get across versions. - - // List. - mock.MockZones.Objects[*keyGA] = mock.MockZones.Obj(&ga.Zone{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.Zones().List(ctx, filter.None) - if err != nil { - t.Errorf("Zones().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - - // Delete not found. -} - -func TestGlobalAddressesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.GlobalAddresses().Get(ctx, *key); err == nil { - t.Errorf("GlobalAddresses().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &ga.Address{} - if err := mock.GlobalAddresses().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("GlobalAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. 
- if obj, err := mock.GlobalAddresses().Get(ctx, *key); err != nil { - t.Errorf("GlobalAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockGlobalAddresses.Objects[*keyGA] = mock.MockGlobalAddresses.Obj(&ga.Address{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.GlobalAddresses().List(ctx, filter.None) - if err != nil { - t.Errorf("GlobalAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err != nil { - t.Errorf("GlobalAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err == nil { - t.Errorf("GlobalAddresses().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestBackendServicesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.GlobalKey("key-alpha") - key = keyAlpha - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaBackendServices().Get(ctx, *key); err == nil { - t.Errorf("AlphaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.BackendServices().Get(ctx, *key); err == nil { - t.Errorf("BackendServices().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.BackendService{} - if err := mock.AlphaBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &ga.BackendService{} - if err := mock.BackendServices().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("BackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaBackendServices().Get(ctx, *key); err != nil { - t.Errorf("AlphaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.BackendServices().Get(ctx, *key); err != nil { - t.Errorf("BackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockAlphaBackendServices.Objects[*keyAlpha] = mock.MockAlphaBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) - mock.MockBackendServices.Objects[*keyGA] = mock.MockBackendServices.Obj(&ga.BackendService{Name: keyGA.Name}) - want := map[string]bool{ - "key-alpha": true, - "key-ga": true, - } - _ = want // ignore unused variables. 
- { - objs, err := mock.AlphaBackendServices().List(ctx, filter.None) - if err != nil { - t.Errorf("AlphaBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.BackendServices().List(ctx, filter.None) - if err != nil { - t.Errorf("BackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.BackendServices().Delete(ctx, *keyGA); err != nil { - t.Errorf("BackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaBackendServices().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.BackendServices().Delete(ctx, *keyGA); err == nil { - t.Errorf("BackendServices().Delete(%v, %v) = nil; want error", ctx, key) - } -} - func TestForwardingRulesGroup(t *testing.T) { t.Parallel() @@ -1035,7 +524,7 @@ func TestForwardingRulesGroup(t *testing.T) { } } -func TestRegionsGroup(t *testing.T) { +func TestGlobalAddressesGroup(t *testing.T) { t.Parallel() ctx := context.Background() @@ -1048,295 +537,114 @@ func TestRegionsGroup(t *testing.T) { _, _, _ = ctx, mock, key // Get not found. - if _, err := mock.Regions().Get(ctx, *key); err == nil { - t.Errorf("Regions().Get(%v, %v) = _, nil; want error", ctx, key) + if _, err := mock.GlobalAddresses().Get(ctx, *key); err == nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = _, nil; want error", ctx, key) } // Insert. + { + obj := &ga.Address{} + if err := mock.GlobalAddresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } // Get across versions. + if obj, err := mock.GlobalAddresses().Get(ctx, *key); err != nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } // List. - mock.MockRegions.Objects[*keyGA] = mock.MockRegions.Obj(&ga.Region{Name: keyGA.Name}) + mock.MockGlobalAddresses.Objects[*keyGA] = mock.MockGlobalAddresses.Obj(&ga.Address{Name: keyGA.Name}) want := map[string]bool{ "key-ga": true, } _ = want // ignore unused variables. { - objs, err := mock.Regions().List(ctx, filter.None) + objs, err := mock.GlobalAddresses().List(ctx, filter.None) if err != nil { - t.Errorf("Regions().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + t.Errorf("GlobalAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) } else { got := map[string]bool{} for _, obj := range objs { got[obj.Name] = true } if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want) + t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want) } } } // Delete across versions. 
+ if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } // Delete not found. + if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } } -func TestAddressesGroup(t *testing.T) { +func TestGlobalForwardingRulesGroup(t *testing.T) { t.Parallel() ctx := context.Background() mock := NewMockGCE() var key *meta.Key - keyAlpha := meta.RegionalKey("key-alpha", "location") - key = keyAlpha - keyBeta := meta.RegionalKey("key-beta", "location") - key = keyBeta - keyGA := meta.RegionalKey("key-ga", "location") + keyGA := meta.GlobalKey("key-ga") key = keyGA // Ignore unused variables. _, _, _ = ctx, mock, key // Get not found. - if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { - t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { - t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.Addresses().Get(ctx, *key); err == nil { - t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + if _, err := mock.GlobalForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) } // Insert. { - obj := &alpha.Address{} - if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &beta.Address{} - if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { - t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &ga.Address{} - if err := mock.Addresses().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + obj := &ga.ForwardingRule{} + if err := mock.GlobalForwardingRules().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) } } // Get across versions. - if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { - t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { - t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.Addresses().Get(ctx, *key); err != nil { - t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + if obj, err := mock.GlobalForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) } // List. - mock.MockAlphaAddresses.Objects[*keyAlpha] = mock.MockAlphaAddresses.Obj(&alpha.Address{Name: keyAlpha.Name}) - mock.MockBetaAddresses.Objects[*keyBeta] = mock.MockBetaAddresses.Obj(&beta.Address{Name: keyBeta.Name}) - mock.MockAddresses.Objects[*keyGA] = mock.MockAddresses.Obj(&ga.Address{Name: keyGA.Name}) + mock.MockGlobalForwardingRules.Objects[*keyGA] = mock.MockGlobalForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) want := map[string]bool{ - "key-alpha": true, - "key-beta": true, - "key-ga": true, + "key-ga": true, } _ = want // ignore unused variables. 
{ - objs, err := mock.AlphaAddresses().List(ctx, location, filter.None) + objs, err := mock.GlobalForwardingRules().List(ctx, filter.None) if err != nil { - t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + t.Errorf("GlobalForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) } else { got := map[string]bool{} for _, obj := range objs { got[obj.Name] = true } if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.BetaAddresses().List(ctx, location, filter.None) - if err != nil { - t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.Addresses().List(ctx, location, filter.None) - if err != nil { - t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want) } } } // Delete across versions. - if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { - t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { - t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) } // Delete not found. - if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { - t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { - t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestRegionBackendServicesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.RegionalKey("key-alpha", "location") - key = keyAlpha - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err == nil { - t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.BackendService{} - if err := mock.AlphaRegionBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaRegionBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err != nil { - t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. 
- mock.MockAlphaRegionBackendServices.Objects[*keyAlpha] = mock.MockAlphaRegionBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) - want := map[string]bool{ - "key-alpha": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaRegionBackendServices().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaRegionBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestRegionDisksGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.RegionalKey("key-alpha", "location") - key = keyAlpha - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaRegionDisks().Get(ctx, *key); err == nil { - t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.Disk{} - if err := mock.AlphaRegionDisks().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaRegionDisks().Get(ctx, *key); err != nil { - t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) - want := map[string]bool{ - "key-alpha": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, key) + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) } } @@ -1624,6 +932,465 @@ func TestInstanceGroupsGroup(t *testing.T) { } } +func TestInstancesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.ZonalKey("key-beta", "location") + key = keyBeta + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. 
+ _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaInstances().Get(ctx, *key); err == nil { + t.Errorf("AlphaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaInstances().Get(ctx, *key); err == nil { + t.Errorf("BetaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Instances().Get(ctx, *key); err == nil { + t.Errorf("Instances().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Instance{} + if err := mock.AlphaInstances().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Instance{} + if err := mock.BetaInstances().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Instance{} + if err := mock.Instances().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Instances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaInstances().Get(ctx, *key); err != nil { + t.Errorf("AlphaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaInstances().Get(ctx, *key); err != nil { + t.Errorf("BetaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Instances().Get(ctx, *key); err != nil { + t.Errorf("Instances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaInstances.Objects[*keyAlpha] = mock.MockAlphaInstances.Obj(&alpha.Instance{Name: keyAlpha.Name}) + mock.MockBetaInstances.Objects[*keyBeta] = mock.MockBetaInstances.Obj(&beta.Instance{Name: keyBeta.Name}) + mock.MockInstances.Objects[*keyGA] = mock.MockInstances.Obj(&ga.Instance{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Instances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Instances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. 
+ if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Instances().Delete(ctx, *keyGA); err != nil { + t.Errorf("Instances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Instances().Delete(ctx, *keyGA); err == nil { + t.Errorf("Instances().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestNetworkEndpointGroupsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.NetworkEndpointGroup{} + if err := mock.AlphaNetworkEndpointGroups().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaNetworkEndpointGroups().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaNetworkEndpointGroups().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestProjectsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + + // Insert. + + // Get across versions. + + // List. + mock.MockProjects.Objects[*keyGA] = mock.MockProjects.Obj(&ga.Project{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + + // Delete across versions. + + // Delete not found. 
+} + +func TestRegionBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.BackendService{} + if err := mock.AlphaRegionBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionBackendServices.Objects[*keyAlpha] = mock.MockAlphaRegionBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaRegionBackendServices().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionDisksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionDisks().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Disk{} + if err := mock.AlphaRegionDisks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionDisks().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. 
+ if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Regions().Get(ctx, *key); err == nil { + t.Errorf("Regions().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. + mock.MockRegions.Objects[*keyGA] = mock.MockRegions.Obj(&ga.Region{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Regions().List(ctx, filter.None) + if err != nil { + t.Errorf("Regions().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. +} + +func TestRoutesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Routes().Get(ctx, *key); err == nil { + t.Errorf("Routes().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.Route{} + if err := mock.Routes().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Routes().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.Routes().Get(ctx, *key); err != nil { + t.Errorf("Routes().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockRoutes.Objects[*keyGA] = mock.MockRoutes.Obj(&ga.Route{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Routes().List(ctx, filter.None) + if err != nil { + t.Errorf("Routes().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.Routes().Delete(ctx, *keyGA); err != nil { + t.Errorf("Routes().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.Routes().Delete(ctx, *keyGA); err == nil { + t.Errorf("Routes().Delete(%v, %v) = nil; want error", ctx, key) + } +} + func TestSslCertificatesGroup(t *testing.T) { t.Parallel() @@ -1686,6 +1453,68 @@ func TestSslCertificatesGroup(t *testing.T) { } } +func TestTargetHttpProxiesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.TargetHttpProxies().Get(ctx, *key); err == nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetHttpProxy{} + if err := mock.TargetHttpProxies().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetHttpProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetHttpProxies().Get(ctx, *key); err != nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetHttpProxies.Objects[*keyGA] = mock.MockTargetHttpProxies.Obj(&ga.TargetHttpProxy{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetHttpProxies().List(ctx, filter.None) + if err != nil { + t.Errorf("TargetHttpProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = nil; want error", ctx, key) + } +} + func TestTargetHttpsProxiesGroup(t *testing.T) { t.Parallel() @@ -1747,3 +1576,174 @@ func TestTargetHttpsProxiesGroup(t *testing.T) { t.Errorf("TargetHttpsProxies().Delete(%v, %v) = nil; want error", ctx, key) } } + +func TestTargetPoolsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetPools().Get(ctx, *key); err == nil { + t.Errorf("TargetPools().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetPool{} + if err := mock.TargetPools().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetPools().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetPools().Get(ctx, *key); err != nil { + t.Errorf("TargetPools().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetPools.Objects[*keyGA] = mock.MockTargetPools.Obj(&ga.TargetPool{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetPools().List(ctx, location, filter.None) + if err != nil { + t.Errorf("TargetPools().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetPools().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetPools().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.TargetPools().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetPools().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestUrlMapsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.UrlMaps().Get(ctx, *key); err == nil { + t.Errorf("UrlMaps().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.UrlMap{} + if err := mock.UrlMaps().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("UrlMaps().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.UrlMaps().Get(ctx, *key); err != nil { + t.Errorf("UrlMaps().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockUrlMaps.Objects[*keyGA] = mock.MockUrlMaps.Obj(&ga.UrlMap{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.UrlMaps().List(ctx, filter.None) + if err != nil { + t.Errorf("UrlMaps().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.UrlMaps().Delete(ctx, *keyGA); err != nil { + t.Errorf("UrlMaps().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.UrlMaps().Delete(ctx, *keyGA); err == nil { + t.Errorf("UrlMaps().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestZonesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Zones().Get(ctx, *key); err == nil { + t.Errorf("Zones().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. + mock.MockZones.Objects[*keyGA] = mock.MockZones.Obj(&ga.Zone{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Zones().List(ctx, filter.None) + if err != nil { + t.Errorf("Zones().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. 
+} From b2613f151487f12362917ed8db6065c7bb648faf Mon Sep 17 00:00:00 2001 From: TigerXu Date: Wed, 10 Jan 2018 09:30:23 +0800 Subject: [PATCH 085/264] Revert "no need delete endpoint explicitly in endpoint controller" --- pkg/controller/endpoint/endpoints_controller.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index b7d46c3e07e..8aa41da8a83 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -394,7 +394,15 @@ func (e *EndpointController) syncService(key string) error { } service, err := e.serviceLister.Services(namespace).Get(name) if err != nil { - // Service has been deleted. So no need to do any more operations. + // Delete the corresponding endpoint, as the service has been deleted. + // TODO: Please note that this will delete an endpoint when a + // service is deleted. However, if we're down at the time when + // the service is deleted, we will miss that deletion, so this + // doesn't completely solve the problem. See #6877. + err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil) + if err != nil && !errors.IsNotFound(err) { + return err + } return nil } From 386c077dc6e241ad94c19e3e6a3fcc56b4ba325a Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Fri, 29 Dec 2017 18:43:38 +0800 Subject: [PATCH 086/264] Move common functions together --- test/e2e_node/BUILD | 3 + test/e2e_node/device_plugin.go | 102 +++++++++++++++++++++++++++++ test/e2e_node/gpu_device_plugin.go | 97 +++++---------------------- test/e2e_node/util.go | 13 ++++ 4 files changed, 134 insertions(+), 81 deletions(-) create mode 100644 test/e2e_node/device_plugin.go diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index e70755c5365..9ee141f0096 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -10,6 +10,7 @@ go_library( name = "go_default_library", srcs = [ "container.go", + "device_plugin.go", "doc.go", "docker_util.go", "framework.go", @@ -32,10 +33,12 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", + "//pkg/kubelet/apis/deviceplugin/v1alpha:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", + "//pkg/kubelet/cm/deviceplugin:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/remote:go_default_library", "//test/e2e/common:go_default_library", diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go new file mode 100644 index 00000000000..9748d31d68a --- /dev/null +++ b/test/e2e_node/device_plugin.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e_node + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "regexp" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/uuid" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + "k8s.io/kubernetes/test/e2e/framework" + + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" + dp "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// makeBusyboxPod returns a simple Pod spec with a pause container +// that requests resourceName and runs the specified command. +func makeBusyboxPod(resourceName, cmd string) *v1.Pod { + podName := "device-plugin-test-" + string(uuid.NewUUID()) + rl := v1.ResourceList{v1.ResourceName(resourceName): *resource.NewQuantity(1, resource.DecimalSI)} + + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName}, + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyAlways, + Containers: []v1.Container{{ + Image: busyboxImage, + Name: podName, + // Runs the specified command in the test pod. + Command: []string{"sh", "-c", cmd}, + Resources: v1.ResourceRequirements{ + Limits: rl, + Requests: rl, + }, + }}, + }, + } +} + +// parseLogFromNRuns returns restart count of the specified container +// after it has been restarted at least restartCount times, +// and the matching string for the specified regular expression parsed from the container logs. +func parseLogFromNRuns(f *framework.Framework, podName string, contName string, restartCount int32, re string) (int32, string) { + var count int32 + // Wait till pod has been restarted at least restartCount times. + Eventually(func() bool { + p, err := f.PodClient().Get(podName, metav1.GetOptions{}) + if err != nil || len(p.Status.ContainerStatuses) < 1 { + return false + } + count = p.Status.ContainerStatuses[0].RestartCount + return count >= restartCount + }, 5*time.Minute, framework.Poll).Should(BeTrue()) + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) + if err != nil { + framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) + } + framework.Logf("got pod logs: %v", logs) + regex := regexp.MustCompile(re) + matches := regex.FindStringSubmatch(logs) + if len(matches) < 2 { + return count, "" + } + return count, matches[1] +} + +// numberOfDevices returns the number of devices of resourceName advertised by a node +func numberOfDevices(node *v1.Node, resourceName string) int64 { + val, ok := node.Status.Capacity[v1.ResourceName(resourceName)] + if !ok { + return 0 + } + + return val.Value() +} diff --git a/test/e2e_node/gpu_device_plugin.go b/test/e2e_node/gpu_device_plugin.go index d2a52c3749e..256a8935c5c 100644 --- a/test/e2e_node/gpu_device_plugin.go +++ b/test/e2e_node/gpu_device_plugin.go @@ -17,15 +17,11 @@ limitations under the License. 
package e2e_node import ( - "os/exec" - "regexp" "strconv" "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" @@ -89,24 +85,28 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() { By("Creating one GPU pod on a node with at least two GPUs") - p1 := f.PodClient().CreateSync(makeCudaPauseImage()) - count1, devId1 := getDeviceId(f, p1.Name, p1.Name, 1) + podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs" + p1 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD)) + + deviceIDRE := "gpu devices: (nvidia[0-9]+)" + count1, devId1 := parseLogFromNRuns(f, p1.Name, p1.Name, 1, deviceIDRE) p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) By("Restarting Kubelet and waiting for the current running pod to restart") - restartKubelet(f) + restartKubelet() By("Confirming that after a kubelet and pod restart, GPU assignement is kept") - count1, devIdRestart1 := getDeviceId(f, p1.Name, p1.Name, count1+1) + count1, devIdRestart1 := parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE) Expect(devIdRestart1).To(Equal(devId1)) By("Restarting Kubelet and creating another pod") - restartKubelet(f) - p2 := f.PodClient().CreateSync(makeCudaPauseImage()) + restartKubelet() + p2 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD)) By("Checking that pods got a different GPU") - count2, devId2 := getDeviceId(f, p2.Name, p2.Name, 1) + count2, devId2 := parseLogFromNRuns(f, p2.Name, p2.Name, 1, deviceIDRE) + Expect(devId1).To(Not(Equal(devId2))) By("Deleting device plugin.") @@ -118,16 +118,16 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi return framework.NumberOfNVIDIAGPUs(node) <= 0 }, 10*time.Minute, framework.Poll).Should(BeTrue()) By("Checking that scheduled pods can continue to run even after we delete device plugin.") - count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+1) + count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE) Expect(devIdRestart1).To(Equal(devId1)) - count2, devIdRestart2 := getDeviceId(f, p2.Name, p2.Name, count2+1) + count2, devIdRestart2 := parseLogFromNRuns(f, p2.Name, p2.Name, count2+1, deviceIDRE) Expect(devIdRestart2).To(Equal(devId2)) By("Restarting Kubelet.") - restartKubelet(f) + restartKubelet() By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.") - count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+2) + count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+2, deviceIDRE) Expect(devIdRestart1).To(Equal(devId1)) - count2, devIdRestart2 = getDeviceId(f, p2.Name, p2.Name, count2+2) + count2, devIdRestart2 = parseLogFromNRuns(f, p2.Name, p2.Name, count2+2, deviceIDRE) Expect(devIdRestart2).To(Equal(devId2)) logDevicePluginMetrics() @@ -165,68 +165,3 @@ func logDevicePluginMetrics() { } } } - -func makeCudaPauseImage() *v1.Pod { - podName := testPodNamePrefix + string(uuid.NewUUID()) - - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: podName}, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyAlways, - Containers: 
[]v1.Container{{ - Image: busyboxImage, - Name: podName, - // Retrieves the gpu devices created in the user pod. - // Note the nvidia device plugin implementation doesn't do device id remapping currently. - // Will probably need to use nvidia-smi if that changes. - Command: []string{"sh", "-c", "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"}, - - Resources: v1.ResourceRequirements{ - Limits: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1), - Requests: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1), - }, - }}, - }, - } -} - -func newDecimalResourceList(name v1.ResourceName, quantity int64) v1.ResourceList { - return v1.ResourceList{name: *resource.NewQuantity(quantity, resource.DecimalSI)} -} - -// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494 -func restartKubelet(f *framework.Framework) { - stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput() - framework.ExpectNoError(err) - regex := regexp.MustCompile("(kubelet-[0-9]+)") - matches := regex.FindStringSubmatch(string(stdout)) - Expect(len(matches)).NotTo(BeZero()) - kube := matches[0] - framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube) - stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput() - framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout) -} - -func getDeviceId(f *framework.Framework, podName string, contName string, restartCount int32) (int32, string) { - var count int32 - // Wait till pod has been restarted at least restartCount times. - Eventually(func() bool { - p, err := f.PodClient().Get(podName, metav1.GetOptions{}) - if err != nil || len(p.Status.ContainerStatuses) < 1 { - return false - } - count = p.Status.ContainerStatuses[0].RestartCount - return count >= restartCount - }, 5*time.Minute, framework.Poll).Should(BeTrue()) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) - if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) - } - framework.Logf("got pod logs: %v", logs) - regex := regexp.MustCompile("gpu devices: (nvidia[0-9]+)") - matches := regex.FindStringSubmatch(logs) - if len(matches) < 2 { - return count, "" - } - return count, matches[1] -} diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index bf1914c5e71..f81ab6f5d8b 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -389,3 +389,16 @@ func getCRIClient() (internalapi.RuntimeService, internalapi.ImageManagerService } return r, i, nil } + +// TODO: Find a uniform way to deal with systemctl/initctl/service operations. 
#34494 +func restartKubelet() { + stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput() + framework.ExpectNoError(err) + regex := regexp.MustCompile("(kubelet-[0-9]+)") + matches := regex.FindStringSubmatch(string(stdout)) + Expect(len(matches)).NotTo(BeZero()) + kube := matches[0] + framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube) + stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput() + framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout) +} From 40c0cb468fc151df8accd3a6228cbe0b5ca6b183 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Tue, 9 Jan 2018 21:53:22 -0800 Subject: [PATCH 087/264] Remove options.md, which is outdated and doesn't contain any useful information. --- cluster/options.md | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 cluster/options.md diff --git a/cluster/options.md b/cluster/options.md deleted file mode 100644 index f48d0ebc264..00000000000 --- a/cluster/options.md +++ /dev/null @@ -1,15 +0,0 @@ -# Configuration options - -These options can be set as environment variables, to customize how your cluster is created. - -These options apply across providers. There are additional documents for options specific to providers: - -* [AWS](aws/options.md) - -This is a work-in-progress; not all options are documented yet! - -**NUM_NODES** - -The number of node instances to create. Most providers default this to 4. - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/options.md?pixel)]() From d2d48cddf8cd3856b66e3bd385f8fab4b6ccf57c Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 10 Jan 2018 14:01:47 +0800 Subject: [PATCH 088/264] Add wrappers for azure clients --- pkg/cloudprovider/providers/azure/azure.go | 256 +----- .../providers/azure/azure_client.go | 811 ++++++++++++++++++ 2 files changed, 854 insertions(+), 213 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_client.go diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 8f31f013508..9d61124f194 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -30,10 +30,6 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/version" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/arm/disk" - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/ghodss/yaml" @@ -110,117 +106,22 @@ type Config struct { MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"` } -// VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient -type VirtualMachinesClient interface { - CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) - Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) - List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) - ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) -} - -// InterfacesClient defines needed functions for azure network.InterfacesClient -type InterfacesClient 
interface { - CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) - Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) - GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) -} - -// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient -type LoadBalancersClient interface { - CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) - Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) - List(resourceGroupName string) (result network.LoadBalancerListResult, err error) - ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) -} - -// PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient -type PublicIPAddressesClient interface { - CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) - Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) - List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) - ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) -} - -// SubnetsClient defines needed functions for azure network.SubnetsClient -type SubnetsClient interface { - CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) - Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) - List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) -} - -// SecurityGroupsClient defines needed functions for azure network.SecurityGroupsClient -type SecurityGroupsClient interface { - CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) - Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) - List(resourceGroupName string) (result network.SecurityGroupListResult, err error) -} - -// VirtualMachineScaleSetsClient defines needed functions for azure compute.VirtualMachineScaleSetsClient -type VirtualMachineScaleSetsClient interface { - CreateOrUpdate(resourceGroupName string, 
VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) - Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) - List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) - ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) - UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) -} - -// VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient -type VirtualMachineScaleSetVMsClient interface { - Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) - GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) - List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) - ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) -} - -// RoutesClient defines needed functions for azure network.RoutesClient -type RoutesClient interface { - CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) - Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) -} - -// RouteTablesClient defines needed functions for azure network.RouteTablesClient -type RouteTablesClient interface { - CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) - Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) -} - -// StorageAccountClient defines needed functions for azure storage.AccountsClient -type StorageAccountClient interface { - Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) - Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) - ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) - ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) - GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) -} - -// DisksClient defines needed functions for azure disk.DisksClient -type DisksClient interface { - CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) - Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) - Get(resourceGroupName string, diskName string) (result disk.Model, err error) -} - // Cloud holds the config and clients type Cloud struct { Config - Environment azure.Environment - RoutesClient RoutesClient - 
SubnetsClient SubnetsClient - InterfacesClient InterfacesClient - RouteTablesClient RouteTablesClient - LoadBalancerClient LoadBalancersClient - PublicIPAddressesClient PublicIPAddressesClient - SecurityGroupsClient SecurityGroupsClient - VirtualMachinesClient VirtualMachinesClient - StorageAccountClient StorageAccountClient - DisksClient DisksClient - operationPollRateLimiter flowcontrol.RateLimiter - resourceRequestBackoff wait.Backoff - vmSet VMSet + Environment azure.Environment + RoutesClient RoutesClient + SubnetsClient SubnetsClient + InterfacesClient InterfacesClient + RouteTablesClient RouteTablesClient + LoadBalancerClient LoadBalancersClient + PublicIPAddressesClient PublicIPAddressesClient + SecurityGroupsClient SecurityGroupsClient + VirtualMachinesClient VirtualMachinesClient + StorageAccountClient StorageAccountClient + DisksClient DisksClient + resourceRequestBackoff wait.Backoff + vmSet VMSet // Clients for vmss. VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient @@ -247,116 +148,45 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { return nil, err } - az := Cloud{ - Config: *config, - Environment: *env, - } - servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env) if err != nil { return nil, err } - subnetsClient := network.NewSubnetsClient(az.SubscriptionID) - subnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint - subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - subnetsClient.PollingDelay = 5 * time.Second - configureUserAgent(&subnetsClient.Client) - az.SubnetsClient = subnetsClient - - routeTablesClient := network.NewRouteTablesClient(az.SubscriptionID) - routeTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint - routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - routeTablesClient.PollingDelay = 5 * time.Second - configureUserAgent(&routeTablesClient.Client) - az.RouteTablesClient = routeTablesClient - - routesClient := network.NewRoutesClient(az.SubscriptionID) - routesClient.BaseURI = az.Environment.ResourceManagerEndpoint - routesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - routesClient.PollingDelay = 5 * time.Second - configureUserAgent(&routesClient.Client) - az.RoutesClient = routesClient - - interfacesClient := network.NewInterfacesClient(az.SubscriptionID) - interfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint - interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - interfacesClient.PollingDelay = 5 * time.Second - configureUserAgent(&interfacesClient.Client) - az.InterfacesClient = interfacesClient - - loadBalancerClient := network.NewLoadBalancersClient(az.SubscriptionID) - loadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint - loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - loadBalancerClient.PollingDelay = 5 * time.Second - configureUserAgent(&loadBalancerClient.Client) - az.LoadBalancerClient = loadBalancerClient - - virtualMachinesClient := compute.NewVirtualMachinesClient(az.SubscriptionID) - virtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint - virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - virtualMachinesClient.PollingDelay = 5 * time.Second - configureUserAgent(&virtualMachinesClient.Client) - az.VirtualMachinesClient = virtualMachinesClient - - publicIPAddressClient := 
network.NewPublicIPAddressesClient(az.SubscriptionID) - publicIPAddressClient.BaseURI = az.Environment.ResourceManagerEndpoint - publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - publicIPAddressClient.PollingDelay = 5 * time.Second - configureUserAgent(&publicIPAddressClient.Client) - az.PublicIPAddressesClient = publicIPAddressClient - - securityGroupsClient := network.NewSecurityGroupsClient(az.SubscriptionID) - securityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint - securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - securityGroupsClient.PollingDelay = 5 * time.Second - configureUserAgent(&securityGroupsClient.Client) - az.SecurityGroupsClient = securityGroupsClient - - virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(az.SubscriptionID) - virtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint - virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second - configureUserAgent(&virtualMachineScaleSetVMsClient.Client) - az.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient - - virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(az.SubscriptionID) - virtualMachineScaleSetsClient.BaseURI = az.Environment.ResourceManagerEndpoint - virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second - configureUserAgent(&virtualMachineScaleSetsClient.Client) - az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient - - storageAccountClient := storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - storageAccountClient.PollingDelay = 5 * time.Second - configureUserAgent(&storageAccountClient.Client) - az.StorageAccountClient = storageAccountClient - - disksClient := disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - disksClient.PollingDelay = 5 * time.Second - configureUserAgent(&disksClient.Client) - az.DisksClient = disksClient - - // Conditionally configure rate limits - if az.CloudProviderRateLimit { + // operationPollRateLimiter.Accept() is a no-op if rate limits are configured off. 
+ operationPollRateLimiter := flowcontrol.NewFakeAlwaysRateLimiter() + if config.CloudProviderRateLimit { // Assign rate limit defaults if no configuration was passed in - if az.CloudProviderRateLimitQPS == 0 { - az.CloudProviderRateLimitQPS = rateLimitQPSDefault + if config.CloudProviderRateLimitQPS == 0 { + config.CloudProviderRateLimitQPS = rateLimitQPSDefault } - if az.CloudProviderRateLimitBucket == 0 { - az.CloudProviderRateLimitBucket = rateLimitBucketDefault + if config.CloudProviderRateLimitBucket == 0 { + config.CloudProviderRateLimitBucket = rateLimitBucketDefault } - az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter( - az.CloudProviderRateLimitQPS, - az.CloudProviderRateLimitBucket) + operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter( + config.CloudProviderRateLimitQPS, + config.CloudProviderRateLimitBucket) glog.V(2).Infof("Azure cloudprovider using rate limit config: QPS=%g, bucket=%d", - az.CloudProviderRateLimitQPS, - az.CloudProviderRateLimitBucket) - } else { - // if rate limits are configured off, az.operationPollRateLimiter.Accept() is a no-op - az.operationPollRateLimiter = flowcontrol.NewFakeAlwaysRateLimiter() + config.CloudProviderRateLimitQPS, + config.CloudProviderRateLimitBucket) + } + + az := Cloud{ + Config: *config, + Environment: *env, + + DisksClient: newAzDisksClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + RoutesClient: newAzRoutesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + SubnetsClient: newAzSubnetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + InterfacesClient: newAzInterfacesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + RouteTablesClient: newAzRouteTablesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + LoadBalancerClient: newAzLoadBalancersClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + SecurityGroupsClient: newAzSecurityGroupsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + StorageAccountClient: newAzStorageAccountClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + VirtualMachinesClient: newAzVirtualMachinesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + PublicIPAddressesClient: newAzPublicIPAddressesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), } // Conditionally configure resource request backoff diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go new file mode 100644 index 00000000000..3a359dd6893 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -0,0 +1,811 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/disk" + "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/golang/glog" + + "k8s.io/client-go/util/flowcontrol" +) + +// VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient +type VirtualMachinesClient interface { + CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) + Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) + List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) + ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) +} + +// InterfacesClient defines needed functions for azure network.InterfacesClient +type InterfacesClient interface { + CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) + Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) + GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) +} + +// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient +type LoadBalancersClient interface { + CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) + Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) + List(resourceGroupName string) (result network.LoadBalancerListResult, err error) + ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) +} + +// PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient +type PublicIPAddressesClient interface { + CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) + Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) + List(resourceGroupName string) (result network.PublicIPAddressListResult, err 
error) + ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) +} + +// SubnetsClient defines needed functions for azure network.SubnetsClient +type SubnetsClient interface { + CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) + Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) + List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) +} + +// SecurityGroupsClient defines needed functions for azure network.SecurityGroupsClient +type SecurityGroupsClient interface { + CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) + Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) + List(resourceGroupName string) (result network.SecurityGroupListResult, err error) +} + +// VirtualMachineScaleSetsClient defines needed functions for azure compute.VirtualMachineScaleSetsClient +type VirtualMachineScaleSetsClient interface { + CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) + Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) + List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) + ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) + UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) +} + +// VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient +type VirtualMachineScaleSetVMsClient interface { + Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) + GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) + List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) + ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) +} + +// RoutesClient defines needed functions for azure network.RoutesClient +type RoutesClient interface { + CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) + Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) +} + +// 
RouteTablesClient defines needed functions for azure network.RouteTablesClient +type RouteTablesClient interface { + CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) + Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) +} + +// StorageAccountClient defines needed functions for azure storage.AccountsClient +type StorageAccountClient interface { + Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) + Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) + ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) + ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) + GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) +} + +// DisksClient defines needed functions for azure disk.DisksClient +type DisksClient interface { + CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) + Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) + Get(resourceGroupName string, diskName string) (result disk.Model, err error) +} + +// azVirtualMachinesClient implements VirtualMachinesClient. +type azVirtualMachinesClient struct { + client compute.VirtualMachinesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzVirtualMachinesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachinesClient { + virtualMachinesClient := compute.NewVirtualMachinesClient(subscriptionID) + virtualMachinesClient.BaseURI = endpoint + virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + virtualMachinesClient.PollingDelay = 5 * time.Second + configureUserAgent(&virtualMachinesClient.Client) + + return &azVirtualMachinesClient{ + rateLimiter: rateLimiter, + client: virtualMachinesClient, + } +} + +func (az *azVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName) + defer func() { + glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, VMName, parameters, cancel) +} + +func (az *azVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName) + defer func() { + glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) + }() + + return az.client.Get(resourceGroupName, VMName, expand) +} + +func (az *azVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName) + defer 
func() { + glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +func (az *azVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): start", lastResults) + defer func() { + glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): end", lastResults) + }() + + return az.client.ListNextResults(lastResults) +} + +// azInterfacesClient implements InterfacesClient. +type azInterfacesClient struct { + client network.InterfacesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzInterfacesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azInterfacesClient { + interfacesClient := network.NewInterfacesClient(subscriptionID) + interfacesClient.BaseURI = endpoint + interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + interfacesClient.PollingDelay = 5 * time.Second + configureUserAgent(&interfacesClient.Client) + + return &azInterfacesClient{ + rateLimiter: rateLimiter, + client: interfacesClient, + } +} + +func (az *azInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName) + defer func() { + glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, networkInterfaceName, parameters, cancel) +} + +func (az *azInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName) + defer func() { + glog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) + }() + + return az.client.Get(resourceGroupName, networkInterfaceName, expand) +} + +func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + defer func() { + glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + }() + + return az.client.GetVirtualMachineScaleSetNetworkInterface(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) +} + +// azLoadBalancersClient implements LoadBalancersClient. 
+type azLoadBalancersClient struct { + client network.LoadBalancersClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzLoadBalancersClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azLoadBalancersClient { + loadBalancerClient := network.NewLoadBalancersClient(subscriptionID) + loadBalancerClient.BaseURI = endpoint + loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + loadBalancerClient.PollingDelay = 5 * time.Second + configureUserAgent(&loadBalancerClient.Client) + + return &azLoadBalancersClient{ + rateLimiter: rateLimiter, + client: loadBalancerClient, + } +} + +func (az *azLoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, loadBalancerName, parameters, cancel) +} + +func (az *azLoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) + }() + + return az.client.Delete(resourceGroupName, loadBalancerName, cancel) +} + +func (az *azLoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) + }() + + return az.client.Get(resourceGroupName, loadBalancerName, expand) +} + +func (az *azLoadBalancersClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +func (az *azLoadBalancersClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): start", lastResult) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): end", lastResult) + }() + + return az.client.ListNextResults(lastResult) +} + +// azPublicIPAddressesClient implements PublicIPAddressesClient. 
+type azPublicIPAddressesClient struct { + client network.PublicIPAddressesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzPublicIPAddressesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azPublicIPAddressesClient { + publicIPAddressClient := network.NewPublicIPAddressesClient(subscriptionID) + publicIPAddressClient.BaseURI = endpoint + publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + publicIPAddressClient.PollingDelay = 5 * time.Second + configureUserAgent(&publicIPAddressClient.Client) + + return &azPublicIPAddressesClient{ + rateLimiter: rateLimiter, + client: publicIPAddressClient, + } +} + +func (az *azPublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, publicIPAddressName, parameters, cancel) +} + +func (az *azPublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) + }() + + return az.client.Delete(resourceGroupName, publicIPAddressName, cancel) +} + +func (az *azPublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) + }() + + return az.client.Get(resourceGroupName, publicIPAddressName, expand) +} + +func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +func (az *azPublicIPAddressesClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): start", lastResults) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): end", lastResults) + }() + + return az.client.ListNextResults(lastResults) +} + +// azSubnetsClient implements SubnetsClient. 
+type azSubnetsClient struct { + client network.SubnetsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzSubnetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSubnetsClient { + subnetsClient := network.NewSubnetsClient(subscriptionID) + subnetsClient.BaseURI = endpoint + subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + subnetsClient.PollingDelay = 5 * time.Second + configureUserAgent(&subnetsClient.Client) + + return &azSubnetsClient{ + client: subnetsClient, + rateLimiter: rateLimiter, + } +} + +func (az *azSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + defer func() { + glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) +} + +func (az *azSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + defer func() { + glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + }() + + return az.client.Delete(resourceGroupName, virtualNetworkName, subnetName, cancel) +} + +func (az *azSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + defer func() { + glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + }() + + return az.client.Get(resourceGroupName, virtualNetworkName, subnetName, expand) +} + +func (az *azSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName) + defer func() { + glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) + }() + + return az.client.List(resourceGroupName, virtualNetworkName) +} + +// azSecurityGroupsClient implements SecurityGroupsClient. 
+type azSecurityGroupsClient struct { + client network.SecurityGroupsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzSecurityGroupsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSecurityGroupsClient { + securityGroupsClient := network.NewSecurityGroupsClient(subscriptionID) + securityGroupsClient.BaseURI = endpoint + securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + securityGroupsClient.PollingDelay = 5 * time.Second + configureUserAgent(&securityGroupsClient.Client) + + return &azSecurityGroupsClient{ + rateLimiter: rateLimiter, + client: securityGroupsClient, + } +} + +func (az *azSecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, networkSecurityGroupName, parameters, cancel) +} + +func (az *azSecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) + }() + + return az.client.Delete(resourceGroupName, networkSecurityGroupName, cancel) +} + +func (az *azSecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) + }() + + return az.client.Get(resourceGroupName, networkSecurityGroupName, expand) +} + +func (az *azSecurityGroupsClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +// azVirtualMachineScaleSetsClient implements VirtualMachineScaleSetsClient. 
+type azVirtualMachineScaleSetsClient struct {
+	client      compute.VirtualMachineScaleSetsClient
+	rateLimiter flowcontrol.RateLimiter
+}
+
+func newAzVirtualMachineScaleSetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetsClient {
+	virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(subscriptionID)
+	virtualMachineScaleSetsClient.BaseURI = endpoint
+	virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
+	virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&virtualMachineScaleSetsClient.Client)
+
+	return &azVirtualMachineScaleSetsClient{
+		client:      virtualMachineScaleSetsClient,
+		rateLimiter: rateLimiter,
+	}
+}
+
+func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName)
+	}()
+
+	return az.client.CreateOrUpdate(resourceGroupName, VMScaleSetName, parameters, cancel)
+}
+
+func (az *azVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName)
+	}()
+
+	return az.client.Get(resourceGroupName, VMScaleSetName)
+}
+
+func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName)
+	}()
+
+	return az.client.List(resourceGroupName)
+}
+
+func (az *azVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): start", lastResults)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): end", lastResults)
+	}()
+
+	return az.client.ListNextResults(lastResults)
+}
+
+func (az *azVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+	}()
+
+	return az.client.UpdateInstances(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel)
+}
+
+// azVirtualMachineScaleSetVMsClient implements 
VirtualMachineScaleSetVMsClient.
+type azVirtualMachineScaleSetVMsClient struct {
+	client      compute.VirtualMachineScaleSetVMsClient
+	rateLimiter flowcontrol.RateLimiter
+}
+
+func newAzVirtualMachineScaleSetVMsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetVMsClient {
+	virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID)
+	virtualMachineScaleSetVMsClient.BaseURI = endpoint
+	virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
+	virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second
+	configureUserAgent(&virtualMachineScaleSetVMsClient.Client)
+
+	return &azVirtualMachineScaleSetVMsClient{
+		client:      virtualMachineScaleSetVMsClient,
+		rateLimiter: rateLimiter,
+	}
+}
+
+func (az *azVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
+	}()
+
+	return az.client.Get(resourceGroupName, VMScaleSetName, instanceID)
+}
+
+func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
+	}()
+
+	return az.client.GetInstanceView(resourceGroupName, VMScaleSetName, instanceID)
+}
+
+func (az *azVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter)
+	}()
+
+	return az.client.List(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand)
+}
+
+func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) {
+	az.rateLimiter.Accept()
+	glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q): start", lastResults)
+	defer func() {
+		glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q): end", lastResults)
+	}()
+
+	return az.client.ListNextResults(lastResults)
+}
+
+// azRoutesClient implements RoutesClient. 
+type azRoutesClient struct { + client network.RoutesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzRoutesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRoutesClient { + routesClient := network.NewRoutesClient(subscriptionID) + routesClient.BaseURI = endpoint + routesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + routesClient.PollingDelay = 5 * time.Second + configureUserAgent(&routesClient.Client) + + return &azRoutesClient{ + client: routesClient, + rateLimiter: rateLimiter, + } +} + +func (az *azRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + defer func() { + glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, routeTableName, routeName, routeParameters, cancel) +} + +func (az *azRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + defer func() { + glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + }() + + return az.client.Delete(resourceGroupName, routeTableName, routeName, cancel) +} + +// azRouteTablesClient implements RouteTablesClient. +type azRouteTablesClient struct { + client network.RouteTablesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzRouteTablesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRouteTablesClient { + routeTablesClient := network.NewRouteTablesClient(subscriptionID) + routeTablesClient.BaseURI = endpoint + routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + routeTablesClient.PollingDelay = 5 * time.Second + configureUserAgent(&routeTablesClient.Client) + + return &azRouteTablesClient{ + client: routeTablesClient, + rateLimiter: rateLimiter, + } +} + +func (az *azRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName) + defer func() { + glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, routeTableName, parameters, cancel) +} + +func (az *azRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName) + defer func() { + glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) + }() + + return az.client.Get(resourceGroupName, routeTableName, expand) +} + +// azStorageAccountClient implements StorageAccountClient. 
+type azStorageAccountClient struct { + client storage.AccountsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzStorageAccountClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azStorageAccountClient { + storageAccountClient := storage.NewAccountsClientWithBaseURI(endpoint, subscriptionID) + storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + storageAccountClient.PollingDelay = 5 * time.Second + configureUserAgent(&storageAccountClient.Client) + + return &azStorageAccountClient{ + client: storageAccountClient, + rateLimiter: rateLimiter, + } +} + +func (az *azStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) + }() + + return az.client.Create(resourceGroupName, accountName, parameters, cancel) +} + +func (az *azStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) + }() + + return az.client.Delete(resourceGroupName, accountName) +} + +func (az *azStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) + }() + + return az.client.ListKeys(resourceGroupName, accountName) +} + +func (az *azStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) + }() + + return az.client.ListByResourceGroup(resourceGroupName) +} + +func (az *azStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) + }() + + return az.client.GetProperties(resourceGroupName, accountName) +} + +// azDisksClient implements DisksClient. 
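Because the Cloud struct depends on these narrow interfaces rather than on the SDK types directly, unit tests can drop in fakes; getTestCloud in azure_test.go (shown later in this series) does exactly that. A sketch under that assumption (newTestCloudSketch is a hypothetical name; the fake constructors are the ones getTestCloud uses):

// Sketch only, mirroring getTestCloud: fakes satisfy the same client interfaces.
func newTestCloudSketch() *Cloud {
	az := &Cloud{}
	az.LoadBalancerClient = newFakeAzureLBClient()
	az.PublicIPAddressesClient = newFakeAzurePIPClient(az.Config.SubscriptionID)
	az.SubnetsClient = newFakeAzureSubnetsClient()
	return az
}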
+type azDisksClient struct { + client disk.DisksClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzDisksClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azDisksClient { + disksClient := disk.NewDisksClientWithBaseURI(endpoint, subscriptionID) + disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + disksClient.PollingDelay = 5 * time.Second + configureUserAgent(&disksClient.Client) + + return &azDisksClient{ + client: disksClient, + rateLimiter: rateLimiter, + } +} + +func (az *azDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName) + defer func() { + glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, diskName, diskParameter, cancel) +} + +func (az *azDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) + defer func() { + glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) + }() + + return az.client.Delete(resourceGroupName, diskName, cancel) +} + +func (az *azDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName) + defer func() { + glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) + }() + + return az.client.Get(resourceGroupName, diskName) +} From 2423e7c52b48e0a3c2fb85cc0090a883ec20df0a Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 10 Jan 2018 14:02:08 +0800 Subject: [PATCH 089/264] Clean up azure rateLimiter and verbose logs --- pkg/cloudprovider/providers/azure/BUILD | 3 +- .../providers/azure/azure_backoff.go | 41 ------------------- .../providers/azure/azure_controllerCommon.go | 2 - .../providers/azure/azure_loadbalancer.go | 14 +------ .../providers/azure/azure_routes.go | 6 --- .../providers/azure/azure_storageaccount.go | 8 ---- .../providers/azure/azure_test.go | 2 - .../providers/azure/azure_util.go | 7 ---- .../providers/azure/azure_util_vmss.go | 33 --------------- .../providers/azure/azure_wrap.go | 28 +------------ 10 files changed, 5 insertions(+), 139 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index fe131367e0c..d796860b662 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -12,6 +12,7 @@ go_library( "azure.go", "azure_backoff.go", "azure_blobDiskController.go", + "azure_client.go", "azure_controllerCommon.go", "azure_fakes.go", "azure_file.go", @@ -42,6 +43,7 @@ go_library( "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", 
"//vendor/github.com/ghodss/yaml:go_default_library", @@ -80,7 +82,6 @@ go_test( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 099fea81fe1..ff0e16bfd7d 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -67,10 +67,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, var result compute.VirtualMachineListResult err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.List(%v): start", az.ResourceGroup) result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup) - glog.V(10).Infof("VirtualMachinesClient.List(%v): end", az.ResourceGroup) if retryErr != nil { glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -92,10 +89,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): start", az.ResourceGroup) result, retryErr = az.VirtualMachinesClient.ListNextResults(result) - glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): end", az.ResourceGroup) if retryErr != nil { glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, retryErr) @@ -133,8 +127,6 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) { // CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): start", *sg.Name) respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) resp := <-respChan err := <-errChan @@ -146,8 +138,6 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { // CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): start", *lb.Name) respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) resp := <-respChan err := <-errChan @@ -163,10 +153,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.List(%v): start", az.ResourceGroup) result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup) - glog.V(10).Infof("LoadBalancerClient.List(%v): end", az.ResourceGroup) if retryErr != nil { 
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -189,10 +176,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): start", az.ResourceGroup) result, retryErr = az.LoadBalancerClient.ListNextResults(result) - glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): end", az.ResourceGroup) if retryErr != nil { glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -218,10 +202,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd var result network.PublicIPAddressListResult err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", pipResourceGroup) result, retryErr = az.PublicIPAddressesClient.List(pipResourceGroup) - glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", pipResourceGroup) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, @@ -244,10 +225,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", pipResourceGroup) result, retryErr = az.PublicIPAddressesClient.ListNextResults(result) - glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", pipResourceGroup) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, @@ -270,8 +248,6 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd // CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): start", pipResourceGroup, *pip.Name) respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(pipResourceGroup, *pip.Name, pip, nil) resp := <-respChan err := <-errChan @@ -283,8 +259,6 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network // CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): start", *nic.Name) respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) resp := <-respChan err := <-errChan @@ -296,12 +270,9 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { // DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup 
string, pipName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Delete(%s, %s): start", pipResourceGroup, pipName) respChan, errChan := az.PublicIPAddressesClient.Delete(pipResourceGroup, pipName, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("PublicIPAddressesClient.Delete(%s, %s): end", pipResourceGroup, pipName) return processRetryResponse(resp, err) }) } @@ -309,12 +280,9 @@ func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string // DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry func (az *Cloud) DeleteLBWithRetry(lbName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.Delete(%s): start", lbName) respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("LoadBalancerClient.Delete(%s): end", lbName) return processRetryResponse(resp, err) }) } @@ -322,12 +290,9 @@ func (az *Cloud) DeleteLBWithRetry(lbName string) error { // CreateOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): start", *routeTable.Name) respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): end", *routeTable.Name) return processRetryResponse(resp.Response, err) }) } @@ -335,8 +300,6 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable // CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): start", *route.Name) respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) resp := <-respChan err := <-errChan @@ -348,8 +311,6 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { // DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry func (az *Cloud) DeleteRouteWithRetry(routeName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.Delete(%s): start", az.RouteTableName) respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) resp := <-respChan err := <-errChan @@ -361,8 +322,6 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error { // CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - 
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): start", vmName) respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) resp := <-respChan err := <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go index ea32f3f477f..ad40f3c5b85 100644 --- a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go +++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go @@ -110,7 +110,6 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri } vmName := mapNodeNameToVMName(nodeName) glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName) - c.cloud.operationPollRateLimiter.Accept() respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) resp := <-respChan err = <-errChan @@ -176,7 +175,6 @@ func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName t } vmName := mapNodeNameToVMName(nodeName) glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName) - c.cloud.operationPollRateLimiter.Accept() respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) resp := <-respChan err = <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index d6f4bdfac38..cc64f80826b 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -415,7 +415,6 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } pip.Tags = &map[string]*string{"service": &serviceName} glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name) - az.operationPollRateLimiter.Accept() glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip) if err != nil { @@ -424,10 +423,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %q): start", pipResourceGroup, *pip.Name) pip, err = az.PublicIPAddressesClient.Get(pipResourceGroup, *pip.Name, "") - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %q): end", pipResourceGroup, *pip.Name) if err != nil { return nil, err } @@ -762,14 +758,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) // Remove the LB. 
- az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName) + glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName) err = az.DeleteLBWithRetry(lbName) if err != nil { glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) return nil, err } - glog.V(10).Infof("LoadBalancerClient.Delete(%q): end", lbName) + glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) } else { glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName) err := az.CreateOrUpdateLBWithRetry(*lb) @@ -808,10 +803,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, ports = []v1.ServicePort{} } - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.Get(%q): start", az.SecurityGroupName) sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "") - glog.V(10).Infof("SecurityGroupsClient.Get(%q): end", az.SecurityGroupName) if err != nil { return nil, err } @@ -980,7 +972,6 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, if dirtySg { sg.SecurityRules = &updatedRules glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name) - az.operationPollRateLimiter.Accept() glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) err := az.CreateOrUpdateSGWithRetry(sg) if err != nil { @@ -1169,7 +1160,6 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want // Public ip resource with match service tag } else { glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) - az.operationPollRateLimiter.Accept() glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName) if err != nil { diff --git a/pkg/cloudprovider/providers/azure/azure_routes.go b/pkg/cloudprovider/providers/azure/azure_routes.go index 60c5049b052..eef61003ff8 100644 --- a/pkg/cloudprovider/providers/azure/azure_routes.go +++ b/pkg/cloudprovider/providers/azure/azure_routes.go @@ -77,8 +77,6 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo } glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): start", az.RouteTableName) respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) resp := <-respChan err := <-errChan @@ -119,8 +117,6 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo } glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): start", az.RouteTableName) respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) resp := <-respChan err = <-errChan @@ -147,8 +143,6 @@ func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) glog.V(2).Infof("delete: deleting route. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) routeName := mapNodeNameToRouteName(kubeRoute.TargetNode) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.Delete(%q): start", az.RouteTableName) respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) resp := <-respChan err := <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_storageaccount.go b/pkg/cloudprovider/providers/azure/azure_storageaccount.go index ad69c0a532f..4d33fb21e66 100644 --- a/pkg/cloudprovider/providers/azure/azure_storageaccount.go +++ b/pkg/cloudprovider/providers/azure/azure_storageaccount.go @@ -19,8 +19,6 @@ package azure import ( "fmt" "strings" - - "github.com/golang/glog" ) type accountWithLocation struct { @@ -29,10 +27,7 @@ type accountWithLocation struct { // getStorageAccounts gets the storage accounts' name, type, location in a resource group func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): start", az.ResourceGroup) result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup) - glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): end", az.ResourceGroup) if err != nil { return nil, err } @@ -61,10 +56,7 @@ func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) { // getStorageAccesskey gets the storage account access key func (az *Cloud) getStorageAccesskey(account string) (string, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("StorageAccountClient.ListKeys(%q): start", account) result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account) - glog.V(10).Infof("StorageAccountClient.ListKeys(%q): end", account) if err != nil { return "", err } diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 47181bb0d3f..40a1153680b 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -25,7 +25,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/flowcontrol" serviceapi "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -862,7 +861,6 @@ func getTestCloud() (az *Cloud) { MaximumLoadBalancerRuleCount: 250, }, } - az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(100, 100) az.LoadBalancerClient = newFakeAzureLBClient() az.PublicIPAddressesClient = newFakeAzurePIPClient(az.Config.SubscriptionID) az.SubnetsClient = newFakeAzureSubnetsClient() diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index c7fcef10ebd..7a42ab52cd6 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -389,7 +389,6 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) var machine compute.VirtualMachine var err error - as.operationPollRateLimiter.Accept() machine, err = as.getVirtualMachine(types.NodeName(name)) if err != nil { if as.CloudProviderBackoff { @@ -563,7 +562,6 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) { var 
machine compute.VirtualMachine - as.operationPollRateLimiter.Accept() machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName)) if err != nil { glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) @@ -589,10 +587,7 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw } } - as.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) nic, err := as.InterfacesClient.Get(as.ResourceGroup, nicName, "") - glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) if err != nil { return network.Interface{}, err } @@ -642,8 +637,6 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N nicName := *nic.Name glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) - as.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): start", *nic.Name) respChan, errChan := as.InterfacesClient.CreateOrUpdate(as.ResourceGroup, *nic.Name, nic, nil) resp := <-respChan err := <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index c9b827c308e..2116e4f0dc7 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -401,10 +401,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { allScaleSets := make([]string, 0) backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.List start for %v", ss.ResourceGroup) result, err = ss.VirtualMachineScaleSetsClient.List(ss.ResourceGroup) - glog.V(10).Infof("VirtualMachineScaleSetsClient.List end for %v", ss.ResourceGroup) if err != nil { glog.Errorf("VirtualMachineScaleSetsClient.List for %v failed: %v", ss.ResourceGroup, err) return false, err @@ -425,10 +422,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.ListNextResults start for %v", ss.ResourceGroup) result, err = ss.VirtualMachineScaleSetsClient.ListNextResults(result) - glog.V(10).Infof("VirtualMachineScaleSetsClient.ListNextResults end for %v", ss.ResourceGroup) if err != nil { glog.Errorf("VirtualMachineScaleSetsClient.ListNextResults for %v failed: %v", ss.ResourceGroup, err) return false, err @@ -455,10 +449,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir allVMs := make([]compute.VirtualMachineScaleSetVM, 0) backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List start for %v", scaleSetName) result, err = ss.VirtualMachineScaleSetVMsClient.List(ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView)) - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List end for %v", scaleSetName) if err != nil { glog.Errorf("VirtualMachineScaleSetVMsClient.List for %v failed: %v", scaleSetName, err) return false, err @@ -477,10 +468,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - 
glog.V(10).Infof("VirtualMachineScaleSetVMsClient.ListNextResults start for %v", scaleSetName) result, err = ss.VirtualMachineScaleSetVMsClient.ListNextResults(result) - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.ListNextResults end for %v", ss.ResourceGroup) if err != nil { glog.Errorf("VirtualMachineScaleSetVMsClient.ListNextResults for %v failed: %v", scaleSetName, err) return false, err @@ -622,10 +610,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int return network.Interface{}, err } - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, vm.ScaleSetName, vm.InstanceID, nicName, "") - glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) if err != nil { glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, vm.ScaleSetName, nicName, err) return network.Interface{}, err @@ -642,11 +627,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int // getScaleSet gets a scale set by name. func (ss *scaleSet) getScaleSet(name string) (compute.VirtualMachineScaleSet, bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.Get(%s): start", name) result, err := ss.VirtualMachineScaleSetsClient.Get(ss.ResourceGroup, name) - glog.V(10).Infof("VirtualMachineScaleSetsClient.Get(%s): end", name) - exists, realErr := checkResourceExistsFromError(err) if realErr != nil { return result, false, realErr @@ -714,8 +695,6 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine // createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry. func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): start", *virtualMachineScaleSet.Name) respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet, nil) resp := <-respChan err := <-errChan @@ -727,8 +706,6 @@ func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.V // updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry. 
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): start", scaleSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, scaleSetName, vmInstanceIDs, nil) resp := <-respChan err := <-errChan @@ -784,8 +761,6 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) resp := <-respChan err := <-errChan @@ -829,8 +804,6 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ InstanceIds: &instanceIDs, } - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): start", vmSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, vmSetName, vmInstanceIDs, nil) resp := <-respChan err = <-errChan @@ -898,8 +871,6 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { // Update scale set with backoff. primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", vmSetName) - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) resp := <-respChan err = <-errChan @@ -921,8 +892,6 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ InstanceIds: &instanceIDs, } - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): start", vmSetName) updateRespChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, vmSetName, vmInstanceIDs, nil) updateResp := <-updateRespChan err = <-errChan @@ -943,8 +912,6 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { // TODO: remove this workaround when figuring out the root cause. 
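// (The workaround referenced above: when newBackendPools ends up empty, the scale set CreateOrUpdate below is issued a second time for the same scale set.)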
if len(newBackendPools) == 0 { glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", vmSetName) - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) respChan, errChan = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) resp = <-respChan err = <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 85b67c456b0..9db1a457b5a 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -96,11 +96,7 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM // case we do get instance view every time to fulfill the azure_zones requirement without hitting // throttling. // Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.Get(%s): start", vmName) vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, compute.InstanceView) - glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName) - exists, realErr := checkResourceExistsFromError(err) if realErr != nil { return vm, realErr @@ -122,11 +118,7 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RouteTablesClient.Get(%s): start", az.RouteTableName) routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "") - glog.V(10).Infof("RouteTablesClient.Get(%s): end", az.RouteTableName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return routeTable, false, realErr @@ -142,11 +134,7 @@ func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, er func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.Get(%s): start", az.SecurityGroupName) sg, err = az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "") - glog.V(10).Infof("SecurityGroupsClient.Get(%s): end", az.SecurityGroupName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return sg, false, realErr @@ -161,11 +149,8 @@ func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.Get(%s): start", name) - lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "") - glog.V(10).Infof("LoadBalancerClient.Get(%s): end", name) + lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "") exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return lb, false, realErr @@ -181,10 +166,7 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResult, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.List(%s): start", az.ResourceGroup) lbListResult, err = 
az.LoadBalancerClient.List(az.ResourceGroup) - glog.V(10).Infof("LoadBalancerClient.List(%s): end", az.ResourceGroup) exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return lbListResult, false, realErr @@ -204,11 +186,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi } var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %s): start", resourceGroup, pipName) pip, err = az.PublicIPAddressesClient.Get(resourceGroup, pipName, "") - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %s): end", resourceGroup, pipName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return pip, false, realErr @@ -231,11 +209,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet rg = az.ResourceGroup } - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SubnetsClient.Get(%s): start", subnetName) subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "") - glog.V(10).Infof("SubnetsClient.Get(%s): end", subnetName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return subnet, false, realErr From 3cde2613fff06d1b35739831a1cee62640d80d0e Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 23:09:08 -0800 Subject: [PATCH 090/264] Fix lint and bazel --- hack/update-cloudprovider-gce.sh | 2 ++ hack/verify-cloudprovider-gce.sh | 1 - pkg/cloudprovider/providers/gce/cloud/gen/BUILD | 5 +---- pkg/cloudprovider/providers/gce/cloud/gen/main.go | 2 +- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/hack/update-cloudprovider-gce.sh b/hack/update-cloudprovider-gce.sh index b7d606c95b4..90b8659c563 100755 --- a/hack/update-cloudprovider-gce.sh +++ b/hack/update-cloudprovider-gce.sh @@ -1,4 +1,5 @@ #!/bin/bash + # Copyright 2018 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + set -o errexit set -o nounset set -o pipefail diff --git a/hack/verify-cloudprovider-gce.sh b/hack/verify-cloudprovider-gce.sh index c7615d36592..1aae5aae42a 100755 --- a/hack/verify-cloudprovider-gce.sh +++ b/hack/verify-cloudprovider-gce.sh @@ -1,5 +1,4 @@ #!/bin/bash -#!/bin/bash # Copyright 2018 The Kubernetes Authors. 
# diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/BUILD b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD index e196daf2ac8..a3591435fe4 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/BUILD +++ b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD @@ -5,10 +5,7 @@ go_library( srcs = ["main.go"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen", visibility = ["//visibility:private"], - deps = [ - "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], + deps = ["//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library"], ) go_binary( diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index ba0dd9cc2f0..d6e16f16ec4 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -1113,7 +1113,7 @@ func Test{{.Service}}Group(t *testing.T) { tmpl := template.Must(template.New("unittest").Parse(text)) // Sort keys so the output will be stable. var keys []string - for k, _ := range meta.AllServicesByGroup { + for k := range meta.AllServicesByGroup { keys = append(keys, k) } sort.Strings(keys) From e8005face7a48c8e380adfbdc24caae38762190b Mon Sep 17 00:00:00 2001 From: tianshapjq Date: Wed, 10 Jan 2018 15:30:53 +0800 Subject: [PATCH 091/264] typo of errUnsuportedVersion --- pkg/kubelet/cm/deviceplugin/manager.go | 2 +- pkg/kubelet/cm/deviceplugin/types.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index 646dd658793..b2c09749ba9 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -279,7 +279,7 @@ func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest glog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName) metrics.DevicePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc() if r.Version != pluginapi.Version { - errorString := fmt.Sprintf(errUnsuportedVersion, r.Version, pluginapi.Version) + errorString := fmt.Sprintf(errUnsupportedVersion, r.Version, pluginapi.Version) glog.Infof("Bad registration request from device plugin with resource name %q: %v", r.ResourceName, errorString) return &pluginapi.Empty{}, fmt.Errorf(errorString) } diff --git a/pkg/kubelet/cm/deviceplugin/types.go b/pkg/kubelet/cm/deviceplugin/types.go index 3c6b30206f1..78cf4e41a4d 100644 --- a/pkg/kubelet/cm/deviceplugin/types.go +++ b/pkg/kubelet/cm/deviceplugin/types.go @@ -73,9 +73,9 @@ const ( // errFailedToDialDevicePlugin is the error raised when the device plugin could not be // reached on the registered socket errFailedToDialDevicePlugin = "failed to dial device plugin:" - // errUnsuportedVersion is the error raised when the device plugin uses an API version not + // errUnsupportedVersion is the error raised when the device plugin uses an API version not // supported by the Kubelet registry - errUnsuportedVersion = "requested API version %q is not supported by kubelet. Supported version is %q" + errUnsupportedVersion = "requested API version %q is not supported by kubelet. 
Supported version is %q" // errDevicePluginAlreadyExists is the error raised when a device plugin with the // same Resource Name tries to register itself errDevicePluginAlreadyExists = "another device plugin already registered this Resource Name" From c3a885353b0443ffb8ef8a906116b0f9be752247 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 10 Jan 2018 17:19:35 +0800 Subject: [PATCH 092/264] Add azClientConfig to pass all essential information to create clients --- pkg/cloudprovider/providers/azure/azure.go | 30 +++-- .../providers/azure/azure_client.go | 124 ++++++++++-------- 2 files changed, 84 insertions(+), 70 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 9d61124f194..9070be894d4 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -171,22 +171,28 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { config.CloudProviderRateLimitBucket) } + azClientConfig := &azClientConfig{ + subscriptionID: config.SubscriptionID, + resourceManagerEndpoint: env.ResourceManagerEndpoint, + servicePrincipalToken: servicePrincipalToken, + rateLimiter: operationPollRateLimiter, + } az := Cloud{ Config: *config, Environment: *env, - DisksClient: newAzDisksClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - RoutesClient: newAzRoutesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - SubnetsClient: newAzSubnetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - InterfacesClient: newAzInterfacesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - RouteTablesClient: newAzRouteTablesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - LoadBalancerClient: newAzLoadBalancersClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - SecurityGroupsClient: newAzSecurityGroupsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - StorageAccountClient: newAzStorageAccountClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - VirtualMachinesClient: newAzVirtualMachinesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - PublicIPAddressesClient: newAzPublicIPAddressesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + DisksClient: newAzDisksClient(azClientConfig), + RoutesClient: newAzRoutesClient(azClientConfig), + SubnetsClient: newAzSubnetsClient(azClientConfig), + InterfacesClient: newAzInterfacesClient(azClientConfig), + RouteTablesClient: newAzRouteTablesClient(azClientConfig), + LoadBalancerClient: newAzLoadBalancersClient(azClientConfig), + SecurityGroupsClient: newAzSecurityGroupsClient(azClientConfig), + StorageAccountClient: 
newAzStorageAccountClient(azClientConfig), + VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig), + PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig), + VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig), + VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig), } // Conditionally configure resource request backoff diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index 3a359dd6893..7cf65fe13e9 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -124,21 +124,29 @@ type DisksClient interface { Get(resourceGroupName string, diskName string) (result disk.Model, err error) } +// azClientConfig contains all essential information to create an Azure client. +type azClientConfig struct { + subscriptionID string + resourceManagerEndpoint string + servicePrincipalToken *adal.ServicePrincipalToken + rateLimiter flowcontrol.RateLimiter +} + // azVirtualMachinesClient implements VirtualMachinesClient. type azVirtualMachinesClient struct { client compute.VirtualMachinesClient rateLimiter flowcontrol.RateLimiter } -func newAzVirtualMachinesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachinesClient { - virtualMachinesClient := compute.NewVirtualMachinesClient(subscriptionID) - virtualMachinesClient.BaseURI = endpoint - virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzVirtualMachinesClient(config *azClientConfig) *azVirtualMachinesClient { + virtualMachinesClient := compute.NewVirtualMachinesClient(config.subscriptionID) + virtualMachinesClient.BaseURI = config.resourceManagerEndpoint + virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) virtualMachinesClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachinesClient.Client) return &azVirtualMachinesClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: virtualMachinesClient, } } @@ -189,15 +197,15 @@ type azInterfacesClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzInterfacesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azInterfacesClient { - interfacesClient := network.NewInterfacesClient(subscriptionID) - interfacesClient.BaseURI = endpoint - interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzInterfacesClient(config *azClientConfig) *azInterfacesClient { + interfacesClient := network.NewInterfacesClient(config.subscriptionID) + interfacesClient.BaseURI = config.resourceManagerEndpoint + interfacesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) interfacesClient.PollingDelay = 5 * time.Second configureUserAgent(&interfacesClient.Client) return &azInterfacesClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: interfacesClient, } } @@ -238,15 +246,15 @@ type azLoadBalancersClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzLoadBalancersClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azLoadBalancersClient { - loadBalancerClient := network.NewLoadBalancersClient(subscriptionID) - loadBalancerClient.BaseURI 
= endpoint - loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzLoadBalancersClient(config *azClientConfig) *azLoadBalancersClient { + loadBalancerClient := network.NewLoadBalancersClient(config.subscriptionID) + loadBalancerClient.BaseURI = config.resourceManagerEndpoint + loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) loadBalancerClient.PollingDelay = 5 * time.Second configureUserAgent(&loadBalancerClient.Client) return &azLoadBalancersClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: loadBalancerClient, } } @@ -307,15 +315,15 @@ type azPublicIPAddressesClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzPublicIPAddressesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azPublicIPAddressesClient { - publicIPAddressClient := network.NewPublicIPAddressesClient(subscriptionID) - publicIPAddressClient.BaseURI = endpoint - publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzPublicIPAddressesClient(config *azClientConfig) *azPublicIPAddressesClient { + publicIPAddressClient := network.NewPublicIPAddressesClient(config.subscriptionID) + publicIPAddressClient.BaseURI = config.resourceManagerEndpoint + publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) publicIPAddressClient.PollingDelay = 5 * time.Second configureUserAgent(&publicIPAddressClient.Client) return &azPublicIPAddressesClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: publicIPAddressClient, } } @@ -376,16 +384,16 @@ type azSubnetsClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzSubnetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSubnetsClient { - subnetsClient := network.NewSubnetsClient(subscriptionID) - subnetsClient.BaseURI = endpoint - subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzSubnetsClient(config *azClientConfig) *azSubnetsClient { + subnetsClient := network.NewSubnetsClient(config.subscriptionID) + subnetsClient.BaseURI = config.resourceManagerEndpoint + subnetsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) subnetsClient.PollingDelay = 5 * time.Second configureUserAgent(&subnetsClient.Client) return &azSubnetsClient{ client: subnetsClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -435,15 +443,15 @@ type azSecurityGroupsClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzSecurityGroupsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSecurityGroupsClient { - securityGroupsClient := network.NewSecurityGroupsClient(subscriptionID) - securityGroupsClient.BaseURI = endpoint - securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzSecurityGroupsClient(config *azClientConfig) *azSecurityGroupsClient { + securityGroupsClient := network.NewSecurityGroupsClient(config.subscriptionID) + securityGroupsClient.BaseURI = config.resourceManagerEndpoint + securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) securityGroupsClient.PollingDelay = 5 * time.Second configureUserAgent(&securityGroupsClient.Client) return 
&azSecurityGroupsClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: securityGroupsClient, } } @@ -494,16 +502,16 @@ type azVirtualMachineScaleSetsClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzVirtualMachineScaleSetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetsClient { - virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(subscriptionID) - virtualMachineScaleSetsClient.BaseURI = endpoint - virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzVirtualMachineScaleSetsClient(config *azClientConfig) *azVirtualMachineScaleSetsClient { + virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(config.subscriptionID) + virtualMachineScaleSetsClient.BaseURI = config.resourceManagerEndpoint + virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachineScaleSetsClient.Client) return &azVirtualMachineScaleSetsClient{ client: virtualMachineScaleSetsClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -563,16 +571,16 @@ type azVirtualMachineScaleSetVMsClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzVirtualMachineScaleSetVMsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetVMsClient { - virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) - virtualMachineScaleSetVMsClient.BaseURI = endpoint - virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzVirtualMachineScaleSetVMsClient(config *azClientConfig) *azVirtualMachineScaleSetVMsClient { + virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(config.subscriptionID) + virtualMachineScaleSetVMsClient.BaseURI = config.resourceManagerEndpoint + virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachineScaleSetVMsClient.Client) return &azVirtualMachineScaleSetVMsClient{ client: virtualMachineScaleSetVMsClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -622,16 +630,16 @@ type azRoutesClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzRoutesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRoutesClient { - routesClient := network.NewRoutesClient(subscriptionID) - routesClient.BaseURI = endpoint - routesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzRoutesClient(config *azClientConfig) *azRoutesClient { + routesClient := network.NewRoutesClient(config.subscriptionID) + routesClient.BaseURI = config.resourceManagerEndpoint + routesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) routesClient.PollingDelay = 5 * time.Second configureUserAgent(&routesClient.Client) return &azRoutesClient{ client: routesClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -661,16 +669,16 @@ type azRouteTablesClient struct { rateLimiter flowcontrol.RateLimiter } -func 
newAzRouteTablesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRouteTablesClient { - routeTablesClient := network.NewRouteTablesClient(subscriptionID) - routeTablesClient.BaseURI = endpoint - routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzRouteTablesClient(config *azClientConfig) *azRouteTablesClient { + routeTablesClient := network.NewRouteTablesClient(config.subscriptionID) + routeTablesClient.BaseURI = config.resourceManagerEndpoint + routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) routeTablesClient.PollingDelay = 5 * time.Second configureUserAgent(&routeTablesClient.Client) return &azRouteTablesClient{ client: routeTablesClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -700,15 +708,15 @@ type azStorageAccountClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzStorageAccountClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azStorageAccountClient { - storageAccountClient := storage.NewAccountsClientWithBaseURI(endpoint, subscriptionID) - storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzStorageAccountClient(config *azClientConfig) *azStorageAccountClient { + storageAccountClient := storage.NewAccountsClientWithBaseURI(config.resourceManagerEndpoint, config.subscriptionID) + storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) storageAccountClient.PollingDelay = 5 * time.Second configureUserAgent(&storageAccountClient.Client) return &azStorageAccountClient{ client: storageAccountClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -768,15 +776,15 @@ type azDisksClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzDisksClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azDisksClient { - disksClient := disk.NewDisksClientWithBaseURI(endpoint, subscriptionID) - disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzDisksClient(config *azClientConfig) *azDisksClient { + disksClient := disk.NewDisksClientWithBaseURI(config.resourceManagerEndpoint, config.subscriptionID) + disksClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) disksClient.PollingDelay = 5 * time.Second configureUserAgent(&disksClient.Client) return &azDisksClient{ client: disksClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } From fc6443ce2c3a2b36b9f7dbf2388055751b612fdd Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Fri, 8 Dec 2017 07:11:48 +0530 Subject: [PATCH 093/264] Add volID based delete() and resize() if volID is available in pv spec. 
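In short, the deleter and the expander stop assuming that the backend volume ID can always be derived from the gluster volume name: when the PV carries a "VolID" annotation, that value is used directly, and the name-based derivation remains only as a fallback. A minimal standalone sketch of that lookup order follows; it assumes the plugin's volPrefix constant is "vol_" and is an illustration of the idea, not the patch's exact code.

package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
)

// getVolumeIDSketch mirrors the lookup order this patch introduces: prefer an
// explicit "VolID" annotation on the PV, otherwise derive the ID from the
// volume name by trimming the provisioner prefix (assumed to be "vol_" here).
func getVolumeIDSketch(pv *v1.PersistentVolume, volumeName string) (string, error) {
	if pv == nil {
		return "", fmt.Errorf("provided PV spec is nil")
	}
	if id := pv.Annotations["VolID"]; id != "" {
		return id, nil
	}
	if id := strings.TrimPrefix(volumeName, "vol_"); id != "" {
		return id, nil
	}
	return "", fmt.Errorf("volume ID is empty")
}

func main() {
	pv := &v1.PersistentVolume{}
	pv.Annotations = map[string]string{"VolID": "9f0c1d2e"} // hypothetical backend ID
	id, err := getVolumeIDSketch(pv, "vol_9f0c1d2e")
	fmt.Println(id, err)
}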
Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/glusterfs.go | 33 ++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index c42e3cdf794..144724ecfef 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -596,8 +596,13 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) { func (d *glusterfsVolumeDeleter) Delete() error { glog.V(2).Infof("delete volume: %s ", d.glusterfsMounter.path) + volumeName := d.glusterfsMounter.path - volumeID := dstrings.TrimPrefix(volumeName, volPrefix) + volumeID, err := getVolumeID(d.spec, volumeName) + if err != nil { + return fmt.Errorf("failed to get volumeID, err: %v", err) + } + class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec) if err != nil { return err @@ -1048,13 +1053,35 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa return &cfg, nil } +// getVolumeID returns volumeID from the PV or volumename. +func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) { + volumeID := "" + + // Get volID from pvspec if available, else fill it from volumename. + if pv != nil { + if pv.Annotations["VolID"] != "" { + volumeID = pv.Annotations["VolID"] + } else { + volumeID = dstrings.TrimPrefix(volumeName, volPrefix) + } + } else { + return volumeID, fmt.Errorf("provided PV spec is nil") + } + if volumeID == "" { + return volumeID, fmt.Errorf("volume ID is empty") + } + return volumeID, nil +} + func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { pvSpec := spec.PersistentVolume.Spec glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path) volumeName := pvSpec.Glusterfs.Path + volumeID, err := getVolumeID(spec.PersistentVolume, volumeName) - // Fetch the volume for expansion. - volumeID := dstrings.TrimPrefix(volumeName, volPrefix) + if err != nil { + return oldSize, fmt.Errorf("failed to get volumeID, err: %v", err) + } //Get details of SC. 
class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume) From 19003486bfa241d539b13b030979f31f7713450e Mon Sep 17 00:00:00 2001 From: linyouchong Date: Mon, 25 Dec 2017 17:36:59 +0800 Subject: [PATCH 094/264] Fix bug:Kubelet failure to umount mount points --- pkg/volume/util/BUILD | 9 ++--- pkg/volume/util/util.go | 71 +++++++++++++++++++++++++++--------- pkg/volume/util/util_test.go | 41 +++++++++++++++++++++ 3 files changed, 97 insertions(+), 24 deletions(-) diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index 22c8fd82cd4..7a447ddb976 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -147,15 +147,12 @@ go_test( deps = [ "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/util/mount:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/k8s.io/client-go/util/testing:go_default_library", - ], - "//conditions:default": [], - }), + "//vendor/k8s.io/client-go/util/testing:go_default_library", + ], ) filegroup( diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 106036dfb57..6b6528f8ab6 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -23,6 +23,7 @@ import ( "path" "path/filepath" "strings" + "syscall" "github.com/golang/glog" "k8s.io/api/core/v1" @@ -96,29 +97,42 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { // IsNotMountPoint will be called instead of IsLikelyNotMountPoint. // IsNotMountPoint is more expensive but properly handles bind mounts. func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { - if pathExists, pathErr := PathExists(mountPath); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { + pathExists, pathErr := PathExists(mountPath) + if !pathExists { glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) return nil } - - var notMnt bool - var err error - - if extensiveMountPointCheck { - notMnt, err = mount.IsNotMountPoint(mounter, mountPath) - } else { - notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + corruptedMnt := isCorruptedMnt(pathErr) + if pathErr != nil && !corruptedMnt { + return fmt.Errorf("Error checking path: %v", pathErr) } + return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt) +} - if err != nil { - return err - } +// doUnmountMountPoint is a common unmount routine that unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts. 
+// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, Take it as an argument for convenience of testing +func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error { + if !corruptedMnt { + var notMnt bool + var err error + if extensiveMountPointCheck { + notMnt, err = mount.IsNotMountPoint(mounter, mountPath) + } else { + notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + } - if notMnt { - glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) - return os.Remove(mountPath) + if err != nil { + return err + } + + if notMnt { + glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + return os.Remove(mountPath) + } } // Unmount the mount path @@ -128,7 +142,7 @@ func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMount } notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath) if mntErr != nil { - return err + return mntErr } if notMnt { glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) @@ -144,11 +158,32 @@ func PathExists(path string) (bool, error) { return true, nil } else if os.IsNotExist(err) { return false, nil + } else if isCorruptedMnt(err) { + return true, err } else { return false, err } } +// isCorruptedMnt return true if err is about corrupted mount point +func isCorruptedMnt(err error) bool { + if err == nil { + return false + } + var underlyingError error + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + underlyingError = pe.Err + case *os.LinkError: + underlyingError = pe.Err + case *os.SyscallError: + underlyingError = pe.Err + } + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE +} + // GetSecretForPod locates secret by name in the pod's namespace and returns secret map func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) { secret := make(map[string]string) diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index b11be33eeb2..5fd11f0861e 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -24,10 +24,12 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + utiltesting "k8s.io/client-go/util/testing" // util.go uses api.Codecs.LegacyCodec so import this package to do some // resource initialization. 
_ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/util/mount" ) var nodeLabels map[string]string = map[string]string{ @@ -263,3 +265,42 @@ func TestZonesToSet(t *testing.T) { } } } + +func TestDoUnmountMountPoint(t *testing.T) { + + tmpDir1, err1 := utiltesting.MkTmpdir("umount_test1") + if err1 != nil { + t.Fatalf("error creating temp dir: %v", err1) + } + defer os.RemoveAll(tmpDir1) + + tmpDir2, err2 := utiltesting.MkTmpdir("umount_test2") + if err2 != nil { + t.Fatalf("error creating temp dir: %v", err2) + } + defer os.RemoveAll(tmpDir2) + + // Second part: want no error + tests := []struct { + mountPath string + corruptedMnt bool + }{ + { + mountPath: tmpDir1, + corruptedMnt: true, + }, + { + mountPath: tmpDir2, + corruptedMnt: false, + }, + } + + fake := &mount.FakeMounter{} + + for _, tt := range tests { + err := doUnmountMountPoint(tt.mountPath, fake, false, tt.corruptedMnt) + if err != nil { + t.Errorf("err Expected nil, but got: %v", err) + } + } +} From 713e28874afab96b91000d187b0d3d6ce01abf2a Mon Sep 17 00:00:00 2001 From: lcfang Date: Wed, 10 Jan 2018 22:05:00 +0800 Subject: [PATCH 095/264] fixed some bad url --- api/swagger-spec/apps_v1alpha1.json | 2 +- .../src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto | 2 +- staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/swagger-spec/apps_v1alpha1.json b/api/swagger-spec/apps_v1alpha1.json index aa3fbdcc2d8..6f546623de3 100644 --- a/api/swagger-spec/apps_v1alpha1.json +++ b/api/swagger-spec/apps_v1alpha1.json @@ -1311,7 +1311,7 @@ }, "serviceAccountName": { "type": "string", - "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md" + "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md" }, "serviceAccount": { "type": "string", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go index cd505e007d0..7e880ab33f3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go @@ -136,7 +136,7 @@ type CarpSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this carp. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto index de59bc3ddaa..dc78ca40e7f 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto @@ -122,7 +122,7 @@ message PodSpec { map nodeSelector = 7; // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md // +optional optional string serviceAccountName = 8; diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go index 7be2a0c6ff4..06c3f9f8873 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go @@ -136,7 +136,7 @@ type PodSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. From 8b5f293b3df27c7436ba8ce8130f5d5b447ff920 Mon Sep 17 00:00:00 2001 From: Scott Creeley Date: Wed, 10 Jan 2018 10:21:44 -0500 Subject: [PATCH 096/264] fix for local-up-cluster.sh bad cloud_config_arg --- hack/local-up-cluster.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 4c6b855331b..46a24fb37b0 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -605,7 +605,7 @@ function start_controller_manager { node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 " fi - cloud_config_arg=cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" + cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then cloud_config_arg="--cloud-provider=external" fi @@ -668,7 +668,7 @@ function start_kubelet { priv_arg="--allow-privileged " fi - cloud_config_arg=cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" + cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then cloud_config_arg="--cloud-provider=external" fi From a6d979dd88141f362e48c49e7407d8187c3047da Mon Sep 17 00:00:00 2001 From: Mitsuhiro Tanino Date: Sat, 28 Oct 2017 15:28:52 -0400 Subject: [PATCH 097/264] Block volumes Support: iSCSI plugin update This patch adds block volume support to iSCSI volume plugin. 
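Concretely, the plugin now implements the volume.BlockVolumePlugin interface (NewBlockVolumeMapper, NewBlockVolumeUnmapper, ConstructBlockVolumeSpec) and, behind the BlockVolume feature gate, branches on the PersistentVolume's volumeMode: Filesystem volumes keep the existing format-and-mount path, while Block volumes are exposed as raw devices under a separate volumeDevices global path. The sketch below only illustrates that volumeMode branching with the core/v1 types; it is a simplified stand-in, not the plugin's actual code path.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// describeVolumeMode shows the decision this patch adds: an unset volumeMode
// defaults to Filesystem, and only an explicit Block mode skips mkfs/mount.
func describeVolumeMode(pv *v1.PersistentVolume) string {
	mode := v1.PersistentVolumeFilesystem // default when volumeMode is unset
	if pv.Spec.VolumeMode != nil {
		mode = *pv.Spec.VolumeMode
	}
	switch mode {
	case v1.PersistentVolumeBlock:
		return "expose the attached LUN as a raw block device (no format/mount)"
	default:
		return "format the attached LUN and mount it under the global PD path"
	}
}

func main() {
	block := v1.PersistentVolumeBlock
	pv := &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &block}}
	fmt.Println(describeVolumeMode(pv))
}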
--- pkg/volume/iscsi/attacher.go | 118 +++------ pkg/volume/iscsi/disk_manager.go | 6 +- pkg/volume/iscsi/iscsi.go | 405 +++++++++++++++++++++++++------ pkg/volume/iscsi/iscsi_test.go | 129 +++++++++- pkg/volume/iscsi/iscsi_util.go | 201 ++++++++++++--- 5 files changed, 658 insertions(+), 201 deletions(-) diff --git a/pkg/volume/iscsi/attacher.go b/pkg/volume/iscsi/attacher.go index b86b2f2499e..2aab2ecc9d5 100644 --- a/pkg/volume/iscsi/attacher.go +++ b/pkg/volume/iscsi/attacher.go @@ -19,16 +19,17 @@ package iscsi import ( "fmt" "os" - "strconv" "time" "github.com/golang/glog" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type iscsiAttacher struct { @@ -66,7 +67,7 @@ func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName } func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { - mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, pod) + mounter, err := volumeSpecToMounter(spec, attacher.host, pod) if err != nil { glog.Warningf("failed to get iscsi mounter: %v", err) return "", err @@ -76,7 +77,7 @@ func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath strin func (attacher *iscsiAttacher) GetDeviceMountPath( spec *volume.Spec) (string, error) { - mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, nil) + mounter, err := volumeSpecToMounter(spec, attacher.host, nil) if err != nil { glog.Warningf("failed to get iscsi mounter: %v", err) return "", err @@ -143,7 +144,7 @@ func (detacher *iscsiDetacher) Detach(volumeName string, nodeName types.NodeName } func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { - unMounter := detacher.volumeSpecToUnmounter(detacher.mounter) + unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host) err := detacher.manager.DetachDisk(*unMounter, deviceMountPath) if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err) @@ -157,94 +158,49 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { return nil } -func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) { +func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) { var secret map[string]string - var bkportal []string readOnly, fsType, err := getISCSIVolumeInfo(spec) if err != nil { return nil, err } + var podUID types.UID if pod != nil { - chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) + secret, err = createSecretMap(spec, &iscsiPlugin{host: host}, pod.Namespace) if err != nil { return nil, err } - chapSession, err := getISCSISessionCHAPInfo(spec) + podUID = pod.UID + } + iscsiDisk, err := createISCSIDisk(spec, + podUID, + &iscsiPlugin{host: host}, + &ISCSIUtil{}, + secret, + ) + if err != nil { + return nil, err + } + exec := host.GetExec(iscsiPluginName) + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := volumehelper.GetVolumeMode(spec) if err != nil { return nil, err } - if chapDiscovery || chapSession { - secretName, 
secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace) - if err != nil { - return nil, err - } - if len(secretNamespace) == 0 || len(secretName) == 0 { - return nil, fmt.Errorf("CHAP enabled but secret name or namespace is empty") - } - // if secret is provided, retrieve it - kubeClient := host.GetKubeClient() - if kubeClient == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) - if err != nil { - err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) - return nil, err - } - secret = make(map[string]string) - for name, data := range secretObj.Data { - glog.V(6).Infof("retrieving CHAP secret name: %s", name) - secret[name] = string(data) - } - } - + glog.V(5).Infof("iscsi: VolumeSpecToMounter volumeMode %s", volumeMode) + return &iscsiDiskMounter{ + iscsiDisk: iscsiDisk, + fsType: fsType, + volumeMode: volumeMode, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec}, + exec: exec, + deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), + }, nil } - tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) - if err != nil { - return nil, err - } - - lun := strconv.Itoa(int(lunStr)) - portal := portalMounter(tp) - bkportal = append(bkportal, portal) - for _, p := range portals { - bkportal = append(bkportal, portalMounter(string(p))) - } - - iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) - if err != nil { - return nil, err - } - - var initiatorName string - if initiatorNamePtr != nil { - initiatorName = *initiatorNamePtr - } - chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) - if err != nil { - return nil, err - } - chapSession, err := getISCSISessionCHAPInfo(spec) - if err != nil { - return nil, err - } - exec := attacher.host.GetExec(iscsiPluginName) - return &iscsiDiskMounter{ - iscsiDisk: &iscsiDisk{ - plugin: &iscsiPlugin{ - host: host, - }, - VolName: spec.Name(), - Portals: bkportal, - Iqn: iqn, - lun: lun, - Iface: iface, - chap_discovery: chapDiscovery, - chap_session: chapSession, - secret: secret, - InitiatorName: initiatorName, - manager: &ISCSIUtil{}}, + iscsiDisk: iscsiDisk, fsType: fsType, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec}, @@ -253,8 +209,8 @@ func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volum }, nil } -func (detacher *iscsiDetacher) volumeSpecToUnmounter(mounter mount.Interface) *iscsiDiskUnmounter { - exec := detacher.host.GetExec(iscsiPluginName) +func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost) *iscsiDiskUnmounter { + exec := host.GetExec(iscsiPluginName) return &iscsiDiskUnmounter{ iscsiDisk: &iscsiDisk{ plugin: &iscsiPlugin{}, diff --git a/pkg/volume/iscsi/disk_manager.go b/pkg/volume/iscsi/disk_manager.go index ea00c7ebcbe..4d5e9f9fe7d 100644 --- a/pkg/volume/iscsi/disk_manager.go +++ b/pkg/volume/iscsi/disk_manager.go @@ -27,15 +27,19 @@ import ( // Abstract interface to disk operations. type diskManager interface { MakeGlobalPDName(disk iscsiDisk) string + MakeGlobalVDPDName(disk iscsiDisk) string // Attaches the disk to the kubelet's host machine. AttachDisk(b iscsiDiskMounter) (string, error) // Detaches the disk from the kubelet's host machine. DetachDisk(disk iscsiDiskUnmounter, mntPath string) error + // Detaches the block disk from the kubelet's host machine. 
+ DetachBlockISCSIDisk(disk iscsiDiskUnmapper, mntPath string) error } // utility to mount a disk based filesystem +// globalPDPath: global mount path like, /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id} +// volPath: pod volume dir path like, /var/lib/kubelet/pods/{podUID}/volumes/kubernetes.io~iscsi/{volumeName} func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error { - // TODO: handle failed mounts here. notMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { glog.Errorf("cannot validate mountpoint: %s", volPath) diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index 4ea6e792ef8..e9611e208c3 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -18,6 +18,8 @@ package iscsi import ( "fmt" + "os" + "path/filepath" "strconv" "strings" @@ -42,6 +44,7 @@ type iscsiPlugin struct { var _ volume.VolumePlugin = &iscsiPlugin{} var _ volume.PersistentVolumePlugin = &iscsiPlugin{} +var _ volume.BlockVolumePlugin = &iscsiPlugin{} const ( iscsiPluginName = "kubernetes.io/iscsi" @@ -93,98 +96,27 @@ func (plugin *iscsiPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { } func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - // Inject real implementations here, test through the internal function. - var secret map[string]string if pod == nil { return nil, fmt.Errorf("nil pod") } - chapDiscover, err := getISCSIDiscoveryCHAPInfo(spec) + secret, err := createSecretMap(spec, plugin, pod.Namespace) if err != nil { return nil, err } - chapSession, err := getISCSISessionCHAPInfo(spec) - if err != nil { - return nil, err - } - if chapDiscover || chapSession { - secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace) - if err != nil { - return nil, err - } - - if len(secretName) > 0 && len(secretNamespace) > 0 { - // if secret is provideded, retrieve it - kubeClient := plugin.host.GetKubeClient() - if kubeClient == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) - if err != nil { - err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) - return nil, err - } - secret = make(map[string]string) - for name, data := range secretObj.Data { - glog.V(4).Infof("retrieving CHAP secret name: %s", name) - secret[name] = string(data) - } - } - } return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) } func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.Mounter, error) { - // iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author. 
- // iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV readOnly, fsType, err := getISCSIVolumeInfo(spec) if err != nil { return nil, err } - tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) + iscsiDisk, err := createISCSIDisk(spec, podUID, plugin, manager, secret) if err != nil { return nil, err } - - lun := strconv.Itoa(int(lunStr)) - portal := portalMounter(tp) - var bkportal []string - bkportal = append(bkportal, portal) - for _, p := range portals { - bkportal = append(bkportal, portalMounter(string(p))) - } - - iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) - if err != nil { - return nil, err - } - - var initiatorName string - if initiatorNamePtr != nil { - initiatorName = *initiatorNamePtr - } - chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) - if err != nil { - return nil, err - } - chapSession, err := getISCSISessionCHAPInfo(spec) - if err != nil { - return nil, err - } - return &iscsiDiskMounter{ - iscsiDisk: &iscsiDisk{ - podUID: podUID, - VolName: spec.Name(), - Portals: bkportal, - Iqn: iqn, - lun: lun, - Iface: iface, - chap_discovery: chapDiscovery, - chap_session: chapSession, - secret: secret, - InitiatorName: initiatorName, - manager: manager, - plugin: plugin}, + iscsiDisk: iscsiDisk, fsType: fsType, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, @@ -194,8 +126,41 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI }, nil } +// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification. +func (plugin *iscsiPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) { + // If this is called via GenerateUnmapDeviceFunc(), pod is nil. + // Pass empty string as dummy uid since uid isn't used in the case. + var uid types.UID + var secret map[string]string + var err error + if pod != nil { + uid = pod.UID + secret, err = createSecretMap(spec, plugin, pod.Namespace) + if err != nil { + return nil, err + } + } + return plugin.newBlockVolumeMapperInternal(spec, uid, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) +} + +func (plugin *iscsiPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.BlockVolumeMapper, error) { + readOnly, _, err := getISCSIVolumeInfo(spec) + if err != nil { + return nil, err + } + iscsiDisk, err := createISCSIDisk(spec, podUID, plugin, manager, secret) + if err != nil { + return nil, err + } + return &iscsiDiskMapper{ + iscsiDisk: iscsiDisk, + readOnly: readOnly, + exec: exec, + deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()), + }, nil +} + func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { - // Inject real implementations here, test through the internal function. return plugin.newUnmounterInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } @@ -212,25 +177,88 @@ func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID }, nil } +// NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state. 
+func (plugin *iscsiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { + return plugin.newUnmapperInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetExec(plugin.GetPluginName())) +} + +func (plugin *iscsiPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager, exec mount.Exec) (volume.BlockVolumeUnmapper, error) { + return &iscsiDiskUnmapper{ + iscsiDisk: &iscsiDisk{ + podUID: podUID, + VolName: volName, + manager: manager, + plugin: plugin, + }, + exec: exec, + deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()), + }, nil +} + func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { + // Find globalPDPath from pod volume directory(mountPath) + var globalPDPath string + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + paths, err := mount.GetMountRefs(mounter, mountPath) + if err != nil { + return nil, err + } + for _, path := range paths { + if strings.Contains(path, plugin.host.GetPluginDir(iscsiPluginName)) { + globalPDPath = path + break + } + } + // Couldn't fetch globalPDPath + if len(globalPDPath) == 0 { + return nil, fmt.Errorf("couldn't fetch globalPDPath. failed to obtain volume spec") + } + + // Obtain iscsi disk configurations from globalPDPath + device, _, err := extractDeviceAndPrefix(globalPDPath) + if err != nil { + return nil, err + } + bkpPortal, iqn, err := extractPortalAndIqn(device) + if err != nil { + return nil, err + } + iface, _ := extractIface(globalPDPath) iscsiVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ - TargetPortal: volumeName, - IQN: volumeName, + TargetPortal: bkpPortal, + IQN: iqn, + ISCSIInterface: iface, }, }, } return volume.NewSpecFromVolume(iscsiVolume), nil } +func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { + pluginDir := plugin.host.GetVolumeDevicePluginDir(iscsiPluginName) + blkutil := ioutil.NewBlockVolumePathHandler() + globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) + if err != nil { + return nil, err + } + glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + // Retreive volume information from globalMapPathUUID + // globalMapPathUUID example: + // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} + // plugins/kubernetes.io/iscsi/volumeDevices/iface-default/192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0/{pod uuid} + globalMapPath := filepath.Dir(globalMapPathUUID) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) +} + type iscsiDisk struct { VolName string podUID types.UID Portals []string Iqn string - lun string + Lun string Iface string chap_discovery bool chap_session bool @@ -248,10 +276,25 @@ func (iscsi *iscsiDisk) GetPath() string { return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name), iscsi.VolName) } +func (iscsi *iscsiDisk) iscsiGlobalMapPath(spec *volume.Spec) (string, error) { + mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, nil /* pod */) + if err != nil { + glog.Warningf("failed to get iscsi mounter: %v", err) + return "", err + } + return iscsi.manager.MakeGlobalVDPDName(*mounter.iscsiDisk), nil +} + +func (iscsi *iscsiDisk) iscsiPodDeviceMapPath() (string, string) { + name := iscsiPluginName + return iscsi.plugin.host.GetPodVolumeDeviceDir(iscsi.podUID, 
utilstrings.EscapeQualifiedNameForDisk(name)), iscsi.VolName +} + type iscsiDiskMounter struct { *iscsiDisk readOnly bool fsType string + volumeMode v1.PersistentVolumeMode mounter *mount.SafeFormatAndMount exec mount.Exec deviceUtil ioutil.DeviceUtil @@ -306,6 +349,58 @@ func (c *iscsiDiskUnmounter) TearDownAt(dir string) error { return ioutil.UnmountPath(dir, c.mounter) } +// Block Volumes Support +type iscsiDiskMapper struct { + *iscsiDisk + readOnly bool + exec mount.Exec + deviceUtil ioutil.DeviceUtil +} + +var _ volume.BlockVolumeMapper = &iscsiDiskMapper{} + +func (b *iscsiDiskMapper) SetUpDevice() (string, error) { + return "", nil +} + +type iscsiDiskUnmapper struct { + *iscsiDisk + exec mount.Exec + deviceUtil ioutil.DeviceUtil +} + +var _ volume.BlockVolumeUnmapper = &iscsiDiskUnmapper{} + +// Even though iSCSI plugin has attacher/detacher implementation, iSCSI plugin +// needs volume detach operation during TearDownDevice(). This method is only +// chance that operations are done on kubelet node during volume teardown sequences. +func (c *iscsiDiskUnmapper) TearDownDevice(mapPath, _ string) error { + err := c.manager.DetachBlockISCSIDisk(*c, mapPath) + if err != nil { + return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", mapPath, err) + } + glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", mapPath) + err = os.RemoveAll(mapPath) + if err != nil { + return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", mapPath, err) + } + glog.V(4).Infof("iscsi: successfully detached disk: %s", mapPath) + return nil +} + +// GetGlobalMapPath returns global map path and error +// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id} +func (iscsi *iscsiDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { + return iscsi.iscsiGlobalMapPath(spec) +} + +// GetPodDeviceMapPath returns pod device map path and volume name +// path: pods/{podUid}/volumeDevices/kubernetes.io~iscsi +// volumeName: pv0001 +func (iscsi *iscsiDisk) GetPodDeviceMapPath() (string, string) { + return iscsi.iscsiPodDeviceMapPath() +} + func portalMounter(portal string) string { if !strings.Contains(portal, ":") { portal = portal + ":3260" @@ -316,7 +411,7 @@ func portalMounter(portal string) string { // get iSCSI volume info: readOnly and fstype func getISCSIVolumeInfo(spec *volume.Spec) (bool, string, error) { // for volume source, readonly is in volume spec - // for PV, readonly is in PV spec + // for PV, readonly is in PV spec. 
PV gets the ReadOnly flag indirectly through the PVC source if spec.Volume != nil && spec.Volume.ISCSI != nil { return spec.Volume.ISCSI.ReadOnly, spec.Volume.ISCSI.FSType, nil } else if spec.PersistentVolume != nil && @@ -397,3 +492,155 @@ func getISCSISecretNameAndNamespace(spec *volume.Spec, defaultSecretNamespace st return "", "", fmt.Errorf("Spec does not reference an ISCSI volume type") } + +func createISCSIDisk(spec *volume.Spec, podUID types.UID, plugin *iscsiPlugin, manager diskManager, secret map[string]string) (*iscsiDisk, error) { + tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) + if err != nil { + return nil, err + } + + lun := strconv.Itoa(int(lunStr)) + portal := portalMounter(tp) + var bkportal []string + bkportal = append(bkportal, portal) + for _, p := range portals { + bkportal = append(bkportal, portalMounter(string(p))) + } + + iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) + if err != nil { + return nil, err + } + + var initiatorName string + if initiatorNamePtr != nil { + initiatorName = *initiatorNamePtr + } + chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) + if err != nil { + return nil, err + } + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { + return nil, err + } + + return &iscsiDisk{ + podUID: podUID, + VolName: spec.Name(), + Portals: bkportal, + Iqn: iqn, + Lun: lun, + Iface: iface, + chap_discovery: chapDiscovery, + chap_session: chapSession, + secret: secret, + InitiatorName: initiatorName, + manager: manager, + plugin: plugin}, nil +} + +func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) (map[string]string, error) { + var secret map[string]string + chapDiscover, err := getISCSIDiscoveryCHAPInfo(spec) + if err != nil { + return nil, err + } + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { + return nil, err + } + if chapDiscover || chapSession { + secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, namespace) + if err != nil { + return nil, err + } + + if len(secretName) > 0 && len(secretNamespace) > 0 { + // if secret is provideded, retrieve it + kubeClient := plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) + return nil, err + } + secret = make(map[string]string) + for name, data := range secretObj.Data { + glog.V(4).Infof("retrieving CHAP secret name: %s", name) + secret[name] = string(data) + } + } + } + return secret, err +} + +func createVolumeFromISCSIVolumeSource(volumeName string, iscsi v1.ISCSIVolumeSource) *v1.Volume { + return &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + ISCSI: &iscsi, + }, + } +} + +func createPersistentVolumeFromISCSIPVSource(volumeName string, iscsi v1.ISCSIPersistentVolumeSource) *v1.PersistentVolume { + block := v1.PersistentVolumeBlock + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + ISCSI: &iscsi, + }, + VolumeMode: &block, + }, + } +} + +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { + // Retreive volume spec information from globalMapPath + // globalMapPath example: + // 
plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath} + // plugins/kubernetes.io/iscsi/volumeDevices/iface-default/192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0 + + // device: 192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0 + device, _, err := extractDeviceAndPrefix(globalMapPath) + if err != nil { + return nil, err + } + bkpPortal, iqn, err := extractPortalAndIqn(device) + if err != nil { + return nil, err + } + arr := strings.Split(device, "-lun-") + if len(arr) < 2 { + return nil, fmt.Errorf("failed to retreive lun from globalMapPath: %v", globalMapPath) + } + lun, err := strconv.Atoi(arr[1]) + if err != nil { + return nil, err + } + iface, found := extractIface(globalMapPath) + if !found { + return nil, fmt.Errorf("failed to retreive iface from globalMapPath: %v", globalMapPath) + } + iscsiPV := createPersistentVolumeFromISCSIPVSource(volumeName, + v1.ISCSIPersistentVolumeSource{ + TargetPortal: bkpPortal, + IQN: iqn, + Lun: int32(lun), + ISCSIInterface: iface, + }, + ) + glog.V(5).Infof("ConstructBlockVolumeSpec: TargetPortal: %v, IQN: %v, Lun: %v, ISCSIInterface: %v", + iscsiPV.Spec.PersistentVolumeSource.ISCSI.TargetPortal, + iscsiPV.Spec.PersistentVolumeSource.ISCSI.IQN, + iscsiPV.Spec.PersistentVolumeSource.ISCSI.Lun, + iscsiPV.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface, + ) + return volume.NewSpecFromPersistentVolume(iscsiPV, false), nil +} diff --git a/pkg/volume/iscsi/iscsi_test.go b/pkg/volume/iscsi/iscsi_test.go index 831cd564395..eeb6fa6c015 100644 --- a/pkg/volume/iscsi/iscsi_test.go +++ b/pkg/volume/iscsi/iscsi_test.go @@ -19,6 +19,7 @@ package iscsi import ( "fmt" "os" + "strings" "testing" "k8s.io/api/core/v1" @@ -80,7 +81,7 @@ type fakeDiskManager struct { func NewFakeDiskManager() *fakeDiskManager { return &fakeDiskManager{ - tmpDir: utiltesting.MkTmpdirOrDie("fc_test"), + tmpDir: utiltesting.MkTmpdirOrDie("iscsi_test"), } } @@ -91,6 +92,11 @@ func (fake *fakeDiskManager) Cleanup() { func (fake *fakeDiskManager) MakeGlobalPDName(disk iscsiDisk) string { return fake.tmpDir } + +func (fake *fakeDiskManager) MakeGlobalVDPDName(disk iscsiDisk) string { + return fake.tmpDir +} + func (fake *fakeDiskManager) AttachDisk(b iscsiDiskMounter) (string, error) { globalPath := b.manager.MakeGlobalPDName(*b.iscsiDisk) err := os.MkdirAll(globalPath, 0750) @@ -113,6 +119,15 @@ func (fake *fakeDiskManager) DetachDisk(c iscsiDiskUnmounter, mntPath string) er return nil } +func (fake *fakeDiskManager) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mntPath string) error { + globalPath := c.manager.MakeGlobalVDPDName(*c.iscsiDisk) + err := os.RemoveAll(globalPath) + if err != nil { + return err + } + return nil +} + func doTestPlugin(t *testing.T, spec *volume.Spec) { tmpDir, err := utiltesting.MkTmpdir("iscsi_test") if err != nil { @@ -289,10 +304,12 @@ type testcase struct { defaultNs string spec *volume.Spec // Expected return of the test - expectedName string - expectedNs string - expectedIface string - expectedError error + expectedName string + expectedNs string + expectedIface string + expectedError error + expectedDiscoveryCHAP bool + expectedSessionCHAP bool } func TestGetSecretNameAndNamespaceForPV(t *testing.T) { @@ -424,5 +441,105 @@ func TestGetISCSIInitiatorInfo(t *testing.T) { err, resultIface) } } - +} + +func TestGetISCSICHAP(t *testing.T) { + tests := []testcase{ + { + name: "persistent volume source", + spec: &volume.Spec{ + PersistentVolume: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + 
PersistentVolumeSource: v1.PersistentVolumeSource{ + ISCSI: &v1.ISCSIPersistentVolumeSource{ + DiscoveryCHAPAuth: true, + SessionCHAPAuth: true, + }, + }, + }, + }, + }, + expectedDiscoveryCHAP: true, + expectedSessionCHAP: true, + expectedError: nil, + }, + { + name: "pod volume source", + spec: &volume.Spec{ + Volume: &v1.Volume{ + VolumeSource: v1.VolumeSource{ + ISCSI: &v1.ISCSIVolumeSource{ + DiscoveryCHAPAuth: true, + SessionCHAPAuth: true, + }, + }, + }, + }, + expectedDiscoveryCHAP: true, + expectedSessionCHAP: true, + expectedError: nil, + }, + { + name: "no volume", + spec: &volume.Spec{}, + expectedDiscoveryCHAP: false, + expectedSessionCHAP: false, + expectedError: fmt.Errorf("Spec does not reference an ISCSI volume type"), + }, + } + for _, testcase := range tests { + resultDiscoveryCHAP, err := getISCSIDiscoveryCHAPInfo(testcase.spec) + resultSessionCHAP, err := getISCSISessionCHAPInfo(testcase.spec) + switch testcase.name { + case "no volume": + if err.Error() != testcase.expectedError.Error() || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP { + t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v", + testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP, + err, resultDiscoveryCHAP, resultSessionCHAP) + } + default: + if err != testcase.expectedError || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP { + t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v", testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP, + err, resultDiscoveryCHAP, resultSessionCHAP) + } + } + } +} + +func TestGetVolumeSpec(t *testing.T) { + path := "plugins/kubernetes.io/iscsi/volumeDevices/iface-default/127.0.0.1:3260-iqn.2014-12.server:storage.target01-lun-0" + spec, _ := getVolumeSpecFromGlobalMapPath("test", path) + + portal := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.TargetPortal + if portal != "127.0.0.1:3260" { + t.Errorf("wrong portal: %v", portal) + } + iqn := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.IQN + if iqn != "iqn.2014-12.server:storage.target01" { + t.Errorf("wrong iqn: %v", iqn) + } + lun := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.Lun + if lun != 0 { + t.Errorf("wrong lun: %v", lun) + } + iface := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface + if iface != "default" { + t.Errorf("wrong ISCSIInterface: %v", iface) + } +} + +func TestGetVolumeSpec_no_lun(t *testing.T) { + path := "plugins/kubernetes.io/iscsi/volumeDevices/iface-default/127.0.0.1:3260-iqn.2014-12.server:storage.target01" + _, err := getVolumeSpecFromGlobalMapPath("test", path) + if !strings.Contains(err.Error(), "malformatted mnt path") { + t.Errorf("should get error: malformatted mnt path") + } +} + +func TestGetVolumeSpec_no_iface(t *testing.T) { + path := "plugins/kubernetes.io/iscsi/volumeDevices/default/127.0.0.1:3260-iqn.2014-12.server:storage.target01-lun-0" + _, err := getVolumeSpecFromGlobalMapPath("test", path) + if !strings.Contains(err.Error(), "failed to retreive iface") { + t.Errorf("should get error: failed to retreive iface") + } } diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index 12e8430d85e..b42ca1e5b4c 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -27,6 +27,9 @@ import ( "time" 
"github.com/golang/glog" + "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -163,10 +166,21 @@ func makePDNameInternal(host volume.VolumeHost, portal string, iqn string, lun s return path.Join(host.GetPluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun) } +// make a directory like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/iface_name/portal-some_iqn-lun-lun_id +func makeVDPDNameInternal(host volume.VolumeHost, portal string, iqn string, lun string, iface string) string { + return path.Join(host.GetVolumeDevicePluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun) +} + type ISCSIUtil struct{} +// MakeGlobalPDName returns path of global plugin dir func (util *ISCSIUtil) MakeGlobalPDName(iscsi iscsiDisk) string { - return makePDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.lun, iscsi.Iface) + return makePDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface) +} + +// MakeGlobalVDPDName returns path of global volume device plugin dir +func (util *ISCSIUtil) MakeGlobalVDPDName(iscsi iscsiDisk) string { + return makeVDPDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface) } func (util *ISCSIUtil) persistISCSI(conf iscsiDisk, mnt string) error { @@ -184,7 +198,6 @@ func (util *ISCSIUtil) persistISCSI(conf iscsiDisk, mnt string) error { } func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error { - // NOTE: The iscsi config json is not deleted after logging out from target portals. file := path.Join(mnt, "iscsi.json") fp, err := os.Open(file) if err != nil { @@ -198,6 +211,7 @@ func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error { return nil } +// AttachDisk returns devicePath of volume if attach succeeded otherwise returns error func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { var devicePath string var devicePaths []string @@ -240,9 +254,9 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { return "", fmt.Errorf("Could not parse iface file for %s", b.Iface) } if iscsiTransport == "tcp" { - devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.lun}, "-") + devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") } else { - devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.lun}, "-") + devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") } if exist := waitForPathToExist(&devicePath, 1, iscsiTransport); exist { @@ -307,26 +321,6 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { //Make sure we use a valid devicepath to find mpio device. 
devicePath = devicePaths[0] - - // mount it - globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk) - notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath) - if err != nil && !os.IsNotExist(err) { - return "", fmt.Errorf("Heuristic determination of mount point failed:%v", err) - } - if !notMnt { - glog.Infof("iscsi: %s already mounted", globalPDPath) - return "", nil - } - - if err := os.MkdirAll(globalPDPath, 0750); err != nil { - glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) - return "", err - } - - // Persist iscsi disk config to json file for DetachDisk path - util.persistISCSI(*(b.iscsiDisk), globalPDPath) - for _, path := range devicePaths { // There shouldnt be any empty device paths. However adding this check // for safer side to avoid the possibility of an empty entry. @@ -339,14 +333,67 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { break } } - err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) - if err != nil { - glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) - } - - return devicePath, err + glog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath) + // run global mount path related operations based on volumeMode + return globalPDPathOperation(b)(b, devicePath, util) } +// globalPDPathOperation returns global mount path related operations based on volumeMode. +// If the volumeMode is 'Filesystem' or not defined, plugin needs to create a dir, persist +// iscsi configrations, and then format/mount the volume. +// If the volumeMode is 'Block', plugin creates a dir and persists iscsi configrations. +// Since volume type is block, plugin doesn't need to format/mount the volume. +func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *ISCSIUtil) (string, error) { + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + glog.V(5).Infof("iscsi: AttachDisk volumeMode: %s", b.volumeMode) + if b.volumeMode == v1.PersistentVolumeBlock { + // If the volumeMode is 'Block', plugin don't need to format the volume. + return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) { + globalPDPath := b.manager.MakeGlobalVDPDName(*b.iscsiDisk) + // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id} + if err := os.MkdirAll(globalPDPath, 0750); err != nil { + glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + return "", err + } + // Persist iscsi disk config to json file for DetachDisk path + util.persistISCSI(*(b.iscsiDisk), globalPDPath) + + return devicePath, nil + } + } + } + // If the volumeMode is 'Filesystem', plugin needs to format the volume + // and mount it to globalPDPath. 
+ return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) { + globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk) + notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath) + if err != nil && !os.IsNotExist(err) { + return "", fmt.Errorf("Heuristic determination of mount point failed:%v", err) + } + // Return confirmed devicePath to caller + if !notMnt { + glog.Infof("iscsi: %s already mounted", globalPDPath) + return devicePath, nil + } + // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id} + if err := os.MkdirAll(globalPDPath, 0750); err != nil { + glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + return "", err + } + // Persist iscsi disk config to json file for DetachDisk path + util.persistISCSI(*(b.iscsiDisk), globalPDPath) + + err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) + if err != nil { + glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) + } + + return devicePath, nil + } +} + +// DetachDisk unmounts and detaches a volume from node func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { _, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath) if err != nil { @@ -401,9 +448,91 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { } portals := removeDuplicate(bkpPortal) if len(portals) == 0 { - return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations.") + return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations") } + err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found) + if err != nil { + return fmt.Errorf("failed to finish detachISCSIDisk, err: %v", err) + } + return nil +} + +// DetachBlockISCSIDisk removes loopback device for a volume and detaches a volume from node +func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) error { + if pathExists, pathErr := volumeutil.PathExists(mapPath); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) + return nil + } + // If we arrive here, device is no longer used, see if need to logout the target + // device: 192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0 + device, _, err := extractDeviceAndPrefix(mapPath) + if err != nil { + return err + } + var bkpPortal []string + var volName, iqn, lun, iface, initiatorName string + found := true + // load iscsi disk config from json file + if err := util.loadISCSI(c.iscsiDisk, mapPath); err == nil { + bkpPortal, iqn, lun, iface, volName = c.iscsiDisk.Portals, c.iscsiDisk.Iqn, c.iscsiDisk.Lun, c.iscsiDisk.Iface, c.iscsiDisk.VolName + initiatorName = c.iscsiDisk.InitiatorName + } else { + // If the iscsi disk config is not found, fall back to the original behavior. + // This portal/iqn/iface is no longer referenced, log out. + // Extract the portal and iqn from device path. + bkpPortal = make([]string, 1) + bkpPortal[0], iqn, err = extractPortalAndIqn(device) + if err != nil { + return err + } + arr := strings.Split(device, "-lun-") + if len(arr) < 2 { + return fmt.Errorf("failed to retreive lun from mapPath: %v", mapPath) + } + lun = arr[1] + // Extract the iface from the mountPath and use it to log out. 
If the iface + // is not found, maintain the previous behavior to facilitate kubelet upgrade. + // Logout may fail as no session may exist for the portal/IQN on the specified interface. + iface, found = extractIface(mapPath) + } + portals := removeDuplicate(bkpPortal) + if len(portals) == 0 { + return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations") + } + + devicePath := getDevByPath(portals[0], iqn, lun) + glog.V(5).Infof("iscsi: devicePath: %s", devicePath) + if _, err = os.Stat(devicePath); err != nil { + return fmt.Errorf("failed to validate devicePath: %s", devicePath) + } + // check if the dev is using mpio and if so mount it via the dm-XX device + if mappedDevicePath := c.deviceUtil.FindMultipathDeviceForDevice(devicePath); mappedDevicePath != "" { + devicePath = mappedDevicePath + } + // Get loopback device which takes fd lock for devicePath before + // detaching a volume from node. + blkUtil := volumeutil.NewBlockVolumePathHandler() + loop, err := volumeutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath) + if err != nil { + return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err) + } + // Detach a volume from kubelet node + err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found) + if err != nil { + return fmt.Errorf("failed to finish detachISCSIDisk, err: %v", err) + } + // The volume was successfully detached from node. We can safely remove the loopback. + err = volumeutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) + if err != nil { + return fmt.Errorf("failed to remove loopback :%v, err: %v", loop, err) + } + return nil +} + +func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, iface, volName, initiatorName string, found bool) error { for _, portal := range portals { logoutArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "--logout"} deleteArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "-o", "delete"} @@ -412,13 +541,13 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { deleteArgs = append(deleteArgs, []string{"-I", iface}...) } glog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface) - out, err := c.exec.Run("iscsiadm", logoutArgs...) + out, err := exec.Run("iscsiadm", logoutArgs...) if err != nil { glog.Errorf("iscsi: failed to detach disk Error: %s", string(out)) } // Delete the node record glog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn) - out, err = c.exec.Run("iscsiadm", deleteArgs...) + out, err = exec.Run("iscsiadm", deleteArgs...) if err != nil { glog.Errorf("iscsi: failed to delete node record Error: %s", string(out)) } @@ -427,7 +556,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { // If the iface is not created via iscsi plugin, skip to delete if initiatorName != "" && found && iface == (portals[0]+":"+volName) { deleteArgs := []string{"-m", "iface", "-I", iface, "-o", "delete"} - out, err := c.exec.Run("iscsiadm", deleteArgs...) + out, err := exec.Run("iscsiadm", deleteArgs...) 
if err != nil { glog.Errorf("iscsi: failed to delete iface Error: %s", string(out)) } @@ -436,6 +565,10 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { return nil } +func getDevByPath(portal, iqn, lun string) string { + return "/dev/disk/by-path/ip-" + portal + "-iscsi-" + iqn + "-lun-" + lun +} + func extractTransportname(ifaceOutput string) (iscsiTransport string) { rexOutput := ifaceTransportNameRe.FindStringSubmatch(ifaceOutput) if rexOutput == nil { From 96509d4f5b91d8ace521dc04a2ed150d4c057090 Mon Sep 17 00:00:00 2001 From: mtanino Date: Mon, 27 Nov 2017 16:12:33 -0500 Subject: [PATCH 098/264] generated code for iSCSI plugin change --- pkg/volume/iscsi/BUILD | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/volume/iscsi/BUILD b/pkg/volume/iscsi/BUILD index e056ff00889..6da935b9552 100644 --- a/pkg/volume/iscsi/BUILD +++ b/pkg/volume/iscsi/BUILD @@ -17,14 +17,17 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/iscsi", deps = [ + "//pkg/features:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", + "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) From 4e1b5c6a3299327ff856d05443af8fa20ee760e7 Mon Sep 17 00:00:00 2001 From: Jesse Haka Date: Sun, 7 Jan 2018 11:05:17 +0200 Subject: [PATCH 099/264] move detach out of os volumes attach add test add test fix bazel fix tests change loglevel, remove else statement --- .../providers/openstack/openstack_volumes.go | 90 +++++++++++++----- pkg/volume/cinder/attacher.go | 31 +----- pkg/volume/cinder/attacher_test.go | 94 ++++++++++++++++--- pkg/volume/cinder/cinder.go | 3 +- 4 files changed, 151 insertions(+), 67 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index 8a530592845..4a441e4c347 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -26,6 +26,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" k8s_volume "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -319,33 +320,18 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if instanceID == volume.AttachedServerId { glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil - } else { - nodeName, err := os.GetNodeNameByID(volume.AttachedServerId) - attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId) - if err != nil { - glog.Error(attachErr) - return "", errors.New(attachErr) - } - // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 - devicePath := volume.AttachedDevice - danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) - glog.V(4).Infof("volume %s is already attached to node %s path %s", volumeID, nodeName, devicePath) - // check special case, if node is deleted from cluster but exist still in openstack - 
// we need to check can we detach the cinder, node is deleted from cluster if state is not ACTIVE - srv, err := getServerByName(cClient, nodeName, false) - if err != nil { - return "", err - } - if srv.Status != "ACTIVE" { - err = os.DetachDisk(volume.AttachedServerId, volumeID) - if err != nil { - glog.Error(err) - return "", err - } - glog.V(4).Infof("detached volume %s node state was %s", volumeID, srv.Status) - } - return "", danglingErr } + nodeName, err := os.GetNodeNameByID(volume.AttachedServerId) + attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId) + if err != nil { + glog.Error(attachErr) + return "", errors.New(attachErr) + } + // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 + devicePath := volume.AttachedDevice + danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) + glog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName) + return "", danglingErr } startTime := time.Now() @@ -605,6 +591,9 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, // DiskIsAttached queries if a volume is attached to a compute instance func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) { + if instanceID == "" { + glog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID) + } volume, err := os.getVolume(volumeID) if err != nil { return false, err @@ -613,6 +602,29 @@ func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) { return instanceID == volume.AttachedServerId, nil } +// DiskIsAttachedByName queries if a volume is attached to a compute instance by name +func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) { + cClient, err := os.NewComputeV2() + if err != nil { + return false, "", err + } + srv, err := getServerByName(cClient, nodeName, false) + if err != nil { + if err == ErrNotFound { + // instance not found anymore in cloudprovider, assume that cinder is detached + return false, "", nil + } else { + return false, "", err + } + } + instanceID := "/" + srv.ID + if ind := strings.LastIndex(instanceID, "/"); ind >= 0 { + instanceID = instanceID[(ind + 1):] + } + attached, err := os.DiskIsAttached(instanceID, volumeID) + return attached, instanceID, err +} + // DisksAreAttached queries if a list of volumes are attached to a compute instance func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) { attached := make(map[string]bool) @@ -627,6 +639,32 @@ func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (ma return attached, nil } +// DisksAreAttachedByName queries if a list of volumes are attached to a compute instance by name +func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) { + attached := make(map[string]bool) + cClient, err := os.NewComputeV2() + if err != nil { + return attached, err + } + srv, err := getServerByName(cClient, nodeName, false) + if err != nil { + if err == ErrNotFound { + // instance not found anymore, mark all volumes as detached + for _, volumeID := range volumeIDs { + attached[volumeID] = false + } + return attached, nil + } else { + return attached, err + } + } + instanceID := "/" + srv.ID + if ind := strings.LastIndex(instanceID, "/"); ind >= 0 { + instanceID = 
instanceID[(ind + 1):] + } + return os.DisksAreAttached(instanceID, volumeIDs) +} + // diskIsUsed returns true a disk is attached to any node. func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) { volume, err := os.getVolume(volumeID) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index 87b58dae01b..65b24640cbb 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -27,7 +27,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -187,23 +186,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod volumeSpecMap[volumeSource.VolumeID] = spec } - instanceID, err := attacher.nodeInstanceID(nodeName) - if err != nil { - if err == cloudprovider.InstanceNotFound { - // If node doesn't exist, OpenStack Nova will assume the volumes are not attached to it. - // Mark the volumes as detached and return false without error. - glog.Warningf("VolumesAreAttached: node %q does not exist.", nodeName) - for spec := range volumesAttachedCheck { - volumesAttachedCheck[spec] = false - } - - return volumesAttachedCheck, nil - } - - return volumesAttachedCheck, err - } - - attachedResult, err := attacher.cinderProvider.DisksAreAttached(instanceID, volumeIDList) + attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList) if err != nil { // Log error and continue with attach glog.Errorf( @@ -381,20 +364,10 @@ func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error { volumeID := path.Base(volumeName) - instances, res := detacher.cinderProvider.Instances() - if !res { - return fmt.Errorf("failed to list openstack instances") - } - instanceID, err := instances.InstanceID(nodeName) - if ind := strings.LastIndex(instanceID, "/"); ind >= 0 { - instanceID = instanceID[(ind + 1):] - } - if err := detacher.waitOperationFinished(volumeID); err != nil { return err } - - attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID) + attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID) if err != nil { // Log error and continue with detach glog.Errorf( diff --git a/pkg/volume/cinder/attacher_test.go b/pkg/volume/cinder/attacher_test.go index f868db675bf..ddc307cd5fe 100644 --- a/pkg/volume/cinder/attacher_test.go +++ b/pkg/volume/cinder/attacher_test.go @@ -132,7 +132,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Positive", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil}, attach: attachCall{instanceID, volumeID, "", nil}, diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil}, test: func(testcase *testcase) (string, error) { @@ -147,7 +147,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Positive_AlreadyAttached", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil}, diskPath: diskPathCall{instanceID, volumeID, 
"/dev/sda", nil}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) @@ -173,7 +173,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Negative", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError}, attach: attachCall{instanceID, volumeID, "/dev/sda", attachError}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) @@ -187,7 +187,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Negative_DiskPatchFails", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil}, attach: attachCall{instanceID, volumeID, "", nil}, diskPath: diskPathCall{instanceID, volumeID, "", diskPathError}, test: func(testcase *testcase) (string, error) { @@ -201,7 +201,7 @@ func TestAttachDetach(t *testing.T) { { name: "VolumesAreAttached_Positive", instanceID: instanceID, - disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: true}, nil}, + disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: true}, nil}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName) @@ -214,7 +214,7 @@ func TestAttachDetach(t *testing.T) { { name: "VolumesAreAttached_Negative", instanceID: instanceID, - disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: false}, nil}, + disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: false}, nil}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName) @@ -227,7 +227,7 @@ func TestAttachDetach(t *testing.T) { { name: "VolumesAreAttached_CinderFailed", instanceID: instanceID, - disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, nil, disksCheckError}, + disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, nil, disksCheckError}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName) @@ -242,7 +242,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Positive", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil}, detach: detachCall{instanceID, volumeID, nil}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) @@ -255,7 +255,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Positive_AlreadyDetached", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) return "", 
detacher.Detach(volumeID, nodeName) @@ -267,7 +267,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Positive_CheckFails", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError}, detach: detachCall{instanceID, volumeID, nil}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) @@ -280,7 +280,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Negative", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError}, detach: detachCall{instanceID, volumeID, detachError}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) @@ -426,6 +426,7 @@ type operationPendingCall struct { type diskIsAttachedCall struct { instanceID string + nodeName types.NodeName volumeID string isAttached bool ret error @@ -440,6 +441,7 @@ type diskPathCall struct { type disksAreAttachedCall struct { instanceID string + nodeName types.NodeName volumeIDs []string areAttached map[string]bool ret error @@ -572,6 +574,46 @@ func (testcase *testcase) ShouldTrustDevicePath() bool { return true } +func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) { + expected := &testcase.diskIsAttached + instanceID := expected.instanceID + // If testcase call DetachDisk*, return false + if *testcase.attachOrDetach == detachStatus { + return false, instanceID, nil + } + + // If testcase call AttachDisk*, return true + if *testcase.attachOrDetach == attachStatus { + return true, instanceID, nil + } + + if expected.nodeName != nodeName { + testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected nodename %s, got %s", expected.nodeName, nodeName) + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong nodename") + } + + if expected.volumeID == "" && expected.instanceID == "" { + // testcase.diskIsAttached looks uninitialized, test did not expect to + // call DiskIsAttached + testcase.t.Errorf("Unexpected DiskIsAttachedByName call!") + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call!") + } + + if expected.volumeID != volumeID { + testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected volumeID %s, got %s", expected.volumeID, volumeID) + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong volumeID") + } + + if expected.instanceID != instanceID { + testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID) + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong instanceID") + } + + glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret) + + return expected.isAttached, expected.instanceID, expected.ret +} + func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) { return "", "", false, errors.New("Not implemented") } @@ -626,6 +668,36 @@ func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string return 
expected.areAttached, expected.ret } +func (testcase *testcase) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) { + expected := &testcase.disksAreAttached + areAttached := make(map[string]bool) + + instanceID := expected.instanceID + if expected.nodeName != nodeName { + testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected nodeName %s, got %s", expected.nodeName, nodeName) + return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong nodename") + } + if len(expected.volumeIDs) == 0 && expected.instanceID == "" { + // testcase.volumeIDs looks uninitialized, test did not expect to call DisksAreAttached + testcase.t.Errorf("Unexpected DisksAreAttachedByName call!") + return areAttached, errors.New("Unexpected DisksAreAttachedByName call") + } + + if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) { + testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs) + return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong volumeID") + } + + if expected.instanceID != instanceID { + testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID) + return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong instanceID") + } + + glog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret) + + return expected.areAttached, expected.ret +} + // Implementation of fake cloudprovider.Instances type instances struct { instanceID string diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index c5b785cd0ab..07fa459a98d 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -52,7 +52,8 @@ type CinderProvider interface { GetAttachmentDiskPath(instanceID, volumeID string) (string, error) OperationPending(diskName string) (bool, string, error) DiskIsAttached(instanceID, volumeID string) (bool, error) - DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) + DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) + DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) ShouldTrustDevicePath() bool Instances() (cloudprovider.Instances, bool) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) From 877143e547921747d9fd14e2af776b34663d37dc Mon Sep 17 00:00:00 2001 From: David Eads Date: Wed, 10 Jan 2018 12:11:17 -0500 Subject: [PATCH 100/264] manuallly handle encoding and decoding in the scale client --- staging/src/k8s.io/client-go/scale/client.go | 47 +++++++++++++++----- staging/src/k8s.io/client-go/scale/util.go | 19 ++++++++ 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/staging/src/k8s.io/client-go/scale/client.go b/staging/src/k8s.io/client-go/scale/client.go index 3f85197a0b6..07c6098620b 100644 --- a/staging/src/k8s.io/client-go/scale/client.go +++ b/staging/src/k8s.io/client-go/scale/client.go @@ -21,6 +21,7 @@ import ( autoscaling "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/dynamic" @@ -129,21 +130,29 @@ func (c *namespacedScaleClient) Get(resource schema.GroupResource, name string) return nil, fmt.Errorf("unable to get client 
for %s: %v", resource.String(), err) } - rawObj, err := c.client.clientBase.Get(). + result := c.client.clientBase.Get(). AbsPath(path). Namespace(c.namespace). Resource(gvr.Resource). Name(name). SubResource("scale"). - Do(). - Get() + Do() + if err := result.Error(); err != nil { + return nil, fmt.Errorf("could not fetch the scale for %s %s: %v", resource.String(), name, err) + } + scaleBytes, err := result.Raw() + if err != nil { + return nil, err + } + decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) + rawScaleObj, err := runtime.Decode(decoder, scaleBytes) if err != nil { return nil, err } // convert whatever this is to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawObj, autoscaling.SchemeGroupVersion) + scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) if err != nil { return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) } @@ -158,7 +167,7 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut } // Currently, a /scale endpoint can receive and return different scale types. - // Until we hvae support for the alternative API representations proposal, + // Until we have support for the alternative API representations proposal, // we need to deal with sending and accepting differnet API versions. // figure out what scale we actually need here @@ -170,25 +179,39 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut // convert this to whatever this endpoint wants scaleUpdate, err := scaleConverter.ConvertToVersion(scale, desiredGVK.GroupVersion()) if err != nil { - return nil, fmt.Errorf("could not convert scale update to internal Scale: %v", err) + return nil, fmt.Errorf("could not convert scale update to external Scale: %v", err) + } + encoder := scaleConverter.codecs.LegacyCodec(desiredGVK.GroupVersion()) + scaleUpdateBytes, err := runtime.Encode(encoder, scaleUpdate) + if err != nil { + return nil, fmt.Errorf("could not encode scale update to external Scale: %v", err) } - rawObj, err := c.client.clientBase.Put(). + result := c.client.clientBase.Put(). AbsPath(path). Namespace(c.namespace). Resource(gvr.Resource). Name(scale.Name). SubResource("scale"). - Body(scaleUpdate). - Do(). - Get() + Body(scaleUpdateBytes). + Do() + if err := result.Error(); err != nil { + panic(err) + return nil, fmt.Errorf("could not update the scale for %s %s: %v", resource.String(), scale.Name, err) + } + scaleBytes, err := result.Raw() if err != nil { - return nil, fmt.Errorf("could not fetch the scale for %s %s: %v", resource.String(), scale.Name, err) + return nil, err + } + decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) 
+ rawScaleObj, err := runtime.Decode(decoder, scaleBytes) + if err != nil { + return nil, err } // convert whatever this is back to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawObj, autoscaling.SchemeGroupVersion) + scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) if err != nil { return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) } diff --git a/staging/src/k8s.io/client-go/scale/util.go b/staging/src/k8s.io/client-go/scale/util.go index 9eb10853605..46b5c4d1e2d 100644 --- a/staging/src/k8s.io/client-go/scale/util.go +++ b/staging/src/k8s.io/client-go/scale/util.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" scalescheme "k8s.io/client-go/scale/scheme" scaleappsint "k8s.io/client-go/scale/scheme/appsint" @@ -124,6 +125,7 @@ func NewDiscoveryScaleKindResolver(client discovery.ServerResourcesInterface) Sc // ScaleConverter knows how to convert between external scale versions. type ScaleConverter struct { scheme *runtime.Scheme + codecs serializer.CodecFactory internalVersioner runtime.GroupVersioner } @@ -141,6 +143,7 @@ func NewScaleConverter() *ScaleConverter { return &ScaleConverter{ scheme: scheme, + codecs: serializer.NewCodecFactory(scheme), internalVersioner: runtime.NewMultiGroupVersioner( scalescheme.SchemeGroupVersion, schema.GroupKind{Group: scaleext.GroupName, Kind: "Scale"}, @@ -156,6 +159,22 @@ func (c *ScaleConverter) Scheme() *runtime.Scheme { return c.scheme } +func (c *ScaleConverter) Codecs() serializer.CodecFactory { + return c.codecs +} + +func (c *ScaleConverter) ScaleVersions() []schema.GroupVersion { + return []schema.GroupVersion{ + scaleautoscaling.SchemeGroupVersion, + scalescheme.SchemeGroupVersion, + scaleext.SchemeGroupVersion, + scaleextint.SchemeGroupVersion, + scaleappsint.SchemeGroupVersion, + scaleappsv1beta1.SchemeGroupVersion, + scaleappsv1beta2.SchemeGroupVersion, + } +} + // ConvertToVersion converts the given *external* input object to the given output *external* output group-version. func (c *ScaleConverter) ConvertToVersion(in runtime.Object, outVersion schema.GroupVersion) (runtime.Object, error) { scaleInt, err := c.scheme.ConvertToVersion(in, c.internalVersioner) From bf60b7aa5538280c509128ed9312650e1ba26521 Mon Sep 17 00:00:00 2001 From: Lee Verberne Date: Wed, 10 Jan 2018 17:47:13 +0100 Subject: [PATCH 101/264] Mark kubelet PID namespace flag as deprecated The `--docker-disable-shared-pid` flag will be removed once per-pod configurable process namespace sharing becomes available. Mark it deprecated to notify cluster admins. 
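
As a hedged illustration of the mechanism (not part of this patch), marking a flag
deprecated with the spf13/pflag package looks roughly like the sketch below. The flag
set name, default value, and messages are placeholders; only the
--docker-disable-shared-pid flag name itself comes from this change.

// Minimal, self-contained sketch: MarkDeprecated keeps the flag working
// but makes pflag print a deprecation notice whenever the flag is set.
package main

import (
    "fmt"

    "github.com/spf13/pflag"
)

func main() {
    fs := pflag.NewFlagSet("example", pflag.ExitOnError)
    disableSharedPID := fs.Bool("docker-disable-shared-pid", false,
        "Revert to isolated PID namespaces per container.")

    // After this call the flag still parses, but setting it warns the operator.
    fs.MarkDeprecated("docker-disable-shared-pid",
        "will be replaced by per-pod PID namespace sharing configurable via the API")

    fs.Parse([]string{"--docker-disable-shared-pid=true"})
    fmt.Println("docker-disable-shared-pid =", *disableSharedPID)
}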
--- pkg/kubelet/config/flags.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/kubelet/config/flags.go b/pkg/kubelet/config/flags.go index 705b8babfd2..f951ee92257 100644 --- a/pkg/kubelet/config/flags.go +++ b/pkg/kubelet/config/flags.go @@ -89,7 +89,8 @@ func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) { fs.MarkHidden("experimental-dockershim") fs.StringVar(&s.DockershimRootDirectory, "experimental-dockershim-root-directory", s.DockershimRootDirectory, "Path to the dockershim root directory.") fs.MarkHidden("experimental-dockershim-root-directory") - fs.BoolVar(&s.DockerDisableSharedPID, "docker-disable-shared-pid", s.DockerDisableSharedPID, "The Container Runtime Interface (CRI) defaults to using a shared PID namespace for containers in a pod when running with Docker 1.13.1 or higher. Setting this flag reverts to the previous behavior of isolated PID namespaces. This ability will be removed in a future Kubernetes release.") + fs.BoolVar(&s.DockerDisableSharedPID, "docker-disable-shared-pid", s.DockerDisableSharedPID, "Setting this to false causes Kubernetes to create pods using a shared process namespace for containers in a pod when running with Docker 1.13.1 or higher. A future Kubernetes release will make this configurable instead in the API.") + fs.MarkDeprecated("docker-disable-shared-pid", "will be removed in a future release. This option will be replaced by PID namespace sharing that is configurable per-pod using the API. See https://features.k8s.io/495") fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, "The image whose network/ipc namespaces containers in each pod will use.") fs.StringVar(&s.DockerEndpoint, "docker-endpoint", s.DockerEndpoint, "Use this for the docker endpoint to communicate with") fs.DurationVar(&s.ImagePullProgressDeadline.Duration, "image-pull-progress-deadline", s.ImagePullProgressDeadline.Duration, "If no pulling progress is made before this deadline, the image pulling will be cancelled.") From 032fa206af76666c10fd763d5b6ba4165ba810ff Mon Sep 17 00:00:00 2001 From: Balaji Subramaniam Date: Mon, 18 Dec 2017 10:00:26 -0800 Subject: [PATCH 102/264] Fix policy conflict in the CPU manager node e2e test. 
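
Why the conflict happens (a hedged sketch, not kubelet code): the CPU Manager
checkpoints its active policy in /var/lib/kubelet/cpu_manager_state, and restarting the
kubelet with a different policy trips over that stale checkpoint, so the test removes
the file before reconfiguring. Only the state file path below comes from this test; the
JSON field name is an assumption for illustration.

// Hypothetical sketch of the kind of check that produces the conflict.
package main

import (
    "encoding/json"
    "fmt"
    "os"
)

type checkpoint struct {
    PolicyName string `json:"policyName"` // assumed field name, for illustration
}

// policyConflicts reports whether an on-disk checkpoint records a policy
// different from the one the kubelet is about to be restarted with.
func policyConflicts(stateFile, configuredPolicy string) (bool, error) {
    data, err := os.ReadFile(stateFile)
    if os.IsNotExist(err) {
        return false, nil // no checkpoint on disk, nothing to conflict with
    }
    if err != nil {
        return false, err
    }
    var cp checkpoint
    if err := json.Unmarshal(data, &cp); err != nil {
        return false, err
    }
    return cp.PolicyName != configuredPolicy, nil
}

func main() {
    conflict, err := policyConflicts("/var/lib/kubelet/cpu_manager_state", "static")
    if err != nil {
        fmt.Println("error reading checkpoint:", err)
        return
    }
    // When true, the test deletes the state file before restarting the
    // kubelet with the "static" policy, which is what this patch does.
    fmt.Println("policy conflicts with checkpoint:", conflict)
}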
--- test/e2e_node/cpu_manager_test.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 9ac0bfca4a6..61a3b66cb5d 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -26,7 +26,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/features" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" @@ -137,7 +136,16 @@ func getCPUSiblingList(cpuRes int64) string { return string(out) } +func deleteStateFile() { + err := exec.Command("/bin/sh", "-c", "rm -f /var/lib/kubelet/cpu_manager_state").Run() + framework.ExpectNoError(err, "error deleting state file") +} + func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletConfiguration) { + // Delete the CPU Manager state file so that the old Kubelet configuration + // can take effect.i + deleteStateFile() + if oldCfg != nil { framework.ExpectNoError(setKubeletConfiguration(f, oldCfg)) } @@ -155,8 +163,15 @@ func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.Ku newCfg.FeatureGates = make(map[string]bool) } - // Enable CPU Manager using feature gate. - newCfg.FeatureGates[string(features.CPUManager)] = true + // After graduation of the CPU Manager feature to Beta, the CPU Manager + // "none" policy is ON by default. But when we set the CPU Manager policy to + // "static" in this test and the Kubelet is restarted so that "static" + // policy can take effect, there will always be a conflict with the state + // checkpointed in the disk (i.e., the policy checkpointed in the disk will + // be "none" whereas we are trying to restart Kubelet with "static" + // policy). Therefore, we delete the state file so that we can proceed + // with the tests. + deleteStateFile() // Set the CPU Manager policy to static. newCfg.CPUManagerPolicy = string(cpumanager.PolicyStatic) From 316abc7fe08f276c52f35ea7d882216fdb631243 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Bauer?= Date: Wed, 10 Jan 2018 10:39:28 +0100 Subject: [PATCH 103/264] added fluent-plugin-detect-exceptions plugin to fluentd-es-image added configmap changes raised fluentd-es-configmap version fixed missing version match raised image version --- .../fluentd-es-configmap.yaml | 14 ++++++++++++-- .../fluentd-elasticsearch/fluentd-es-ds.yaml | 12 ++++++------ .../fluentd-elasticsearch/fluentd-es-image/Gemfile | 1 + .../fluentd-es-image/Makefile | 2 +- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml index 09fbad0ebf4..28ffb1c03b1 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml @@ -105,7 +105,7 @@ data: path /var/log/containers/*.log pos_file /var/log/es-containers.log.pos time_format %Y-%m-%dT%H:%M:%S.%NZ - tag kubernetes.* + tag raw.kubernetes.* read_from_head true format multi_format @@ -118,6 +118,16 @@ data: time_format %Y-%m-%dT%H:%M:%S.%N%:z + # Detect exceptions in the log output and forward them as one log entry. 
+ + @type detect_exceptions + remove_tag_prefix raw + message log + stream stream + multiline_flush_interval 5 + max_bytes 500000 + max_lines 1000 + system.input.conf: |- # Example: # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 @@ -367,7 +377,7 @@ data: num_threads 2 metadata: - name: fluentd-es-config-v0.1.1 + name: fluentd-es-config-v0.1.2 namespace: kube-system labels: addonmanager.kubernetes.io/mode: Reconcile diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index 74242adce74..405bfdd4c24 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -48,24 +48,24 @@ roleRef: apiVersion: apps/v1beta2 kind: DaemonSet metadata: - name: fluentd-es-v2.0.2 + name: fluentd-es-v2.0.3 namespace: kube-system labels: k8s-app: fluentd-es - version: v2.0.2 + version: v2.0.3 kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: selector: matchLabels: k8s-app: fluentd-es - version: v2.0.2 + version: v2.0.3 template: metadata: labels: k8s-app: fluentd-es kubernetes.io/cluster-service: "true" - version: v2.0.2 + version: v2.0.3 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). @@ -75,7 +75,7 @@ spec: serviceAccountName: fluentd-es containers: - name: fluentd-es - image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2 + image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.3 env: - name: FLUENTD_ARGS value: --no-supervisor -q @@ -112,4 +112,4 @@ spec: path: /usr/lib64 - name: config-volume configMap: - name: fluentd-es-config-v0.1.1 + name: fluentd-es-config-v0.1.2 diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile index c936b40f3c4..1fab8f51f78 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile @@ -5,6 +5,7 @@ gem 'activesupport', '~>4.2.6' gem 'fluent-plugin-kubernetes_metadata_filter', '~>0.27.0' gem 'fluent-plugin-elasticsearch', '~>1.9.5' gem 'fluent-plugin-systemd', '~>0.0.8' +gem 'fluent-plugin-detect-exceptions', '~>0.0.8' gem 'fluent-plugin-prometheus', '~>0.3.0' gem 'fluent-plugin-multi-format-parser', '~>0.1.1' gem 'oj', '~>2.18.1' diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index 0b5fa8a487c..9d161fa6693 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -16,7 +16,7 @@ PREFIX = gcr.io/google-containers IMAGE = fluentd-elasticsearch -TAG = v2.0.2 +TAG = v2.0.3 build: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . 
From 553a3f049b639b2838f55815a4c62d64b32dde21 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Wed, 10 Jan 2018 10:00:00 -0800 Subject: [PATCH 104/264] remove deprecated photon controller --- cluster/photon-controller/config-common.sh | 72 -- cluster/photon-controller/config-default.sh | 94 -- cluster/photon-controller/config-test.sh | 20 - cluster/photon-controller/setup-prereq.sh | 239 ---- cluster/photon-controller/templates/README | 4 - .../templates/create-dynamic-salt-files.sh | 127 -- .../photon-controller/templates/hostname.sh | 22 - .../templates/install-release.sh | 26 - .../templates/salt-master.sh | 59 - .../templates/salt-minion.sh | 51 - cluster/photon-controller/util.sh | 1110 ----------------- 11 files changed, 1824 deletions(-) delete mode 100644 cluster/photon-controller/config-common.sh delete mode 100755 cluster/photon-controller/config-default.sh delete mode 100755 cluster/photon-controller/config-test.sh delete mode 100755 cluster/photon-controller/setup-prereq.sh delete mode 100644 cluster/photon-controller/templates/README delete mode 100755 cluster/photon-controller/templates/create-dynamic-salt-files.sh delete mode 100755 cluster/photon-controller/templates/hostname.sh delete mode 100755 cluster/photon-controller/templates/install-release.sh delete mode 100755 cluster/photon-controller/templates/salt-master.sh delete mode 100755 cluster/photon-controller/templates/salt-minion.sh delete mode 100755 cluster/photon-controller/util.sh diff --git a/cluster/photon-controller/config-common.sh b/cluster/photon-controller/config-common.sh deleted file mode 100644 index 412eb26ba2f..00000000000 --- a/cluster/photon-controller/config-common.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -########################################################## -# -# These parameters describe objects we are using from -# Photon Controller. They are all assumed to be pre-existing. -# -# Note: if you want help in creating them, you can use -# the setup-prereq.sh script, which will create any of these -# that do not already exist. -# -########################################################## - -# Pre-created tenant for Kubernetes to use -PHOTON_TENANT=kube-tenant - -# Pre-created project in PHOTON_TENANT for Kubernetes to use -PHOTON_PROJECT=kube-project - -# Pre-created VM flavor for Kubernetes master to use -# Can be same as master -# We recommend at least 1GB of memory -PHOTON_MASTER_FLAVOR=kube-vm - -# Pre-created VM flavor for Kubernetes node to use -# Can be same as master -# We recommend at least 2GB of memory -PHOTON_NODE_FLAVOR=kube-vm - -# Pre-created disk flavor for Kubernetes to use -PHOTON_DISK_FLAVOR=kube-disk - -# Pre-created Debian 8 image with kube user uploaded to Photon Controller -# Note: While Photon Controller allows multiple images to have the same -# name, we assume that there is exactly one image with this name. 
-PHOTON_IMAGE=kube - -########################################################## -# -# Parameters just for the setup-prereq.sh script: not used -# elsewhere. If you create the above objects by hand, you -# do not need to edit these. -# -# Note that setup-prereq.sh also creates the objects -# above. -# -########################################################## - -# The specifications for the master and node flavors -SETUP_MASTER_FLAVOR_SPEC="vm 1 COUNT, vm.cpu 1 COUNT, vm.memory 2 GB" -SETUP_NODE_FLAVOR_SPEC=${SETUP_MASTER_FLAVOR_SPEC} - -# The specification for the ephemeral disk flavor. -SETUP_DISK_FLAVOR_SPEC="ephemeral-disk 1 COUNT" - -# The specification for the tenant resource ticket and the project resources -SETUP_TICKET_SPEC="vm.memory 1000 GB, vm 1000 COUNT" -SETUP_PROJECT_SPEC="${SETUP_TICKET_SPEC}" diff --git a/cluster/photon-controller/config-default.sh b/cluster/photon-controller/config-default.sh deleted file mode 100755 index b2f71dd8c51..00000000000 --- a/cluster/photon-controller/config-default.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -########################################################## -# -# Common parameters for Kubernetes -# -########################################################## - -# Default number of nodes to make. You can change this as needed -NUM_NODES=3 - -# Range of IPs assigned to pods -NODE_IP_RANGES="10.244.0.0/16" - -# IPs used by Kubernetes master -MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" - -# Range of IPs assigned by Kubernetes to services -SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" - -########################################################## -# -# Advanced parameters for Kubernetes -# -########################################################## - -# The instance prefix is the beginning of the name given to each VM we create -# If this is changed, you can have multiple kubernetes clusters per project -# Note that even if you don't change it, each tenant/project can have its own -# Kubernetes cluster -INSTANCE_PREFIX=kubernetes - -# Name of the user used to configure the VM -# We use cloud-init to create the user -VM_USER=kube - -# SSH options for how we connect to the Kubernetes VMs -# We set the user known hosts file to /dev/null because we are connecting to new VMs. -# When working in an environment where there is a lot of VM churn, VM IP addresses -# will be reused, and the ssh keys will be different. This prevents us from seeing error -# due to this, and it will not save the SSH key to the known_hosts file, so users will -# still have standard ssh security checks. -SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C" - -# Optional: Enable node logging. 
-# Note: currently untested -ENABLE_NODE_LOGGING=false -LOGGING_DESTINATION=elasticsearch - -# Optional: When set to true, Elasticsearch and Kibana will be setup -# Note: currently untested -ENABLE_CLUSTER_LOGGING=false -ELASTICSEARCH_LOGGING_REPLICAS=1 - -# Optional: Cluster monitoring to setup as part of the cluster bring up: -# none - No cluster monitoring setup -# influxdb - Heapster, InfluxDB, and Grafana -# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging -# Note: currently untested -ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" - -# Optional: Install cluster DNS. -ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -DNS_SERVER_IP="10.244.240.240" -DNS_DOMAIN="cluster.local" - -# Optional: Enable DNS horizontal autoscaler -ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" - -# Optional: Install Kubernetes UI -ENABLE_CLUSTER_UI=true - -# We need to configure subject alternate names (SANs) for the master's certificate -# we generate. While users will connect via the external IP, pods (like the UI) -# will connect via the cluster IP, from the SERVICE_CLUSTER_IP_RANGE. -# In addition to the extra SANS here, we'll also add one for for the service IP. -MASTER_EXTRA_SANS="DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN}" - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} diff --git a/cluster/photon-controller/config-test.sh b/cluster/photon-controller/config-test.sh deleted file mode 100755 index 87e68d72f79..00000000000 --- a/cluster/photon-controller/config-test.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NUM_NODES=2 -NODE_IP_RANGES="10.244.0.0/16" -MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" -SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" diff --git a/cluster/photon-controller/setup-prereq.sh b/cluster/photon-controller/setup-prereq.sh deleted file mode 100755 index 7212081327b..00000000000 --- a/cluster/photon-controller/setup-prereq.sh +++ /dev/null @@ -1,239 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This sets up a Photon Controller with the tenant, project, flavors -# and image that are needed to deploy Kubernetes with kube-up. 
-# -# This is not meant to be used in production: it creates resource tickets -# (quotas) that are arbitrary and not likely to work in your environment. -# However, it may be a quick way to get your environment set up to try out -# a Kubernetes installation. -# -# It uses the names for the tenant, project, and flavors as specified in the -# config-common.sh file -# -# If you want to do this by hand, this script is equivalent to the following -# Photon Controller commands (assuming you haven't edited config-common.sh -# to change the names) -# -# photon target set https://192.0.2.2 -# photon tenant create kube-tenant -# photon tenant set kube-tenant -# photon resource-ticket create --tenant kube-tenant --name kube-resources --limits "vm.memory 1000 GB, vm 1000 COUNT" -# photon project create --tenant kube-tenant --resource-ticket kube-resources --name kube-project --limits "vm.memory 1000 GB, vm 1000 COUNT" -# photon project set kube-project -# photon -n flavor create --name "kube-vm" --kind "vm" --cost "vm 1 COUNT, vm.cpu 1 COUNT, vm.memory 2 GB" -# photon -n flavor create --name "kube-disk" --kind "ephemeral-disk" --cost "ephemeral-disk 1 COUNT" -# photon image create kube.vmdk -n kube-image -i EAGER -# -# Note that the kube.vmdk can be downloaded as specified in the documentation. - -set -o errexit -set -o nounset -set -o pipefail - -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. -# shellcheck source=./util.sh -source "${KUBE_ROOT}/cluster/photon-controller/util.sh" - -function main { - verify-cmd-in-path photon - set-target - create-tenant - create-project - create-vm-flavor "${PHOTON_MASTER_FLAVOR}" "${SETUP_MASTER_FLAVOR_SPEC}" - if [ "${PHOTON_MASTER_FLAVOR}" != "${PHOTON_NODE_FLAVOR}" ]; then - create-vm-flavor "${PHOTON_NODE_FLAVOR}" "${SETUP_NODE_FLAVOR_SPEC}" - fi - create-disk-flavor - create-image -} - -function parse-cmd-line { - PHOTON_TARGET=${1:-""} - PHOTON_VMDK=${2:-""} - - if [[ "${PHOTON_TARGET}" = "" || "${PHOTON_VMDK}" = "" ]]; then - echo "Usage: setup-prereq " - echo "Target should be a URL like https://192.0.2.1" - echo "" - echo "This will create the following, based on the configuration in config-common.sh" - echo " * A tenant named ${PHOTON_TENANT}" - echo " * A project named ${PHOTON_PROJECT}" - echo " * A VM flavor named ${PHOTON_MASTER_FLAVOR}" - echo " * A disk flavor named ${PHOTON_DISK_FLAVOR}" - echo "It will also upload the Kube VMDK" - echo "" - echo "It creates the tenant with a resource ticket (quota) that may" - echo "be inappropriate for your environment. For a production" - echo "environment, you should configure these to match your" - echo "environment." - exit 1 - fi - - echo "Photon Target: ${PHOTON_TARGET}" - echo "Photon VMDK: ${PHOTON_VMDK}" -} - -function set-target { - ${PHOTON} target set "${PHOTON_TARGET}" > /dev/null 2>&1 -} - -function create-tenant { - local rc=0 - local output - - ${PHOTON} tenant list | grep -q "\t${PHOTON_TENANT}$" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - echo "Tenant ${PHOTON_TENANT} already made, skipping" - else - echo "Making tenant ${PHOTON_TENANT}" - rc=0 - output=$(${PHOTON} tenant create "${PHOTON_TENANT}" 2>&1) || { - echo "ERROR: Could not create tenant \"${PHOTON_TENANT}\", exiting" - echo "Output from tenant creation:" - echo "${output}" - exit 1 - } - fi - ${PHOTON} tenant set "${PHOTON_TENANT}" > /dev/null 2>&1 -} - -function create-project { - local rc=0 - local output - - ${PHOTON} project list | grep -q "\t${PHOTON_PROJECT}\t" > /dev/null 2>&1 || rc=$? 
- if [[ ${rc} -eq 0 ]]; then - echo "Project ${PHOTON_PROJECT} already made, skipping" - else - echo "Making project ${PHOTON_PROJECT}" - rc=0 - output=$(${PHOTON} resource-ticket create --tenant "${PHOTON_TENANT}" --name "${PHOTON_TENANT}-resources" --limits "${SETUP_TICKET_SPEC}" 2>&1) || { - echo "ERROR: Could not create resource ticket, exiting" - echo "Output from resource ticket creation:" - echo "${output}" - exit 1 - } - - rc=0 - output=$(${PHOTON} project create --tenant "${PHOTON_TENANT}" --resource-ticket "${PHOTON_TENANT}-resources" --name "${PHOTON_PROJECT}" --limits "${SETUP_PROJECT_SPEC}" 2>&1) || { - echo "ERROR: Could not create project \"${PHOTON_PROJECT}\", exiting" - echo "Output from project creation:" - echo "${output}" - exit 1 - } - fi - ${PHOTON} project set "${PHOTON_PROJECT}" -} - -function create-vm-flavor { - local flavor_name=${1} - local flavor_spec=${2} - local rc=0 - local output - - ${PHOTON} flavor list | grep -q "\t${flavor_name}\t" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - check-flavor-ready "${flavor_name}" - echo "Flavor ${flavor_name} already made, skipping" - else - echo "Making VM flavor ${flavor_name}" - rc=0 - output=$(${PHOTON} -n flavor create --name "${flavor_name}" --kind "vm" --cost "${flavor_spec}" 2>&1) || { - echo "ERROR: Could not create vm flavor \"${flavor_name}\", exiting" - echo "Output from flavor creation:" - echo "${output}" - exit 1 - } - fi -} - -function create-disk-flavor { - local rc=0 - local output - - ${PHOTON} flavor list | grep -q "\t${PHOTON_DISK_FLAVOR}\t" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - check-flavor-ready "${PHOTON_DISK_FLAVOR}" - echo "Flavor ${PHOTON_DISK_FLAVOR} already made, skipping" - else - echo "Making disk flavor ${PHOTON_DISK_FLAVOR}" - rc=0 - output=$(${PHOTON} -n flavor create --name "${PHOTON_DISK_FLAVOR}" --kind "ephemeral-disk" --cost "${SETUP_DISK_FLAVOR_SPEC}" 2>&1) || { - echo "ERROR: Could not create disk flavor \"${PHOTON_DISK_FLAVOR}\", exiting" - echo "Output from flavor creation:" - echo "${output}" - exit 1 - } - fi -} - -function check-flavor-ready { - local flavor_name=${1} - local rc=0 - - local flavor_id - flavor_id=$(${PHOTON} flavor list | grep "\t${flavor_name}\t" | awk '{print $1}') || { - echo "ERROR: Found ${flavor_name} but cannot find it's id" - exit 1 - } - - ${PHOTON} flavor show "${flavor_id}" | grep "\tREADY\$" > /dev/null 2>&1 || { - echo "ERROR: Flavor \"${flavor_name}\" already exists but is not READY. Please delete or fix it." - exit 1 - } -} - -function create-image { - local rc=0 - local num_images - local output - - ${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep -q ERROR > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - echo "Warning: You have at least one ${PHOTON_IMAGE} image in the ERROR state. You may want to investigate." - echo "Images in the ERROR state will be ignored." - fi - - rc=0 - # We don't use grep -c because it exists non-zero when there are no matches, tell shellcheck - # shellcheck disable=SC2126 - num_images=$(${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep READY | wc -l) - if [[ "${num_images}" -gt 1 ]]; then - echo "Warning: You have more than one good ${PHOTON_IMAGE} image. You may want to remove duplicates." - fi - - ${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep -q READY > /dev/null 2>&1 || rc=$? 
- if [[ ${rc} -eq 0 ]]; then - echo "Image ${PHOTON_VMDK} already uploaded, skipping" - else - echo "Uploading image ${PHOTON_VMDK}" - rc=0 - output=$(${PHOTON} image create "${PHOTON_VMDK}" -n "${PHOTON_IMAGE}" -i EAGER 2>&1) || { - echo "ERROR: Could not upload image, exiting" - echo "Output from image create:" - echo "${output}" - exit 1 - } - fi -} - -# We don't want silent pipeline failure: we check for failure -set +o pipefail - -parse-cmd-line "$@" -main diff --git a/cluster/photon-controller/templates/README b/cluster/photon-controller/templates/README deleted file mode 100644 index b91d629fa0c..00000000000 --- a/cluster/photon-controller/templates/README +++ /dev/null @@ -1,4 +0,0 @@ -The scripts in this directory are not meant to be invoked -directly. Instead they are partial scripts that are combined into full -scripts by util.sh and are run on the Kubernetes nodes are part of the -setup. diff --git a/cluster/photon-controller/templates/create-dynamic-salt-files.sh b/cluster/photon-controller/templates/create-dynamic-salt-files.sh deleted file mode 100755 index 369fdb7095f..00000000000 --- a/cluster/photon-controller/templates/create-dynamic-salt-files.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#generate token files - -KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" -if [[ ! -f "${known_tokens_file}" ]]; then - - mkdir -p /srv/salt-overlay/salt/kube-apiserver - known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" - (umask u=rw,go= ; - echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file; - echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file) - - mkdir -p /srv/salt-overlay/salt/kubelet - kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" - (umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file) - kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig" - - mkdir -p /srv/salt-overlay/salt/kubelet - (umask 077; - cat > "${kubelet_kubeconfig_file}" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - server: https://${KUBE_MASTER_IP} - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kubelet - name: service-account-context -current-context: service-account-context -users: -- name: kubelet - user: - token: ${KUBELET_TOKEN} -EOF -) - - - mkdir -p /srv/salt-overlay/salt/kube-proxy - kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig" - # Make a kubeconfig file with the token. - # TODO(etune): put apiserver certs into secret too, and reference from authfile, - # so that "Insecure" is not needed. 
- (umask 077; - cat > "${kube_proxy_kubeconfig_file}" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kube-proxy - name: service-account-context -current-context: service-account-context -users: -- name: kube-proxy - user: - token: ${KUBE_PROXY_TOKEN} -EOF -) - - # Generate tokens for other "service accounts". Append to known_tokens. - # - # NB: If this list ever changes, this script actually has to - # change to detect the existence of this file, kill any deleted - # old tokens and add any new tokens (to handle the upgrade case). - service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns") - for account in "${service_accounts[@]}"; do - token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - echo "${token},${account},${account}" >> "${known_tokens_file}" - done -fi - -readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv" -if [[ ! -e "${BASIC_AUTH_FILE}" ]]; then - mkdir -p /srv/salt-overlay/salt/kube-apiserver - (umask 077; - echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}") -fi - - -# Create the overlay files for the salt tree. We create these in a separate -# place so that we can blow away the rest of the salt configs on a kube-push and -# re-apply these. - -mkdir -p /srv/salt-overlay/pillar -cat </srv/salt-overlay/pillar/cluster-params.sls -instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' -node_instance_prefix: $NODE_INSTANCE_PREFIX -service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE -enable_cluster_monitoring: "${ENABLE_CLUSTER_MONITORING:-none}" -enable_cluster_logging: "${ENABLE_CLUSTER_LOGGING:false}" -enable_cluster_ui: "${ENABLE_CLUSTER_UI:true}" -enable_node_logging: "${ENABLE_NODE_LOGGING:false}" -logging_destination: $LOGGING_DESTINATION -elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS -enable_cluster_dns: "${ENABLE_CLUSTER_DNS:-false}" -dns_server: $DNS_SERVER_IP -dns_domain: $DNS_DOMAIN -e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}" -cluster_cidr: "$NODE_IP_RANGES" -allocate_node_cidrs: "${ALLOCATE_NODE_CIDRS:-true}" -admission_control: Initializers,NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota -EOF diff --git a/cluster/photon-controller/templates/hostname.sh b/cluster/photon-controller/templates/hostname.sh deleted file mode 100755 index ae7f4d0f4ef..00000000000 --- a/cluster/photon-controller/templates/hostname.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Remove kube.vm from /etc/hosts -sed -i -e 's/\b\w\+.vm\b//' /etc/hosts - -# Update hostname in /etc/hosts and /etc/hostname -sed -i -e "s/\\bkube\\b/${MY_NAME}/g" /etc/host{s,name} -hostname ${MY_NAME} diff --git a/cluster/photon-controller/templates/install-release.sh b/cluster/photon-controller/templates/install-release.sh deleted file mode 100755 index 34206a35aab..00000000000 --- a/cluster/photon-controller/templates/install-release.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script assumes that the environment variable SERVER_BINARY_TAR contains -# the release tar to download and unpack. It is meant to be pushed to the -# master and run. - -echo "Unpacking Salt tree" -rm -rf kubernetes -tar xzf "${SALT_TAR}" - -echo "Running release install script" -sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR}" diff --git a/cluster/photon-controller/templates/salt-master.sh b/cluster/photon-controller/templates/salt-master.sh deleted file mode 100755 index 19281d008f1..00000000000 --- a/cluster/photon-controller/templates/salt-master.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Use other Debian mirror -sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list - -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo "master: ${MASTER_NAME}" > /etc/salt/minion.d/master.conf - -cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-master - cbr-cidr: $MASTER_IP_RANGE - cloud: photon-controller - master_extra_sans: $MASTER_EXTRA_SANS - api_servers: $MASTER_NAME - kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig - kube_user: $KUBE_USER -EOF - -# Auto accept all keys from minions that try to join -mkdir -p /etc/salt/master.d -cat </etc/salt/master.d/auto-accept.conf -auto_accept: True -EOF - -cat </etc/salt/master.d/reactor.conf -# React to new minions starting by running highstate on them. -reactor: - - 'salt/minion/*/start': - - /srv/reactor/highstate-new.sls - - /srv/reactor/highstate-masters.sls - - /srv/reactor/highstate-minions.sls -EOF - -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. 
See https://github.com/saltstack/salt-bootstrap/issues/270 -# -# -M installs the master -set +x -curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -X -set -x diff --git a/cluster/photon-controller/templates/salt-minion.sh b/cluster/photon-controller/templates/salt-minion.sh deleted file mode 100755 index 314e5e726d5..00000000000 --- a/cluster/photon-controller/templates/salt-minion.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Use other Debian mirror -sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list - -# Resolve hostname of master -if ! grep -q $KUBE_MASTER /etc/hosts; then - echo "Adding host entry for $KUBE_MASTER" - echo "${KUBE_MASTER_IP} ${KUBE_MASTER}" >> /etc/hosts -fi - -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo "master: ${KUBE_MASTER}" > /etc/salt/minion.d/master.conf - -# Turn on debugging for salt-minion -# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion - -# Our minions will have a pool role to distinguish them from the master. -# -# Setting the "minion_ip" here causes the kubelet to use its IP for -# identification instead of its hostname. -# -cat </etc/salt/minion.d/grains.conf -grains: - hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}') - roles: - - kubernetes-pool - - kubernetes-pool-photon-controller - cloud: photon-controller -EOF - -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. See https://github.com/saltstack/salt-bootstrap/issues/270 -curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -X diff --git a/cluster/photon-controller/util.sh b/cluster/photon-controller/util.sh deleted file mode 100755 index 55ec52ff9cc..00000000000 --- a/cluster/photon-controller/util.sh +++ /dev/null @@ -1,1110 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. - -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. 
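(Illustrative aside, not part of the patch: the hostname_override grain in the deleted salt-minion.sh above derives the node's primary address by asking the kernel which source address it would use to reach an arbitrary external IP. A minimal sketch of that trick, assuming the usual iproute2 output where the source address is the seventh field; no packet is actually sent.)

# Sketch only: 1.1.1.1 is just a probe target for route selection.
my_ip=$(ip route get 1.1.1.1 | awk '{print $7}')
echo "primary address: ${my_ip}"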
-# shellcheck source=./config-common.sh -source "${KUBE_ROOT}/cluster/photon-controller/config-common.sh" -# shellcheck source=./config-default.sh -source "${KUBE_ROOT}/cluster/photon-controller/${KUBE_CONFIG_FILE-"config-default.sh"}" -# shellcheck source=../common.sh -source "${KUBE_ROOT}/cluster/common.sh" - -readonly PHOTON="photon -n" - -# Naming scheme for VMs (masters & nodes) -readonly MASTER_NAME="${INSTANCE_PREFIX}-master" - -# shell check claims this doesn't work because you can't use a variable in a brace -# range. It does work because we're calling eval. -# shellcheck disable=SC2051 -readonly NODE_NAMES=($(eval echo "${INSTANCE_PREFIX}"-node-{1.."${NUM_NODES}"})) - -##################################################################### -# -# Public API -# -##################################################################### - -# -# detect-master will query Photon Controller for the Kubernetes master. -# It assumes that the VM name for the master is unique. -# It will set KUBE_MASTER_ID to be the VM ID of the master -# It will set KUBE_MASTER_IP to be the IP address of the master -# If the silent parameter is passed, it will not print when the master -# is found: this is used internally just to find the MASTER -# -function detect-master { - local silent=${1:-""} - local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}" - - KUBE_MASTER=${MASTER_NAME} - KUBE_MASTER_ID=${KUBE_MASTER_ID:-""} - KUBE_MASTER_IP=${KUBE_MASTER_IP:-""} - - # We don't want silent failure: we check for failure - set +o pipefail - if [[ -z ${KUBE_MASTER_ID} ]]; then - KUBE_MASTER_ID=$(${PHOTON} vm list ${tenant_args} | grep $'\t'"kubernetes-master"$'\t' | awk '{print $1}') - fi - if [[ -z ${KUBE_MASTER_ID} ]]; then - kube::log::error "Could not find Kubernetes master node ID. Make sure you've launched a cluster with kube-up.sh" - exit 1 - fi - - if [[ -z "${KUBE_MASTER_IP-}" ]]; then - # Pick out the NICs that have a MAC address owned VMware (with OUI 00:0C:29) - # Make sure to ignore lines that have a network interface but no address - KUBE_MASTER_IP=$(${PHOTON} vm networks "${KUBE_MASTER_ID}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}') - fi - if [[ -z "${KUBE_MASTER_IP-}" ]]; then - kube::log::error "Could not find Kubernetes master node IP. Make sure you've launched a cluster with 'kube-up.sh'" >&2 - exit 1 - fi - if [[ -z ${silent} ]]; then - kube::log::status "Master: $KUBE_MASTER ($KUBE_MASTER_IP)" - fi - # Reset default set in common.sh - set -o pipefail -} - -# -# detect-nodes will query Photon Controller for the Kubernetes nodes -# It assumes that the VM name for the nodes are unique. 
-# It assumes that NODE_NAMES has been set -# It will set KUBE_NODE_IP_ADDRESSES to be the VM IPs of the nodes -# It will set the KUBE_NODE_IDS to be the VM IDs of the nodes -# If the silent parameter is passed, it will not print when the nodes -# are found: this is used internally just to find the MASTER -# -function detect-nodes { - local silent=${1:-""} - local failure=0 - local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}" - - KUBE_NODE_IP_ADDRESSES=() - KUBE_NODE_IDS=() - # We don't want silent failure: we check for failure - set +o pipefail - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - - local node_id - node_id=$(${PHOTON} vm list ${tenant_args} | grep $'\t'"${NODE_NAMES[${i}]}"$'\t' | awk '{print $1}') - if [[ -z ${node_id} ]]; then - kube::log::error "Could not find ${NODE_NAMES[${i}]}" - failure=1 - fi - KUBE_NODE_IDS+=("${node_id}") - - # Pick out the NICs that have a MAC address owned VMware (with OUI 00:0C:29) - # Make sure to ignore lines that have a network interface but no address - node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}') - KUBE_NODE_IP_ADDRESSES+=("${node_ip}") - - if [[ -z ${silent} ]]; then - kube::log::status "Node: ${NODE_NAMES[${i}]} (${KUBE_NODE_IP_ADDRESSES[${i}]})" - fi - done - - if [[ ${failure} -ne 0 ]]; then - exit 1 - fi - # Reset default set in common.sh - set -o pipefail -} - -# Get node names if they are not static. -function detect-node-names { - echo "TODO: detect-node-names" 1>&2 -} - -# -# Verifies that this computer has sufficient software installed -# so that it can run the rest of the script. -# -function verify-prereqs { - verify-cmd-in-path photon - verify-cmd-in-path ssh - verify-cmd-in-path scp - verify-cmd-in-path ssh-add - verify-cmd-in-path openssl - verify-cmd-in-path mkisofs -} - -# -# The entry point for bringing up a Kubernetes cluster -# -function kube-up { - verify-prereqs - verify-ssh-prereqs - verify-photon-config - kube::util::ensure-temp-dir - - find-release-tars - find-image-id - - load-or-gen-kube-basicauth - gen-cloud-init-iso - gen-master-start - create-master-vm - install-salt-on-master - - gen-node-start - install-salt-on-nodes - - detect-nodes -s - - install-kubernetes-on-master - install-kubernetes-on-nodes - - wait-master-api - wait-node-apis - - setup-pod-routes - - copy-kube-certs - kube::log::status "Creating kubeconfig..." 
- create-kubeconfig -} - -# Delete a kubernetes cluster -function kube-down { - detect-master - detect-nodes - - pc-delete-vm "${KUBE_MASTER}" "${KUBE_MASTER_ID}" - for (( node=0; node<${#KUBE_NODE_IDS[@]}; node++)); do - pc-delete-vm "${NODE_NAMES[${node}]}" "${KUBE_NODE_IDS[${node}]}" - done -} - -# Update a kubernetes cluster -function kube-push { - echo "TODO: kube-push" 1>&2 -} - -# Prepare update a kubernetes component -function prepare-push { - echo "TODO: prepare-push" 1>&2 -} - -# Update a kubernetes master -function push-master { - echo "TODO: push-master" 1>&2 -} - -# Update a kubernetes node -function push-node { - echo "TODO: push-node" 1>&2 -} - -# Execute prior to running tests to build a release if required for env -function test-build-release { - echo "TODO: test-build-release" 1>&2 -} - -# Execute prior to running tests to initialize required structure -function test-setup { - echo "TODO: test-setup" 1>&2 -} - -# Execute after running tests to perform any required clean-up -function test-teardown { - echo "TODO: test-teardown" 1>&2 -} - -##################################################################### -# -# Internal functions -# -##################################################################### - -# -# Uses Photon Controller to make a VM -# Takes two parameters: -# - The name of the VM (Assumed to be unique) -# - The name of the flavor to create the VM (Assumed to be unique) -# -# It assumes that the variables in config-common.sh (PHOTON_TENANT, etc) -# are set correctly. -# -# It also assumes the cloud-init ISO has been generated -# -# When it completes, it sets two environment variables for use by the -# caller: _VM_ID (the ID of the created VM) and _VM_IP (the IP address -# of the created VM) -# -function pc-create-vm { - local vm_name="${1}" - local vm_flavor="${2}" - local rc=0 - local i=0 - - # Create the VM - local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}" - local vm_args="--name ${vm_name} --image ${PHOTON_IMAGE_ID} --flavor ${vm_flavor}" - local disk_args="disk-1 ${PHOTON_DISK_FLAVOR} boot=true" - - rc=0 - _VM_ID=$(${PHOTON} vm create ${tenant_args} ${vm_args} --disks "${disk_args}" 2>&1) || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Failed to create VM. Error output:" - echo "${_VM_ID}" - exit 1 - fi - kube::log::status "Created VM ${vm_name}: ${_VM_ID}" - - # Start the VM - # Note that the VM has cloud-init in it, and we attach an ISO that - # contains a user-data.txt file for cloud-init. When the VM starts, - # cloud-init will temporarily mount the ISO and configure the VM - # Our user-data will configure the 'kube' user and set up the ssh - # authorized keys to allow us to ssh to the VM and do further work. - run-cmd "${PHOTON} vm attach-iso -p ${KUBE_TEMP}/cloud-init.iso ${_VM_ID}" - run-cmd "${PHOTON} vm start ${_VM_ID}" - kube::log::status "Started VM ${vm_name}, waiting for network address..." - - # Wait for the VM to be started and connected to the network - have_network=0 - for i in {1..120}; do - # photon -n vm networks print several fields: - # NETWORK MAC IP GATEWAY CONNECTED? - # We wait until CONNECTED is True - rc=0 - networks=$(${PHOTON} vm networks "${_VM_ID}") || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "'${PHOTON} vm networks ${_VM_ID}' failed. Error output: " - echo "${networks}" - fi - networks=$(echo "${networks}" | grep True) || rc=$? 
- if [[ ${rc} -eq 0 ]]; then - have_network=1 - break; - fi - sleep 1 - done - - # Fail if the VM didn't come up - if [[ ${have_network} -eq 0 ]]; then - kube::log::error "VM ${vm_name} failed to start up: no IP was found" - exit 1 - fi - - # Find the IP address of the VM - _VM_IP=$(${PHOTON} vm networks "${_VM_ID}" | head -1 | awk -F'\t' '{print $3}') - kube::log::status "VM ${vm_name} has IP: ${_VM_IP}" -} - -# -# Delete one of our VMs -# If it is STARTED, it will be stopped first. -# -function pc-delete-vm { - local vm_name="${1}" - local vm_id="${2}" - local rc=0 - - kube::log::status "Deleting VM ${vm_name}" - # In some cases, head exits before photon, so the pipline exits with - # SIGPIPE. We disable the pipefile option to hide that failure. - set +o pipefail - ${PHOTON} vm show "${vm_id}" | head -1 | grep STARTED > /dev/null 2>&1 || rc=$? - set +o pipefail - if [[ ${rc} -eq 0 ]]; then - ${PHOTON} vm stop "${vm_id}" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Error: could not stop ${vm_name} ($vm_id)" - kube::log::error "Please investigate and stop manually" - return - fi - fi - - rc=0 - ${PHOTON} vm delete "${vm_id}" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Error: could not delete ${vm_name} ($vm_id)" - kube::log::error "Please investigate and delete manually" - fi -} - -# -# Looks for the image named PHOTON_IMAGE -# Sets PHOTON_IMAGE_ID to be the id of that image. -# We currently assume there is exactly one image with name -# -function find-image-id { - local rc=0 - PHOTON_IMAGE_ID=$(${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | head -1 | grep READY | awk -F'\t' '{print $1}') - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Cannot find image \"${PHOTON_IMAGE}\"" - fail=1 - fi -} - -# -# Generate an ISO with a single file called user-data.txt -# This ISO will be used to configure cloud-init (which is already -# on the VM). We will tell cloud-init to create the kube user/group -# and give ourselves the ability to ssh to the VM with ssh. We also -# allow people to ssh with the same password that was randomly -# generated for access to Kubernetes as a backup method. 
-# -# Assumes environment variables: -# - VM_USER -# - KUBE_PASSWORD (randomly generated password) -# -function gen-cloud-init-iso { - local password_hash - password_hash=$(openssl passwd -1 "${KUBE_PASSWORD}") - - local ssh_key - ssh_key=$(ssh-add -L | head -1) - - # Make the user-data file that will be used by cloud-init - ( - echo "#cloud-config" - echo "" - echo "groups:" - echo " - ${VM_USER}" - echo "" - echo "users:" - echo " - name: ${VM_USER}" - echo " gecos: Kubernetes" - echo " primary-group: ${VM_USER}" - echo " lock-passwd: false" - echo " passwd: ${password_hash}" - echo " ssh-authorized-keys: " - echo " - ${ssh_key}" - echo " sudo: ALL=(ALL) NOPASSWD:ALL" - echo " shell: /bin/bash" - echo "" - echo "hostname:" - echo " - hostname: kube" - ) > "${KUBE_TEMP}/user-data.txt" - - # Make the ISO that will contain the user-data - # The -rock option means that we'll generate real filenames (long and with case) - run-cmd "mkisofs -rock -o ${KUBE_TEMP}/cloud-init.iso ${KUBE_TEMP}/user-data.txt" -} - -# -# Generate a script used to install salt on the master -# It is placed into $KUBE_TEMP/master-start.sh -# -function gen-master-start { - python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \ - -b -c "${KUBE_TEMP}/htpasswd" "${KUBE_USER}" "${KUBE_PASSWORD}" - local htpasswd - htpasswd=$(cat "${KUBE_TEMP}/htpasswd") - - # This calculation of the service IP should work, but if you choose an - # alternate subnet, there's a small chance you'd need to modify the - # service_ip, below. We'll choose an IP like 10.244.240.1 by taking - # the first three octets of the SERVICE_CLUSTER_IP_RANGE and tacking - # on a .1 - local octets - local service_ip - octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g')) - ((octets[3]+=1)) - service_ip=$(echo "${octets[*]}" | sed 's/ /./g') - MASTER_EXTRA_SANS="IP:${service_ip},DNS:${MASTER_NAME},${MASTER_EXTRA_SANS}" - - ( - echo "#! 
/bin/bash" - echo "readonly MY_NAME=${MASTER_NAME}" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/hostname.sh" - echo "cd /home/kube/cache/kubernetes-install" - echo "readonly KUBE_MASTER_IP='{$KUBE_MASTER_IP}'" - echo "readonly MASTER_NAME='${MASTER_NAME}'" - echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE}'" - echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'" - echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'" - echo "readonly NODE_IP_RANGES='${NODE_IP_RANGES}'" - echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" - echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'" - echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'" - echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'" - echo "readonly ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI:-false}'" - echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'" - echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'" - echo "readonly KUBE_USER='${KUBE_USER:-}'" - echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD:-}'" - echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'" - echo "readonly SALT_TAR='${SALT_TAR##*/}'" - echo "readonly MASTER_HTPASSWD='${htpasswd}'" - echo "readonly E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'" - echo "readonly MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/create-dynamic-salt-files.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/install-release.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/salt-master.sh" - ) > "${KUBE_TEMP}/master-start.sh" -} - -# -# Generate the scripts for each node to install salt -# -function gen-node-start { - local i - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - ( - echo "#! /bin/bash" - echo "readonly MY_NAME=${NODE_NAMES[${i}]}" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/hostname.sh" - echo "KUBE_MASTER=${KUBE_MASTER}" - echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" - echo "NODE_IP_RANGE=$NODE_IP_RANGES" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/salt-minion.sh" - ) > "${KUBE_TEMP}/node-start-${i}.sh" - done -} - -# -# Create a script that will run on the Kubernetes master and will run salt -# to configure the master. We make it a script instead of just running a -# single ssh command so that we can get logging. -# -function gen-master-salt { - gen-salt "kubernetes-master" -} - -# -# Create scripts that will be run on the Kubernetes master. Each of these -# will invoke salt to configure one of the nodes -# -function gen-node-salt { - local i - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - gen-salt "${NODE_NAMES[${i}]}" - done -} - -# -# Shared implementation for gen-master-salt and gen-node-salt -# Writes a script that installs Kubernetes with salt -# The core of the script is simple (run 'salt ... state.highstate') -# We also do a bit of logging so we can debug problems -# -# There is also a funky workaround for an issue with docker 1.9 -# (elsewhere we peg ourselves to docker 1.9). It's fixed in 1.10, -# so we should be able to remove it in the future -# https://github.com/docker/docker/issues/18113 -# The problem is that sometimes the install (with apt-get) of -# docker fails. Deleting a file and retrying fixes it. 
-# -# Tell shellcheck to ignore our variables within single quotes: -# We're writing a script, not executing it, so this is normal -# shellcheck disable=SC2016 -function gen-salt { - node_name=${1} - ( - echo '#!/bin/bash' - echo '' - echo "node=${node_name}" - echo 'out=/tmp/${node}-salt.out' - echo 'log=/tmp/${node}-salt.log' - echo '' - echo 'echo $(date) >> $log' - echo 'salt ${node} state.highstate -t 30 --no-color > ${out}' - echo 'grep -E "Failed:[[:space:]]+0" ${out}' - echo 'success=$?' - echo 'cat ${out} >> ${log}' - echo '' - echo 'if [[ ${success} -ne 0 ]]; then' - echo ' # Did we try to install docker-engine?' - echo ' attempted=$(grep docker-engine ${out} | wc -l)' - echo ' # Is docker-engine installed?' - echo ' installed=$(salt --output=txt ${node} pkg.version docker-engine | wc -l)' - echo ' if [[ ${attempted} -ne 0 && ${installed} -eq 0 ]]; then' - echo ' echo "Unwedging docker-engine install" >> ${log}' - echo ' salt ${node} cmd.run "rm -f /var/lib/docker/network/files/local-kv.db"' - echo ' fi' - echo 'fi' - echo 'exit ${success}' - ) > "${KUBE_TEMP}/${node_name}-salt.sh" -} - -# -# Generate a script to add a route to a host (master or node) -# The script will do two things: -# 1. Add the route immediately with the route command -# 2. Persist the route by saving it in /etc/network/interfaces -# This was done with a script because it was easier to get the quoting right -# and make it clear. -# -function gen-add-route { - route=${1} - gateway=${2} - ( - echo '#!/bin/bash' - echo '' - echo '# Immediately add route' - echo "sudo route add -net ${route} gw ${gateway}" - echo '' - echo '# Persist route so it lasts over restarts' - echo 'sed -in "s|^iface eth0.*|&\n post-up route add -net' "${route} gw ${gateway}|"'" /etc/network/interfaces' - ) > "${KUBE_TEMP}/add-route.sh" -} - -# -# Create the Kubernetes master VM -# Sets global variables: -# - KUBE_MASTER (Name) -# - KUBE_MASTER_ID (Photon VM ID) -# - KUBE_MASTER_IP (IP address) -# -function create-master-vm { - kube::log::status "Starting master VM..." - pc-create-vm "${MASTER_NAME}" "${PHOTON_MASTER_FLAVOR}" - KUBE_MASTER=${MASTER_NAME} - KUBE_MASTER_ID=${_VM_ID} - KUBE_MASTER_IP=${_VM_IP} -} - -# -# Install salt on the Kubernetes master -# Relies on the master-start.sh script created in gen-master-start -# -function install-salt-on-master { - kube::log::status "Installing salt on master..." - upload-server-tars "${MASTER_NAME}" "${KUBE_MASTER_IP}" - run-script-remotely "${KUBE_MASTER_IP}" "${KUBE_TEMP}/master-start.sh" -} - -# -# Installs salt on Kubernetes nodes in parallel -# Relies on the node-start script created in gen-node-start -# -function install-salt-on-nodes { - kube::log::status "Creating nodes and installing salt on them..." - - # Start each of the VMs in parallel - # In the future, we'll batch this because it doesn't scale well - # past 10 or 20 nodes - local node - for (( node=0; node<${#NODE_NAMES[@]}; node++)); do - ( - pc-create-vm "${NODE_NAMES[${node}]}" "${PHOTON_NODE_FLAVOR}" - run-script-remotely "${_VM_IP}" "${KUBE_TEMP}/node-start-${node}.sh" - ) & - done - - # Wait for the node VM startups to complete - local fail=0 - local job - for job in $(jobs -p); do - wait "${job}" || fail=$((fail + 1)) - done - if (( fail != 0 )); then - kube::log::error "Failed to start ${fail}/${NUM_NODES} nodes" - exit 1 - fi -} - -# -# Install Kubernetes on the master. 
-# This uses the kubernetes-master-salt.sh script created by gen-master-salt -# That script uses salt to install Kubernetes -# -function install-kubernetes-on-master { - # Wait until salt-master is running: it may take a bit - try-until-success-ssh "${KUBE_MASTER_IP}" \ - "Waiting for salt-master to start on ${KUBE_MASTER}" \ - "pgrep salt-master" - gen-master-salt - copy-file-to-vm "${_VM_IP}" "${KUBE_TEMP}/kubernetes-master-salt.sh" "/tmp/kubernetes-master-salt.sh" - try-until-success-ssh "${KUBE_MASTER_IP}" \ - "Installing Kubernetes on ${KUBE_MASTER} via salt" \ - "sudo /bin/bash /tmp/kubernetes-master-salt.sh" -} - -# -# Install Kubernetes on the nodes in parallel -# This uses the kubernetes-master-salt.sh script created by gen-node-salt -# That script uses salt to install Kubernetes -# -function install-kubernetes-on-nodes { - gen-node-salt - - # Run in parallel to bring up the cluster faster - # TODO: Batch this so that we run up to N in parallel, so - # we don't overload this machine or the salt master - local node - for (( node=0; node<${#NODE_NAMES[@]}; node++)); do - ( - copy-file-to-vm "${_VM_IP}" "${KUBE_TEMP}/${NODE_NAMES[${node}]}-salt.sh" "/tmp/${NODE_NAMES[${node}]}-salt.sh" - try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \ - "Waiting for salt-master to start on ${NODE_NAMES[${node}]}" \ - "pgrep salt-minion" - try-until-success-ssh "${KUBE_MASTER_IP}" \ - "Installing Kubernetes on ${NODE_NAMES[${node}]} via salt" \ - "sudo /bin/bash /tmp/${NODE_NAMES[${node}]}-salt.sh" - ) & - done - - # Wait for the Kubernetes installations to complete - local fail=0 - local job - for job in $(jobs -p); do - wait "${job}" || fail=$((fail + 1)) - done - if (( fail != 0 )); then - kube::log::error "Failed to start install Kubernetes on ${fail} out of ${NUM_NODES} nodess" - exit 1 - fi -} - -# -# Upload the Kubernetes tarballs to the master -# -function upload-server-tars { - vm_name=${1} - vm_ip=${2} - - run-ssh-cmd "${vm_ip}" "mkdir -p /home/kube/cache/kubernetes-install" - - local tar - for tar in "${SERVER_BINARY_TAR}" "${SALT_TAR}"; do - local base_tar - base_tar=$(basename "${tar}") - kube::log::status "Uploading ${base_tar} to ${vm_name}..." - copy-file-to-vm "${vm_ip}" "${tar}" "/home/kube/cache/kubernetes-install/${tar##*/}" - done -} - -# -# Wait for the Kubernets healthz API to be responsive on the master -# -function wait-master-api { - local curl_creds="--insecure --user ${KUBE_USER}:${KUBE_PASSWORD}" - local curl_output="--fail --output /dev/null --silent" - local curl_net="--max-time 1" - - try-until-success "Waiting for Kubernetes API on ${KUBE_MASTER}" \ - "curl ${curl_creds} ${curl_output} ${curl_net} https://${KUBE_MASTER_IP}/healthz" -} - -# -# Wait for the Kubernetes healthz API to be responsive on each node -# -function wait-node-apis { - local curl_output="--fail --output /dev/null --silent" - local curl_net="--max-time 1" - - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - try-until-success "Waiting for Kubernetes API on ${NODE_NAMES[${i}]}..." \ - "curl ${curl_output} ${curl_net} http://${KUBE_NODE_IP_ADDRESSES[${i}]}:10250/healthz" - done -} - -# -# Configure the nodes so the pods can communicate -# Each node will have a bridge named cbr0 for the NODE_IP_RANGES -# defined in config-default.sh. This finds the IP subnet (assigned -# by Kubernetes) to nodes and configures routes so they can communicate -# -# Also configure the master to be able to talk to the nodes. This is -# useful so that you can get to the UI from the master. 
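(Illustrative aside, not part of the patch: the routing scheme described above amounts to one static route per node, sending traffic for that node's cbr0 pod subnet via the node's primary address, as gen-add-route earlier in this file does. A minimal sketch with example addresses of my own, not values from the patch.)

# Sketch only: route an example pod subnet via an example node IP, the same
# "route add -net <pod-cidr> gw <node-ip>" shape emitted by gen-add-route.
sudo route add -net 10.244.1.0/24 gw 192.168.0.11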
-# -function setup-pod-routes { - local node - - KUBE_NODE_BRIDGE_NETWORK=() - for (( node=0; node<${#NODE_NAMES[@]}; node++)); do - - # This happens in two steps (wait for an address, wait for a non 172.x.x.x address) - # because it's both simpler and more clear what's happening. - try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \ - "Waiting for cbr0 bridge on ${NODE_NAMES[${node}]} to have an address" \ - 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+"' - - try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \ - "Waiting for cbr0 bridge on ${NODE_NAMES[${node}]} to have correct address" \ - 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+" | grep -v "^172."' - - run-ssh-cmd "${KUBE_NODE_IP_ADDRESSES[${node}]}" 'sudo ip route show | grep -E "dev cbr0" | cut -d " " -f1' - KUBE_NODE_BRIDGE_NETWORK+=(${_OUTPUT}) - kube::log::status "cbr0 on ${NODE_NAMES[${node}]} is ${_OUTPUT}" - done - - local i - local j - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - kube::log::status "Configuring pod routes on ${NODE_NAMES[${i}]}..." - gen-add-route "${KUBE_NODE_BRIDGE_NETWORK[${i}]}" "${KUBE_NODE_IP_ADDRESSES[${i}]}" - run-script-remotely "${KUBE_MASTER_IP}" "${KUBE_TEMP}/add-route.sh" - - for (( j=0; j<${#NODE_NAMES[@]}; j++)); do - if [[ "${i}" != "${j}" ]]; then - gen-add-route "${KUBE_NODE_BRIDGE_NETWORK[${j}]}" "${KUBE_NODE_IP_ADDRESSES[${j}]}" - run-script-remotely "${KUBE_NODE_IP_ADDRESSES[${i}]}" "${KUBE_TEMP}/add-route.sh" - fi - done - done -} - -# -# Copy the certificate/key from the Kubernetes master -# These are used to create the kubeconfig file, which allows -# users to use kubectl easily -# -# We also set KUBE_CERT, KUBE_KEY, CA_CERT, and CONTEXT because they -# are needed by create-kubeconfig from common.sh to generate -# the kube config file. 
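(Illustrative aside, not part of the patch: the contract just described is that copy-kube-certs exports credential paths and a context name, and create-kubeconfig from cluster/common.sh then assembles the kubeconfig from them. A minimal usage sketch under that assumption; the paths and context name are examples I chose.)

# Sketch only: assumes cluster/common.sh has been sourced so that
# create-kubeconfig is defined and reads these environment variables.
export KUBE_CERT="/tmp/kubecfg.crt"
export KUBE_KEY="/tmp/kubecfg.key"
export CA_CERT="/tmp/ca.crt"
export CONTEXT="photon-example"
create-kubeconfig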
-# -function copy-kube-certs { - local cert="kubecfg.crt" - local key="kubecfg.key" - local ca="ca.crt" - local cert_dir="/srv/kubernetes" - - kube::log::status "Copying credentials from ${KUBE_MASTER}" - - # Set global environment variables: needed by create-kubeconfig - # in common.sh - export KUBE_CERT="${KUBE_TEMP}/${cert}" - export KUBE_KEY="${KUBE_TEMP}/${key}" - export CA_CERT="${KUBE_TEMP}/${ca}" - export CONTEXT="photon-${INSTANCE_PREFIX}" - - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${cert}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${key}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${ca}" - - copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${cert}" "${KUBE_CERT}" - copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${key}" "${KUBE_KEY}" - copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${ca}" "${CA_CERT}" - - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${cert}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${key}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${ca}" -} - -# -# Copies a script to a VM and runs it -# Parameters: -# - IP of VM -# - Path to local file -# -function run-script-remotely { - local vm_ip=${1} - local local_file="${2}" - local base_file - local remote_file - - base_file=$(basename "${local_file}") - remote_file="/tmp/${base_file}" - - copy-file-to-vm "${vm_ip}" "${local_file}" "${remote_file}" - run-ssh-cmd "${vm_ip}" "chmod 700 ${remote_file}" - run-ssh-cmd "${vm_ip}" "nohup sudo ${remote_file} < /dev/null 1> ${remote_file}.out 2>&1 &" -} - -# -# Runs an command on a VM using ssh -# Parameters: -# - (optional) -i to ignore failure -# - IP address of the VM -# - Command to run -# Assumes environment variables: -# - VM_USER -# - SSH_OPTS -# -function run-ssh-cmd { - local ignore_failure="" - if [[ "${1}" = "-i" ]]; then - ignore_failure="-i" - shift - fi - - local vm_ip=${1} - shift - local cmd=${1} - - - run-cmd ${ignore_failure} "ssh ${SSH_OPTS} $VM_USER@${vm_ip} $1" -} - -# -# Uses scp to copy file to VM -# Parameters: -# - IP address of the VM -# - Path to local file -# - Path to remote file -# Assumes environment variables: -# - VM_USER -# - SSH_OPTS -# -function copy-file-to-vm { - local vm_ip=${1} - local local_file=${2} - local remote_file=${3} - - run-cmd "scp ${SSH_OPTS} ${local_file} ${VM_USER}@${vm_ip}:${remote_file}" -} - -function copy-file-from-vm { - local vm_ip=${1} - local remote_file=${2} - local local_file=${3} - - run-cmd "scp ${SSH_OPTS} ${VM_USER}@${vm_ip}:${remote_file} ${local_file}" -} - -# -# Run a command, print nice error output -# Used by copy-file-to-vm and run-ssh-cmd -# -function run-cmd { - local rc=0 - local ignore_failure="" - if [[ "${1}" = "-i" ]]; then - ignore_failure=${1} - shift - fi - - local cmd=$1 - local output - output=$(${cmd} 2>&1) || rc=$? - if [[ ${rc} -ne 0 ]]; then - if [[ -z "${ignore_failure}" ]]; then - kube::log::error "Failed to run command: ${cmd} Output:" - echo "${output}" - exit 1 - fi - fi - _OUTPUT=${output} - return ${rc} -} - -# -# After the initial VM setup, we use SSH with keys to access the VMs -# This requires an SSH agent, so we verify that it's running -# -function verify-ssh-prereqs { - kube::log::status "Validating SSH configuration..." - local rc - - rc=0 - ssh-add -L 1> /dev/null 2> /dev/null || rc=$? - # "Could not open a connection to your authentication agent." 
- if [[ "${rc}" -eq 2 ]]; then - # ssh agent wasn't running, so start it and ensure we stop it - eval "$(ssh-agent)" > /dev/null - trap-add "kill ${SSH_AGENT_PID}" EXIT - fi - - rc=0 - ssh-add -L 1> /dev/null 2> /dev/null || rc=$? - # "The agent has no identities." - if [[ "${rc}" -eq 1 ]]; then - # Try adding one of the default identities, with or without passphrase. - ssh-add || true - fi - - # Expect at least one identity to be available. - if ! ssh-add -L 1> /dev/null 2> /dev/null; then - kube::log::error "Could not find or add an SSH identity." - kube::log::error "Please start ssh-agent, add your identity, and retry." - exit 1 - fi -} - -# -# Verify that Photon Controller has been configured in the way we expect. Specifically -# - Have the flavors been created? -# - Has the image been uploaded? -# TODO: Check the tenant and project as well. -function verify-photon-config { - kube::log::status "Validating Photon configuration..." - - # We don't want silent failure: we check for failure - set +o pipefail - - verify-photon-flavors - verify-photon-image - verify-photon-tenant - - # Reset default set in common.sh - set -o pipefail -} - -# -# Verify that the VM and disk flavors have been created -# -function verify-photon-flavors { - local rc=0 - - ${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_MASTER_FLAVOR}$" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find VM flavor named ${PHOTON_MASTER_FLAVOR}" - exit 1 - fi - - if [[ "${PHOTON_MASTER_FLAVOR}" != "${PHOTON_NODE_FLAVOR}" ]]; then - rc=0 - ${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_NODE_FLAVOR}$" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find VM flavor named ${PHOTON_NODE_FLAVOR}" - exit 1 - fi - fi - - ${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_DISK_FLAVOR}$" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find disk flavor named ${PHOTON_DISK_FLAVOR}" - exit 1 - fi -} - -# -# Verify that we have the image we need, and it's not in error state or -# multiple copies -# -function verify-photon-image { - local rc - - rc=0 - ${PHOTON} image list | grep -q $'\t'"${PHOTON_IMAGE}"$'\t' > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find image \"${PHOTON_IMAGE}\"" - exit 1 - fi - - rc=0 - ${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | grep ERROR > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - echo "Warning: You have at least one ${PHOTON_IMAGE} image in the ERROR state. You may want to investigate." - echo "Images in the ERROR state will be ignored." - fi - - rc=0 - num_images=$(${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | grep -c READY) - if [[ "${num_images}" -gt 1 ]]; then - echo "ERROR: You have more than one READY ${PHOTON_IMAGE} image. Ensure there is only one" - exit 1 - fi -} - -function verify-photon-tenant { - local rc - - rc=0 - ${PHOTON} tenant list | grep -q $'\t'"${PHOTON_TENANT}" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - echo "ERROR: Cannot find tenant \"${PHOTON_TENANT}\"" - exit 1 - fi - - ${PHOTON} project list --tenant "${PHOTON_TENANT}" | grep -q $'\t'"${PHOTON_PROJECT}"$'\t' > /dev/null 2>&1 || rc=$? 
- if [[ ${rc} -ne 0 ]]; then - echo "ERROR: Cannot find project \"${PHOTON_PROJECT}\"" - exit 1 - fi -} - -# -# Verifies that a given command is in the PATH -# -function verify-cmd-in-path { - cmd=${1} - which "${cmd}" >/dev/null || { - kube::log::error "Can't find ${cmd} in PATH, please install and retry." - exit 1 - } -} - -# -# Repeatedly try a command over ssh until it succeeds or until five minutes have passed -# The timeout isn't exact, since we assume the command runs instantaneously, and -# it doesn't. -# -function try-until-success-ssh { - local vm_ip=${1} - local cmd_description=${2} - local cmd=${3} - local timeout=600 - local sleep_time=5 - local max_attempts - - ((max_attempts=timeout/sleep_time)) - - kube::log::status "${cmd_description} for up to 10 minutes..." - local attempt=0 - while true; do - local rc=0 - run-ssh-cmd -i "${vm_ip}" "${cmd}" || rc=1 - if [[ ${rc} != 0 ]]; then - if (( attempt == max_attempts )); then - kube::log::error "Failed, cannot proceed: you may need to retry to log into the VM to debug" - exit 1 - fi - else - break - fi - attempt=$((attempt+1)) - sleep ${sleep_time} - done -} - -function try-until-success { - local cmd_description=${1} - local cmd=${2} - local timeout=600 - local sleep_time=5 - local max_attempts - - ((max_attempts=timeout/sleep_time)) - - kube::log::status "${cmd_description} for up to 10 minutes..." - local attempt=0 - while true; do - local rc=0 - run-cmd -i "${cmd}" || rc=1 - if [[ ${rc} != 0 ]]; then - if (( attempt == max_attempts )); then - kube::log::error "Failed, cannot proceed" - exit 1 - fi - else - break - fi - attempt=$((attempt+1)) - sleep ${sleep_time} - done -} - -# -# Sets up a trap handler -# -function trap-add { - local handler="${1}" - local signal="${2-EXIT}" - local cur - - cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")" - if [[ -n "${cur}" ]]; then - handler="${cur}; ${handler}" - fi - - # We want ${handler} to expand now, so tell shellcheck - # shellcheck disable=SC2064 - trap "${handler}" ${signal} -} From f03cdd3851bc923afe0da36547a8bb6dd12d18f3 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 10 Jan 2018 12:47:54 -0500 Subject: [PATCH 105/264] Fix cadvisor flag registration for cross build --- cmd/kubelet/app/options/BUILD | 57 +++++++++++--- cmd/kubelet/app/options/globalflags.go | 51 ------------- cmd/kubelet/app/options/globalflags_linux.go | 79 ++++++++++++++++++++ cmd/kubelet/app/options/globalflags_other.go | 26 +++++++ 4 files changed, 153 insertions(+), 60 deletions(-) create mode 100644 cmd/kubelet/app/options/globalflags_linux.go create mode 100644 cmd/kubelet/app/options/globalflags_other.go diff --git a/cmd/kubelet/app/options/BUILD b/cmd/kubelet/app/options/BUILD index 0d8a49e1e96..d08afb55bab 100644 --- a/cmd/kubelet/app/options/BUILD +++ b/cmd/kubelet/app/options/BUILD @@ -12,7 +12,42 @@ go_library( "container_runtime.go", "globalflags.go", "options.go", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "globalflags_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + 
"globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "globalflags_other.go", + ], + "//conditions:default": [], + }), importpath = "k8s.io/kubernetes/cmd/kubelet/app/options", deps = [ "//pkg/apis/componentconfig:go_default_library", @@ -29,19 +64,23 @@ go_library( "//pkg/util/taints:go_default_library", "//pkg/version/verflag:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/google/cadvisor/container/common:go_default_library", - "//vendor/github.com/google/cadvisor/container/containerd:go_default_library", - "//vendor/github.com/google/cadvisor/container/docker:go_default_library", - "//vendor/github.com/google/cadvisor/container/raw:go_default_library", - "//vendor/github.com/google/cadvisor/machine:go_default_library", - "//vendor/github.com/google/cadvisor/manager:go_default_library", - "//vendor/github.com/google/cadvisor/storage:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/google/cadvisor/container/common:go_default_library", + "//vendor/github.com/google/cadvisor/container/containerd:go_default_library", + "//vendor/github.com/google/cadvisor/container/docker:go_default_library", + "//vendor/github.com/google/cadvisor/container/raw:go_default_library", + "//vendor/github.com/google/cadvisor/machine:go_default_library", + "//vendor/github.com/google/cadvisor/manager:go_default_library", + "//vendor/github.com/google/cadvisor/storage:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/cmd/kubelet/app/options/globalflags.go b/cmd/kubelet/app/options/globalflags.go index 85829930c0b..ad70a30374f 100644 --- a/cmd/kubelet/app/options/globalflags.go +++ b/cmd/kubelet/app/options/globalflags.go @@ -30,13 +30,6 @@ import ( // ensure libs have a chance to globally register their flags _ "github.com/golang/glog" - _ "github.com/google/cadvisor/container/common" - _ "github.com/google/cadvisor/container/containerd" - _ "github.com/google/cadvisor/container/docker" - _ "github.com/google/cadvisor/container/raw" - _ "github.com/google/cadvisor/machine" - _ "github.com/google/cadvisor/manager" - _ "github.com/google/cadvisor/storage" _ "k8s.io/kubernetes/pkg/credentialprovider/azure" _ "k8s.io/kubernetes/pkg/credentialprovider/gcp" ) @@ -121,47 +114,3 @@ func addGlogFlags(fs *pflag.FlagSet) { fs.AddFlagSet(local) } - -// addCadvisorFlags adds flags from cadvisor -func addCadvisorFlags(fs *pflag.FlagSet) { - // lookup flags in global flag set and re-register the values with our flagset - global := flag.CommandLine - local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) - - // These flags were also implicit from cadvisor, but are actually used by something in the core repo: - // TODO(mtaufen): This one is stil used by our salt, but for heaven's sake it's even deprecated in cadvisor - register(global, local, "docker_root") - // e2e node tests rely on this - register(global, local, "housekeeping_interval") - - // These flags were implicit from cadvisor, and are mistakes 
that should be registered deprecated: - const deprecated = "This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed." - - registerDeprecated(global, local, "application_metrics_count_limit", deprecated) - registerDeprecated(global, local, "boot_id_file", deprecated) - registerDeprecated(global, local, "container_hints", deprecated) - registerDeprecated(global, local, "containerd", deprecated) - registerDeprecated(global, local, "docker", deprecated) - registerDeprecated(global, local, "docker_env_metadata_whitelist", deprecated) - registerDeprecated(global, local, "docker_only", deprecated) - registerDeprecated(global, local, "docker-tls", deprecated) - registerDeprecated(global, local, "docker-tls-ca", deprecated) - registerDeprecated(global, local, "docker-tls-cert", deprecated) - registerDeprecated(global, local, "docker-tls-key", deprecated) - registerDeprecated(global, local, "enable_load_reader", deprecated) - registerDeprecated(global, local, "event_storage_age_limit", deprecated) - registerDeprecated(global, local, "event_storage_event_limit", deprecated) - registerDeprecated(global, local, "global_housekeeping_interval", deprecated) - registerDeprecated(global, local, "log_cadvisor_usage", deprecated) - registerDeprecated(global, local, "machine_id_file", deprecated) - registerDeprecated(global, local, "storage_driver_user", deprecated) - registerDeprecated(global, local, "storage_driver_password", deprecated) - registerDeprecated(global, local, "storage_driver_host", deprecated) - registerDeprecated(global, local, "storage_driver_db", deprecated) - registerDeprecated(global, local, "storage_driver_table", deprecated) - registerDeprecated(global, local, "storage_driver_secure", deprecated) - registerDeprecated(global, local, "storage_driver_buffer_duration", deprecated) - - // finally, add cadvisor flags to the provided flagset - fs.AddFlagSet(local) -} diff --git a/cmd/kubelet/app/options/globalflags_linux.go b/cmd/kubelet/app/options/globalflags_linux.go new file mode 100644 index 00000000000..99911921522 --- /dev/null +++ b/cmd/kubelet/app/options/globalflags_linux.go @@ -0,0 +1,79 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "flag" + "os" + + "github.com/spf13/pflag" + + // ensure libs have a chance to globally register their flags + _ "github.com/google/cadvisor/container/common" + _ "github.com/google/cadvisor/container/containerd" + _ "github.com/google/cadvisor/container/docker" + _ "github.com/google/cadvisor/container/raw" + _ "github.com/google/cadvisor/machine" + _ "github.com/google/cadvisor/manager" + _ "github.com/google/cadvisor/storage" +) + +// addCadvisorFlags adds flags from cadvisor +func addCadvisorFlags(fs *pflag.FlagSet) { + // lookup flags in global flag set and re-register the values with our flagset + global := flag.CommandLine + local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + + // These flags were also implicit from cadvisor, but are actually used by something in the core repo: + // TODO(mtaufen): This one is stil used by our salt, but for heaven's sake it's even deprecated in cadvisor + register(global, local, "docker_root") + // e2e node tests rely on this + register(global, local, "housekeeping_interval") + + // These flags were implicit from cadvisor, and are mistakes that should be registered deprecated: + const deprecated = "This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed." + + registerDeprecated(global, local, "application_metrics_count_limit", deprecated) + registerDeprecated(global, local, "boot_id_file", deprecated) + registerDeprecated(global, local, "container_hints", deprecated) + registerDeprecated(global, local, "containerd", deprecated) + registerDeprecated(global, local, "docker", deprecated) + registerDeprecated(global, local, "docker_env_metadata_whitelist", deprecated) + registerDeprecated(global, local, "docker_only", deprecated) + registerDeprecated(global, local, "docker-tls", deprecated) + registerDeprecated(global, local, "docker-tls-ca", deprecated) + registerDeprecated(global, local, "docker-tls-cert", deprecated) + registerDeprecated(global, local, "docker-tls-key", deprecated) + registerDeprecated(global, local, "enable_load_reader", deprecated) + registerDeprecated(global, local, "event_storage_age_limit", deprecated) + registerDeprecated(global, local, "event_storage_event_limit", deprecated) + registerDeprecated(global, local, "global_housekeeping_interval", deprecated) + registerDeprecated(global, local, "log_cadvisor_usage", deprecated) + registerDeprecated(global, local, "machine_id_file", deprecated) + registerDeprecated(global, local, "storage_driver_user", deprecated) + registerDeprecated(global, local, "storage_driver_password", deprecated) + registerDeprecated(global, local, "storage_driver_host", deprecated) + registerDeprecated(global, local, "storage_driver_db", deprecated) + registerDeprecated(global, local, "storage_driver_table", deprecated) + registerDeprecated(global, local, "storage_driver_secure", deprecated) + registerDeprecated(global, local, "storage_driver_buffer_duration", deprecated) + + // finally, add cadvisor flags to the provided flagset + fs.AddFlagSet(local) +} diff --git a/cmd/kubelet/app/options/globalflags_other.go b/cmd/kubelet/app/options/globalflags_other.go new file mode 100644 index 00000000000..b4a04f9f40f --- /dev/null +++ b/cmd/kubelet/app/options/globalflags_other.go @@ -0,0 +1,26 @@ +// +build !linux + +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/spf13/pflag" +) + +func addCadvisorFlags(fs *pflag.FlagSet) { +} From dca369dc8451178d80ca879ceda954f7ccb39ba5 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Wed, 10 Jan 2018 10:03:22 -0800 Subject: [PATCH 106/264] remove support for container-linux in gce kube-up --- build/lib/release.sh | 1 - cluster/common.sh | 22 +- cluster/gce/BUILD | 2 - cluster/gce/config-default.sh | 6 - cluster/gce/config-test.sh | 7 - cluster/gce/container-linux/OWNERS | 8 - cluster/gce/container-linux/README.md | 8 - .../gce/container-linux/configure-helper.sh | 1606 ----------------- cluster/gce/container-linux/configure.sh | 182 -- cluster/gce/container-linux/health-monitor.sh | 83 - cluster/gce/container-linux/helper.sh | 19 - cluster/gce/container-linux/master-helper.sh | 139 -- cluster/gce/container-linux/master.yaml | 57 - cluster/gce/container-linux/node-helper.sh | 35 - cluster/gce/container-linux/node.yaml | 57 - cluster/gce/util.sh | 4 +- 16 files changed, 7 insertions(+), 2229 deletions(-) delete mode 100644 cluster/gce/container-linux/OWNERS delete mode 100644 cluster/gce/container-linux/README.md delete mode 100755 cluster/gce/container-linux/configure-helper.sh delete mode 100755 cluster/gce/container-linux/configure.sh delete mode 100644 cluster/gce/container-linux/health-monitor.sh delete mode 100755 cluster/gce/container-linux/helper.sh delete mode 100755 cluster/gce/container-linux/master-helper.sh delete mode 100644 cluster/gce/container-linux/master.yaml delete mode 100755 cluster/gce/container-linux/node-helper.sh delete mode 100644 cluster/gce/container-linux/node.yaml diff --git a/build/lib/release.sh b/build/lib/release.sh index 870451601f6..c9932ea29d2 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -414,7 +414,6 @@ function kube::release::package_kube_manifests_tarball() { cp "${salt_dir}/e2e-image-puller/e2e-image-puller.manifest" "${gci_dst_dir}/" cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${gci_dst_dir}/gci-configure-helper.sh" cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${gci_dst_dir}/health-monitor.sh" - cp "${KUBE_ROOT}/cluster/gce/container-linux/configure-helper.sh" "${gci_dst_dir}/container-linux-configure-helper.sh" cp -r "${salt_dir}/kube-admission-controls/limit-range" "${gci_dst_dir}" local objects objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo) diff --git a/cluster/common.sh b/cluster/common.sh index a2b947f1748..2aa73622a64 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -436,8 +436,8 @@ function find-release-tars() { # This tarball is used by GCI, Ubuntu Trusty, and Container Linux. 
KUBE_MANIFESTS_TAR= - if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \ - [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then + if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \ + [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz) fi } @@ -576,9 +576,7 @@ function build-kube-env { local server_binary_tar_url=$SERVER_BINARY_TAR_URL local salt_tar_url=$SALT_TAR_URL local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}" - if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \ - [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] || \ - [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ + if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then # TODO: Support fallback .tar.gz settings on Container Linux server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}") @@ -696,8 +694,8 @@ EOF TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD}) EOF fi - if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ - [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then + if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ + [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then cat >>$file <>$file <>$file </dev/null; then - useradd -s /sbin/nologin -d /var/etcd etcd - fi - chown -R etcd "${mount_point}/var/etcd" - chgrp -R etcd "${mount_point}/var/etcd" -} - -# replace_prefixed_line ensures: -# 1. the specified file exists -# 2. existing lines with the specified ${prefix} are removed -# 3. a new line with the specified ${prefix}${suffix} is appended -function replace_prefixed_line { - local -r file="${1:-}" - local -r prefix="${2:-}" - local -r suffix="${3:-}" - - touch "${file}" - awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${file}.filtered" && mv "${file}.filtered" "${file}" - echo "${prefix}${suffix}" >> "${file}" -} - -# After the first boot and on upgrade, these files exist on the master-pd -# and should never be touched again (except perhaps an additional service -# account, see NB below.) -function create-master-auth { - echo "Creating master auth files" - local -r auth_dir="/etc/srv/kubernetes" - if [[ ! -e "${auth_dir}/ca.crt" && ! -z "${CA_CERT:-}" && ! -z "${MASTER_CERT:-}" && ! 
-z "${MASTER_KEY:-}" ]]; then - echo "${CA_CERT}" | base64 --decode > "${auth_dir}/ca.crt" - echo "${MASTER_CERT}" | base64 --decode > "${auth_dir}/server.cert" - echo "${MASTER_KEY}" | base64 --decode > "${auth_dir}/server.key" - fi - local -r basic_auth_csv="${auth_dir}/basic_auth.csv" - if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then - replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters" - fi - local -r known_tokens_csv="${auth_dir}/known_tokens.csv" - if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters" - fi - if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager" - fi - if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler" - fi - if [[ -n "${KUBELET_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBELET_TOKEN}," "kubelet,uid:kubelet,system:nodes" - fi - if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy" - fi - local use_cloud_config="false" - cat </etc/gce.conf -[global] -EOF - if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then - cat <>/etc/gce.conf -api-endpoint = ${GCE_API_ENDPOINT} -EOF - fi - if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -token-url = ${TOKEN_URL} -token-body = ${TOKEN_BODY} -EOF - fi - if [[ -n "${PROJECT_ID:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -project-id = ${PROJECT_ID} -EOF - fi - if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -network-project-id = ${NETWORK_PROJECT_ID} -EOF - fi - if [[ -n "${NODE_NETWORK:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -network-name = ${NODE_NETWORK} -EOF - fi - if [[ -n "${NODE_SUBNETWORK:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -subnetwork-name = ${NODE_SUBNETWORK} -EOF - fi - if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then - use_cloud_config="true" - if [[ -n "${NODE_TAGS:-}" ]]; then - # split NODE_TAGS into an array by comma. - IFS=',' read -r -a node_tags <<< ${NODE_TAGS} - else - local -r node_tags="${NODE_INSTANCE_PREFIX}" - fi - cat <>/etc/gce.conf -node-instance-prefix = ${NODE_INSTANCE_PREFIX} -EOF - for tag in ${node_tags[@]}; do - cat <>/etc/gce.conf -node-tags = ${tag} -EOF - done - fi - if [[ -n "${MULTIZONE:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -multizone = ${MULTIZONE} -EOF - fi - if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then - use_cloud_config="true" - # split GCE_ALPHA_FEATURES into an array by comma. 
- IFS=',' read -r -a alpha_features <<< ${GCE_ALPHA_FEATURES} - for feature in ${alpha_features[@]}; do - cat <>/etc/gce.conf -alpha-features = ${feature} -EOF - done - fi - if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then - use_cloud_config="true" - cat <> /etc/gce.conf -secondary-range-name = ${SECONDARY_RANGE_NAME} -EOF - fi - if [[ "${use_cloud_config}" != "true" ]]; then - rm -f /etc/gce.conf - fi - - if [[ -n "${GCP_AUTHN_URL:-}" ]]; then - cat </etc/gcp_authn.config -clusters: - - name: gcp-authentication-server - cluster: - server: ${GCP_AUTHN_URL} -users: - - name: kube-apiserver - user: - auth-provider: - name: gcp -current-context: webhook -contexts: -- context: - cluster: gcp-authentication-server - user: kube-apiserver - name: webhook -EOF - fi - - if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then - cat </etc/gcp_authz.config -clusters: - - name: gcp-authorization-server - cluster: - server: ${GCP_AUTHZ_URL} -users: - - name: kube-apiserver - user: - auth-provider: - name: gcp -current-context: webhook -contexts: -- context: - cluster: gcp-authorization-server - user: kube-apiserver - name: webhook -EOF - fi - -if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then - # This is the config file for the image review webhook. - cat </etc/gcp_image_review.config -clusters: - - name: gcp-image-review-server - cluster: - server: ${GCP_IMAGE_VERIFICATION_URL} -users: - - name: kube-apiserver - user: - auth-provider: - name: gcp -current-context: webhook -contexts: -- context: - cluster: gcp-image-review-server - user: kube-apiserver - name: webhook -EOF - # This is the config for the image review admission controller. - cat </etc/admission_controller.config -imagePolicy: - kubeConfigFile: /etc/gcp_image_review.config - allowTTL: 30 - denyTTL: 30 - retryBackoff: 500 - defaultAllow: true -EOF - fi -} - -# Arg 1: the address of the API server -function create-kubelet-kubeconfig() { - local apiserver_address="${1}" - if [[ -z "${apiserver_address}" ]]; then - echo "Must provide API server address to create Kubelet kubeconfig file!" - exit 1 - fi - echo "Creating kubelet kubeconfig file" - if [[ -z "${KUBELET_CA_CERT:-}" ]]; then - KUBELET_CA_CERT="${CA_CERT}" - fi - cat </var/lib/kubelet/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kubelet - user: - client-certificate-data: ${KUBELET_CERT} - client-key-data: ${KUBELET_KEY} -clusters: -- name: local - cluster: - server: ${apiserver_address} - certificate-authority-data: ${KUBELET_CA_CERT} -contexts: -- context: - cluster: local - user: kubelet - name: service-account-context -current-context: service-account-context -EOF -} - -# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY -# to generate a kubeconfig file for the kubelet to securely connect to the apiserver. -# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node -# should register to the apiserver. -function create-master-kubelet-auth { - # Only configure the kubelet on the master if the required variables are - # set in the environment. 
- if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then - REGISTER_MASTER_KUBELET="true" - create-kubelet-kubeconfig "https://${KUBELET_APISERVER}" - fi -} - -function create-kubeproxy-user-kubeconfig { - echo "Creating kube-proxy user kubeconfig file" - cat </var/lib/kube-proxy/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kube-proxy - user: - token: ${KUBE_PROXY_TOKEN} -clusters: -- name: local - cluster: - certificate-authority-data: ${CA_CERT} -contexts: -- context: - cluster: local - user: kube-proxy - name: service-account-context -current-context: service-account-context -EOF -} - -function create-kubecontrollermanager-kubeconfig { - echo "Creating kube-controller-manager kubeconfig file" - mkdir -p /etc/srv/kubernetes/kube-controller-manager - cat </etc/srv/kubernetes/kube-controller-manager/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kube-controller-manager - user: - token: ${KUBE_CONTROLLER_MANAGER_TOKEN} -clusters: -- name: local - cluster: - insecure-skip-tls-verify: true - server: https://localhost:443 -contexts: -- context: - cluster: local - user: kube-controller-manager - name: service-account-context -current-context: service-account-context -EOF -} - -function create-kubescheduler-kubeconfig { - echo "Creating kube-scheduler kubeconfig file" - mkdir -p /etc/srv/kubernetes/kube-scheduler - cat </etc/srv/kubernetes/kube-scheduler/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kube-scheduler - user: - token: ${KUBE_SCHEDULER_TOKEN} -clusters: -- name: local - cluster: - insecure-skip-tls-verify: true - server: https://localhost:443 -contexts: -- context: - cluster: local - user: kube-scheduler - name: kube-scheduler -current-context: kube-scheduler -EOF -} - -function create-master-etcd-auth { - if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then - local -r auth_dir="/etc/srv/kubernetes" - echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt" - echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key" - echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt" - fi -} - -function configure-docker-daemon { - echo "Configuring the Docker daemon" - local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false" - if [[ "${TEST_CLUSTER:-}" == "true" ]]; then - docker_opts+=" --log-level=debug" - else - docker_opts+=" --log-level=warn" - fi - local use_net_plugin="true" - if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then - # set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range - docker_opts+=" --bip=169.254.123.1/24" - else - use_net_plugin="false" - docker_opts+=" --bridge=cbr0" - fi - - # Decide whether to enable a docker registry mirror. This is taken from - # the "kube-env" metadata value. - if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then - echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}" - docker_opts+=" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}" - fi - - mkdir -p /etc/systemd/system/docker.service.d/ - local kubernetes_conf_dropin="/etc/systemd/system/docker.service.d/00_kubelet.conf" - cat > "${kubernetes_conf_dropin}" < "${kubelet_env_file}" - - # Write the systemd service file for kubelet. 
- cat </etc/systemd/system/kubelet.service -[Unit] -Description=Kubernetes kubelet -Requires=network-online.target -After=network-online.target - -[Service] -Restart=always -RestartSec=10 -EnvironmentFile=${kubelet_env_file} -ExecStart=${kubelet_bin} \$KUBELET_OPTS - -[Install] -WantedBy=multi-user.target -EOF - - # Flush iptables nat table - iptables -t nat -F || true - - systemctl start kubelet.service -} - -# Create the log file and set its properties. -# -# $1 is the file to create. -function prepare-log-file { - touch $1 - chmod 644 $1 - chown root:root $1 -} - -# Prepares parameters for kube-proxy manifest. -# $1 source path of kube-proxy manifest. -function prepare-kube-proxy-manifest-variables { - local -r src_file=$1; - - remove-salt-config-comments "${src_file}" - - local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig" - local kube_docker_registry="gcr.io/google_containers" - if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then - kube_docker_registry=${KUBE_DOCKER_REGISTRY} - fi - local -r kube_proxy_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-proxy.docker_tag) - local api_servers="--master=https://${KUBERNETES_MASTER_NAME}" - local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}" - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s" - if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then - params+=" ${KUBEPROXY_TEST_ARGS}" - fi - local container_env="" - local kube_cache_mutation_detector_env_name="" - local kube_cache_mutation_detector_env_value="" - if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="env:" - kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR" - kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\"" - fi - local pod_priority="" - if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]]; then - pod_priority="priorityClassName: system-node-critical" - fi - sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file} - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file} - sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file} - sed -i -e "s@{{params}}@${params}@g" ${src_file} - sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file} - sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file} - sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file} - sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file} - sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file} - sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file} - sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file} - if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then - sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file} - fi - if [[ "${CONTAINER_RUNTIME:-}" == "rkt" ]]; then - # Work arounds for https://github.com/coreos/rkt/issues/3245 and https://github.com/coreos/rkt/issues/3264 - # This is an incredibly hacky workaround. It's fragile too. If the kube-proxy command changes too much, this breaks - # TODO, this could be done much better in many other places, such as an - # init script within the container, or even within kube-proxy's code. 
- local extra_workaround_cmd="ln -sf /proc/self/mounts /etc/mtab; \ - mount -o remount,rw /proc; \ - mount -o remount,rw /proc/sys; \ - mount -o remount,rw /sys; " - sed -i -e "s@-\\s\\+kube-proxy@- ${extra_workaround_cmd} kube-proxy@g" "${src_file}" - fi -} - -# Starts kube-proxy static pod. -function start-kube-proxy { - echo "Start kube-proxy static pod" - prepare-log-file /var/log/kube-proxy.log - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest" - prepare-kube-proxy-manifest-variables "$src_file" - - cp "${src_file}" /etc/kubernetes/manifests -} - -# Replaces the variables in the etcd manifest file with the real values, and then -# copy the file to the manifest dir -# $1: value for variable 'suffix' -# $2: value for variable 'port' -# $3: value for variable 'server_port' -# $4: value for variable 'cpulimit' -# $5: pod name, which should be either etcd or etcd-events -function prepare-etcd-manifest { - local host_name=${ETCD_HOSTNAME:-$(hostname -s)} - local etcd_cluster="" - local cluster_state="new" - local etcd_protocol="http" - local etcd_creds="" - - if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then - etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth " - etcd_protocol="https" - fi - - for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do - etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3" - if [[ -n "${etcd_cluster}" ]]; then - etcd_cluster+="," - cluster_state="existing" - fi - etcd_cluster+="${etcd_host}" - done - - local -r temp_file="/tmp/$5" - cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}" - remove-salt-config-comments "${temp_file}" - sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}" - sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}" - sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}" - sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}" - sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}" - sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}" - sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}" - sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}" - # Get default storage backend from manifest file. 
- local -r default_storage_backend=$(cat "${temp_file}" | \ - grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \ - sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g") - if [[ -n "${STORAGE_BACKEND:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then - sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" "${temp_file}" - else - sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}" - fi - sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}" - if [[ -n "${ETCD_IMAGE:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - - sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}" - sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}" - if [[ -n "${ETCD_VERSION:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - # Replace the volume host path. - sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}" - mv "${temp_file}" /etc/kubernetes/manifests -} - -function start-etcd-empty-dir-cleanup-pod { - cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml" "/etc/kubernetes/manifests" -} - -# Starts etcd server pod (and etcd-events pod if needed). -# More specifically, it prepares dirs and files, sets the variable value -# in the manifests, and copies them to /etc/kubernetes/manifests. -function start-etcd-servers { - echo "Start etcd pods" - if [[ -d /etc/etcd ]]; then - rm -rf /etc/etcd - fi - if [[ -e /etc/default/etcd ]]; then - rm -f /etc/default/etcd - fi - if [[ -e /etc/systemd/system/etcd.service ]]; then - rm -f /etc/systemd/system/etcd.service - fi - if [[ -e /etc/init.d/etcd ]]; then - rm -f /etc/init.d/etcd - fi - prepare-log-file /var/log/etcd.log - prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest" - - prepare-log-file /var/log/etcd-events.log - prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest" -} - -# Calculates the following variables based on env variables, which will be used -# by the manifests of several kube-master components. 
-# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -# DOCKER_REGISTRY -function compute-master-manifest-variables { - CLOUD_CONFIG_OPT="" - CLOUD_CONFIG_VOLUME="" - CLOUD_CONFIG_MOUNT="" - if [[ -f /etc/gce.conf ]]; then - CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf" - CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}}," - CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," - fi - DOCKER_REGISTRY="gcr.io/google_containers" - if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then - DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" - fi -} - -# A helper function for removing salt configuration and comments from a file. -# This is mainly for preparing a manifest file. -# -# $1: Full path of the file to manipulate -function remove-salt-config-comments { - # Remove salt configuration. - sed -i "/^[ |\t]*{[#|%]/d" $1 - # Remove comments. - sed -i "/^[ |\t]*#/d" $1 -} - -# Starts kubernetes apiserver. -# It prepares the log file, loads the docker image, calculates variables, sets them -# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. -# -# Assumed vars (which are calculated in function compute-master-manifest-variables) -# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -# DOCKER_REGISTRY -function start-kube-apiserver { - echo "Start kubernetes api-server" - prepare-log-file /var/log/kube-apiserver.log - prepare-log-file /var/log/kube-apiserver-audit.log - - # Calculate variables and assemble the command line. - local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}" - params+=" --address=127.0.0.1" - params+=" --allow-privileged=true" - params+=" --cloud-provider=gce" - params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt" - params+=" --etcd-servers=http://127.0.0.1:2379" - params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002" - params+=" --secure-port=443" - params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert" - params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key" - params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv" - params+=" --enable-aggregator-routing=true" - if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then - params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv" - fi - if [[ -n "${STORAGE_BACKEND:-}" ]]; then - params+=" --storage-backend=${STORAGE_BACKEND}" - fi - if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then - params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}" - fi - if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then - params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s" - fi - if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then - params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}" - fi - if [[ -n "${NUM_NODES:-}" ]]; then - # If the cluster is large, increase max-requests-inflight limit in apiserver. - if [[ "${NUM_NODES}" -ge 1000 ]]; then - params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500" - fi - # Set amount of memory available for apiserver based on number of nodes. - # TODO: Once we start setting proper requests and limits for apiserver - # we should reuse the same logic here instead of current heuristic. 
- params+=" --target-ram-mb=$((${NUM_NODES} * 60))" - fi - if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then - params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" - fi - if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then - params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}" - fi - - if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then - # We currently only support enabling with a fixed path and with built-in log - # rotation "disabled" (large value) so it behaves like kube-apiserver.log. - # External log rotation should be set up the same as for kube-apiserver.log. - params+=" --audit-log-path=/var/log/kube-apiserver-audit.log" - params+=" --audit-log-maxage=0" - params+=" --audit-log-maxbackup=0" - # Lumberjack doesn't offer any way to disable size-based rotation. It also - # has an in-memory counter that doesn't notice if you truncate the file. - # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log - # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver - # never restarts. Please manually restart apiserver before this time. - params+=" --audit-log-maxsize=2000000000" - fi - - if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then - params+=" --enable-logs-handler=false" - fi - - local admission_controller_config_mount="" - local admission_controller_config_volume="" - local image_policy_webhook_config_mount="" - local image_policy_webhook_config_volume="" - if [[ -n "${ADMISSION_CONTROL:-}" ]]; then - params+=" --admission-control=${ADMISSION_CONTROL}" - if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]]; then - params+=" --admission-control-config-file=/etc/admission_controller.config" - # Mount the file to configure admission controllers if ImagePolicyWebhook is set. - admission_controller_config_mount="{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false}," - admission_controller_config_volume="{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\", \"type\": \"FileOrCreate\"}}," - # Mount the file to configure the ImagePolicyWebhook's webhook. 
- image_policy_webhook_config_mount="{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false}," - image_policy_webhook_config_volume="{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}}," - fi - fi - - if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then - params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}" - fi - if [[ -n "${RUNTIME_CONFIG:-}" ]]; then - params+=" --runtime-config=${RUNTIME_CONFIG}" - fi - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") - if [[ -n "${PROXY_SSH_USER:-}" ]]; then - params+=" --advertise-address=${vm_external_ip}" - params+=" --ssh-user=${PROXY_SSH_USER}" - params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile" - else - params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname", - fi - elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then - params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}" - fi - - local webhook_authn_config_mount="" - local webhook_authn_config_volume="" - if [[ -n "${GCP_AUTHN_URL:-}" ]]; then - params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config" - webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false}," - webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\", \"type\": \"FileOrCreate\"}}," - fi - - local authorization_mode="RBAC" - local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" - - # Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false - if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]]; then - echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this." - # Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions) - if [[ -n "${KUBE_USER:-}" || ! 
-e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then - local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl" - remove-salt-config-comments "${abac_policy_json}" - if [[ -n "${KUBE_USER:-}" ]]; then - sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}" - else - sed -i -e "/{{kube_user}}/d" "${abac_policy_json}" - fi - cp "${abac_policy_json}" /etc/srv/kubernetes/ - fi - - params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl" - authorization_mode+=",ABAC" - fi - - local webhook_config_mount="" - local webhook_config_volume="" - if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then - authorization_mode+=",Webhook" - params+=" --authorization-webhook-config-file=/etc/gcp_authz.config" - webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false}," - webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}}," - fi - params+=" --authorization-mode=${authorization_mode}" - - local container_env="" - if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"" - fi - if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then - if [[ -n "${container_env}" ]]; then - container_env="${container_env}, " - fi - container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\"" - fi - if [[ -n "${container_env}" ]]; then - container_env="\"env\":[{${container_env}}]," - fi - - src_file="${src_dir}/kube-apiserver.manifest" - remove-salt-config-comments "${src_file}" - # Evaluate variables. - local -r kube_apiserver_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-apiserver.docker_tag) - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}" - sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" - sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}" - sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" - sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}" - sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" - sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}" - sed -i -e "s@{{secure_port}}@443@g" "${src_file}" - sed -i -e "s@{{secure_port}}@8080@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" - sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}" - sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}" - sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}" - sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}" - sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" "${src_file}" - sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}" - sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}" - sed -i 
-e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}" - cp "${src_file}" /etc/kubernetes/manifests -} - -# Starts kubernetes controller manager. -# It prepares the log file, loads the docker image, calculates variables, sets them -# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. -# -# Assumed vars (which are calculated in function compute-master-manifest-variables) -# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -# DOCKER_REGISTRY -function start-kube-controller-manager { - echo "Start kubernetes controller-manager" - create-kubecontrollermanager-kubeconfig - prepare-log-file /var/log/kube-controller-manager.log - # Calculate variables and assemble the command line. - local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}" - params+=" --use-service-account-credentials" - params+=" --cloud-provider=gce" - params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig" - params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt" - params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key" - if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then - params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}" - fi - if [[ -n "${INSTANCE_PREFIX:-}" ]]; then - params+=" --cluster-name=${INSTANCE_PREFIX}" - fi - if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then - params+=" --cluster-cidr=${CLUSTER_IP_RANGE}" - fi - if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then - params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" - fi - if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then - params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}" - fi - if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then - params+=" --allocate-node-cidrs=true" - elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then - params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" - fi - if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then - params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}" - fi - if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then - params+=" --cidr-allocator-type=CloudAllocator" - params+=" --configure-cloud-routes=false" - fi - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - local -r kube_rc_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-controller-manager.docker_tag) - local container_env="" - if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}]," - fi - - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest" - remove-salt-config-comments "${src_file}" - # Evaluate variables. 
- sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}" - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}" - sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" - sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" - cp "${src_file}" /etc/kubernetes/manifests -} - -# Starts kubernetes scheduler. -# It prepares the log file, loads the docker image, calculates variables, sets them -# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. -# -# Assumed vars (which are calculated in compute-master-manifest-variables) -# DOCKER_REGISTRY -function start-kube-scheduler { - echo "Start kubernetes scheduler" - create-kubescheduler-kubeconfig - prepare-log-file /var/log/kube-scheduler.log - - # Calculate variables and set them in the manifest. - params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}" - params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig" - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then - params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}" - fi - local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag") - - # Remove salt comments and replace variables with values. - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest" - remove-salt-config-comments "${src_file}" - - sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}" - cp "${src_file}" /etc/kubernetes/manifests -} - -# Starts cluster autoscaler. -# Assumed vars (which are calculated in function compute-master-manifest-variables) -# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -function start-cluster-autoscaler { - if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then - echo "Start kubernetes cluster autoscaler" - prepare-log-file /var/log/cluster-autoscaler.log - - # Remove salt comments and replace variables with values - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest" - remove-salt-config-comments "${src_file}" - - local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}" - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" - sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" - sed -i -e "s@{%.*%}@@g" "${src_file}" - - cp "${src_file}" /etc/kubernetes/manifests - fi -} - -# A helper function for copying addon manifests and set dir/files -# permissions. 
-# -# $1: addon category under /etc/kubernetes -# $2: manifest source dir -function setup-addon-manifests { - local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2" - local -r dst_dir="/etc/kubernetes/$1/$2" - if [[ ! -d "${dst_dir}" ]]; then - mkdir -p "${dst_dir}" - fi - local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml") - if [[ -n "${files}" ]]; then - cp "${src_dir}/"*.yaml "${dst_dir}" - fi - files=$(find "${src_dir}" -maxdepth 1 -name "*.json") - if [[ -n "${files}" ]]; then - cp "${src_dir}/"*.json "${dst_dir}" - fi - files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in") - if [[ -n "${files}" ]]; then - cp "${src_dir}/"*.yaml.in "${dst_dir}" - fi - chown -R root:root "${dst_dir}" - chmod 755 "${dst_dir}" - chmod 644 "${dst_dir}"/* -} - -# Updates parameters in yaml file for prometheus-to-sd configuration, or -# removes component if it is disabled. -function update-prometheus-to-sd-parameters { - if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]]; then - sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" "$1" - sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" "$1" - else - # Removes all lines between two patterns (throws away prometheus-to-sd) - sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1" - fi -} - -# Sets up the manifests of coreDNS for k8s addons. -function setup-coredns-manifest { - local -r coredns_file="${dst_dir}/dns/coredns.yaml" - mv "${dst_dir}/dns/coredns.yaml.in" "${coredns_file}" - # Replace the salt configurations with variable values. - sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}" - sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}" - sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}" -} - -# Sets up the manifests of kube-dns for k8s addons. -function setup-kube-dns-manifest { - local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml" - mv "${dst_dir}/dns/kube-dns.yaml.in" "${kubedns_file}" - if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then - # Replace with custom GKE kube-dns deployment. 
- cat > "${kubedns_file}" < "$src_dir/kube-proxy/kube-proxy-ds.yaml" < /etc/systemd/system/rkt-api.service </dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - -# KUBERNETES_CONTAINER_RUNTIME is set by the `kube-env` file, but it's a bit of a mouthful -if [[ "${CONTAINER_RUNTIME:-}" == "" ]]; then - CONTAINER_RUNTIME="${KUBERNETES_CONTAINER_RUNTIME:-docker}" -fi - -create-dirs -ensure-local-ssds -if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then - mount-master-pd - create-master-auth - create-master-kubelet-auth - create-master-etcd-auth -else - create-kubelet-kubeconfig "https://${KUBERNETES_MASTER_NAME}" - if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then - create-kubeproxy-user-kubeconfig - fi -fi - -if [[ "${KUBERNETES_CONTAINER_RUNTIME:-}" == "rkt" ]]; then - systemctl stop docker - systemctl disable docker - setup-rkt - install-docker2aci - create-kube-controller-manager-dirs -else - configure-docker-daemon -fi - -load-docker-images -start-kubelet - -if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then - compute-master-manifest-variables - start-etcd-servers - start-etcd-empty-dir-cleanup-pod - start-kube-apiserver - start-kube-controller-manager - start-kube-scheduler - start-kube-addons - start-cluster-autoscaler - start-lb-controller - start-rescheduler -else - if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then - start-kube-proxy - fi - # Kube-registry-proxy. - if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then - start-kube-registry-proxy - fi - if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then - start-image-puller - fi -fi -echo "Done for the configuration for kubernetes" diff --git a/cluster/gce/container-linux/configure.sh b/cluster/gce/container-linux/configure.sh deleted file mode 100755 index 16dcf27a044..00000000000 --- a/cluster/gce/container-linux/configure.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Use --retry-connrefused opt only if it's supported by curl. -CURL_RETRY_CONNREFUSED="" -if curl --help | grep -q -- '--retry-connrefused'; then - CURL_RETRY_CONNREFUSED='--retry-connrefused' -fi - -function download-kube-env { - # Fetch kube-env from GCE metadata server. - local -r tmp_kube_env="/tmp/kube-env.yaml" - curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \ - -H "X-Google-Metadata-Request: True" \ - -o "${tmp_kube_env}" \ - http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env - # Convert the yaml format file into a shell-style file. 
- sed 's/: /=/' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env" - rm -f "${tmp_kube_env}" -} - -function validate-hash { - local -r file="$1" - local -r expected="$2" - - actual=$(sha1sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} ==" - return 1 - fi -} - - -# Retry a download until we get it. Takes a hash and a set of URLs. -# -# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown. -# $2+ are the URLs to download. -function download-or-bust { - local -r hash="$1" - shift 1 - - local -r urls=( $* ) - while true; do - for url in "${urls[@]}"; do - local file="${url##*/}" - rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then - echo "== Failed to download ${url}. Retrying. ==" - elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - else - if [[ -n "${hash}" ]]; then - echo "== Downloaded ${url} (SHA1 = ${hash}) ==" - else - echo "== Downloaded ${url} ==" - fi - return - fi - done - done -} - -function split-commas { - echo $1 | tr "," "\n" -} - -# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them, -# and places them into suitable directories. Files are placed in /opt/kubernetes. -function install-kube-binary-config { - cd "${KUBE_HOME}" - local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") ) - local -r server_binary_tar="${server_binary_tar_urls[0]##*/}" - if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then - local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}" - else - echo "Downloading binary release sha1 (not found in env)" - download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}" - local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1") - fi - echo "Downloading binary release tar" - download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}" - tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite - # Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files. 
- src_dir="${KUBE_HOME}/kubernetes/server/bin" - dst_dir="${KUBE_HOME}/kube-docker-files" - mkdir -p "${dst_dir}" - cp "${src_dir}/"*.docker_tag "${dst_dir}" - if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then - cp "${src_dir}/kube-proxy.tar" "${dst_dir}" - else - cp "${src_dir}/kube-apiserver.tar" "${dst_dir}" - cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}" - cp "${src_dir}/kube-scheduler.tar" "${dst_dir}" - cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}" - fi - local -r kube_bin="${KUBE_HOME}/bin" - mv "${src_dir}/kubelet" "${kube_bin}" - mv "${src_dir}/kubectl" "${kube_bin}" - - if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \ - [[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then - local -r cni_version="v0.6.0" - local -r cni_tar="cni-plugins-amd64-${cni_version}.tgz" - local -r cni_sha1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f" - download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}" - local -r cni_dir="${KUBE_HOME}/cni" - mkdir -p "${cni_dir}/bin" - tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite - mv "${cni_dir}/bin"/* "${kube_bin}" - rmdir "${cni_dir}/bin" - rm -f "${KUBE_HOME}/${cni_tar}" - fi - - mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}" - mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}" - - # Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/. - dst_dir="${KUBE_HOME}/kube-manifests" - mkdir -p "${dst_dir}" - local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") ) - local -r manifests_tar="${manifests_tar_urls[0]##*/}" - if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then - local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}" - else - echo "Downloading k8s manifests sha1 (not found in env)" - download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}" - local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1") - fi - echo "Downloading k8s manifests tar" - download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" - tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite - local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" - if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then - find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" - find "${dst_dir}" -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" - fi - cp "${dst_dir}/kubernetes/gci-trusty/container-linux-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh" - chmod -R 755 "${kube_bin}" - - # Clean up. - rm -rf "${KUBE_HOME}/kubernetes" - rm -f "${KUBE_HOME}/${server_binary_tar}" - rm -f "${KUBE_HOME}/${server_binary_tar}.sha1" - rm -f "${KUBE_HOME}/${manifests_tar}" - rm -f "${KUBE_HOME}/${manifests_tar}.sha1" -} - -######### Main Function ########## -echo "Start to install kubernetes files" -KUBE_HOME="/opt/kubernetes" -mkdir -p "${KUBE_HOME}" -download-kube-env -source "${KUBE_HOME}/kube-env" -install-kube-binary-config -echo "Done for installing kubernetes files" - -# On Container Linux, the hosts is in /usr/share/baselayout/hosts -# So we need to manually populdate the hosts file here on gce. -echo "127.0.0.1 localhost" >> /etc/hosts -echo "::1 localhost" >> /etc/hosts - -echo "Configuring hostname" -hostnamectl set-hostname $(hostname | cut -f1 -d.) 
diff --git a/cluster/gce/container-linux/health-monitor.sh b/cluster/gce/container-linux/health-monitor.sh deleted file mode 100644 index 6e8f1b03b12..00000000000 --- a/cluster/gce/container-linux/health-monitor.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script is for master and node instance health monitoring, which is -# packed in kube-manifest tarball. It is executed through a systemd service -# in cluster/gce/gci/.yaml. The env variables come from an env -# file provided by the systemd service. - -set -o nounset -set -o pipefail - -# We simply kill the process when there is a failure. Another systemd service will -# automatically restart the process. -function docker_monitoring { - while [ 1 ]; do - if ! timeout 60 docker ps > /dev/null; then - echo "Docker daemon failed!" - pkill docker - # Wait for a while, as we don't want to kill it again before it is really up. - sleep 30 - else - sleep "${SLEEP_SECONDS}" - fi - done -} - -function kubelet_monitoring { - echo "Wait for 2 minutes for kubelet to be fuctional" - # TODO(andyzheng0831): replace it with a more reliable method if possible. - sleep 120 - local -r max_seconds=10 - local output="" - while [ 1 ]; do - if ! output=$(curl --insecure -m "${max_seconds}" -f -s -S https://127.0.0.1:${KUBELET_PORT:-10250}/healthz 2>&1); then - # Print the response and/or errors. - echo $output - echo "Kubelet is unhealthy!" - pkill kubelet - # Wait for a while, as we don't want to kill it again before it is really up. - sleep 60 - else - sleep "${SLEEP_SECONDS}" - fi - done -} - - -############## Main Function ################ -if [[ "$#" -ne 1 ]]; then - echo "Usage: health-monitor.sh " - exit 1 -fi - -KUBE_ENV="/home/kubernetes/kube-env" -if [[ ! -e "${KUBE_ENV}" ]]; then - echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring" - exit 1 -fi - -SLEEP_SECONDS=10 -component=$1 -echo "Start kubernetes health monitoring for ${component}" -source "${KUBE_ENV}" -if [[ "${component}" == "docker" ]]; then - docker_monitoring -elif [[ "${component}" == "kubelet" ]]; then - kubelet_monitoring -else - echo "Health monitoring for component "${component}" is not supported!" -fi diff --git a/cluster/gce/container-linux/helper.sh b/cluster/gce/container-linux/helper.sh deleted file mode 100755 index ddd45791266..00000000000 --- a/cluster/gce/container-linux/helper.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions and constants for the Container Linux distro. - -# This file intentionally left blank diff --git a/cluster/gce/container-linux/master-helper.sh b/cluster/gce/container-linux/master-helper.sh deleted file mode 100755 index 3cd3ee3a3e5..00000000000 --- a/cluster/gce/container-linux/master-helper.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions and constant for the Container Linux distro. -source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh" - -# create-master-instance creates the master instance. If called with -# an argument, the argument is used as the name to a reserved IP -# address for the master. (In the case of upgrade/repair, we re-use -# the same IP.) -# -# It requires a whole slew of assumed variables, partially due to to -# the call to write-master-env. Listing them would be rather -# futile. Instead, we list the required calls to ensure any additional -# -# variables are set: -# ensure-temp-dir -# detect-project -# get-bearer-token -function create-master-instance { - local address="" - [[ -n ${1:-} ]] && address="${1}" - - write-master-env - create-master-instance-internal "${MASTER_NAME}" "${address}" -} - -function replicate-master-instance() { - local existing_master_zone="${1}" - local existing_master_name="${2}" - local existing_master_replicas="${3}" - - local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)" - # Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering. 
- kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")" - kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")" - ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")" - ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")" - - create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}" - - kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")" - kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")" - kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")" - kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")" - - echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml - get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt" - - create-master-instance-internal "${REPLICA_NAME}" -} - - -function create-master-instance-internal() { - local gcloud="gcloud" - local retries=5 - if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then - gcloud="gcloud beta" - fi - - local -r master_name="${1}" - local -r address="${2:-}" - - local preemptible_master="" - if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then - preemptible_master="--preemptible --maintenance-policy TERMINATE" - fi - - local network=$(make-gcloud-network-argument \ - "${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \ - "${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}") - - local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml" - metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml" - metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh" - metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt" - - local disk="name=${master_name}-pd" - disk="${disk},device-name=master-pd" - disk="${disk},mode=rw" - disk="${disk},boot=no" - disk="${disk},auto-delete=no" - - for attempt in $(seq 1 ${retries}); do - if result=$(${gcloud} compute instances create "${master_name}" \ - --project "${PROJECT}" \ - --zone "${ZONE}" \ - --machine-type "${MASTER_SIZE}" \ - --image-project="${MASTER_IMAGE_PROJECT}" \ - --image "${MASTER_IMAGE}" \ - --tags "${MASTER_TAG}" \ - --scopes "storage-ro,compute-rw,monitoring,logging-write" \ - --metadata-from-file "${metadata}" \ - --disk "${disk}" \ - --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \ - ${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \ - ${preemptible_master} \ - ${network} 2>&1); then - echo "${result}" >&2 - return 0 - else - echo "${result}" >&2 - if [[ ! 
"${result}" =~ "try again later" ]]; then - echo "Failed to create master instance due to non-retryable error" >&2 - return 1 - fi - sleep 10 - fi - done - - echo "Failed to create master instance despite ${retries} attempts" >&2 - return 1 -} - -function get-metadata() { - local zone="${1}" - local name="${2}" - local key="${3}" - - local metadata_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}" - - gcloud compute ssh "${name}" \ - --project "${PROJECT}" \ - --zone "${zone}" \ - --command "curl '${metadata_url}' -H 'Metadata-Flavor: Google'" 2>/dev/null -} diff --git a/cluster/gce/container-linux/master.yaml b/cluster/gce/container-linux/master.yaml deleted file mode 100644 index 444d3042739..00000000000 --- a/cluster/gce/container-linux/master.yaml +++ /dev/null @@ -1,57 +0,0 @@ -#cloud-config - -coreos: - update: - reboot-strategy: off - units: - - name: locksmithd.service - mask: true - - name: kube-master-installation.service - command: start - content: | - [Unit] - Description=Download and install k8s binaries and configurations - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - # Use --retry-connrefused opt only if it's supported by curl. - ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh' - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh - ExecStart=/opt/kubernetes/bin/configure.sh - - [Install] - WantedBy=kubernetes.target - - name: kube-master-configuration.service - command: start - content: | - [Unit] - Description=Configure kubernetes master - After=kube-master-installation.service - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh - ExecStart=/opt/kubernetes/bin/configure-helper.sh - - [Install] - WantedBy=kubernetes.target - - name: kubernetes.target - enable: true - command: start - content: | - [Unit] - Description=Kubernetes - - [Install] - WantedBy=multi-user.target - - name: docker.service - drop-ins: - - name: "use-cgroupfs-driver.conf" - # This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl - content: | - [Service] - Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver=" diff --git a/cluster/gce/container-linux/node-helper.sh b/cluster/gce/container-linux/node-helper.sh deleted file mode 100755 index c2432c5b0ea..00000000000 --- a/cluster/gce/container-linux/node-helper.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions and constant for the Container Linux distro. 
-source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh" - -function get-node-instance-metadata { - local metadata="" - metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," - metadata+="user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml," - metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh," - metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt" - echo "${metadata}" -} - -# $1: template name (required). -function create-node-instance-template { - local template_name="$1" - - create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)" - # TODO(euank): We should include update-strategy here. We should also switch to ignition -} diff --git a/cluster/gce/container-linux/node.yaml b/cluster/gce/container-linux/node.yaml deleted file mode 100644 index 9886679cd78..00000000000 --- a/cluster/gce/container-linux/node.yaml +++ /dev/null @@ -1,57 +0,0 @@ -#cloud-config - -coreos: - update: - reboot-strategy: off - units: - - name: locksmithd.service - mask: true - - name: kube-node-installation.service - command: start - content: | - [Unit] - Description=Download and install k8s binaries and configurations - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - # Use --retry-connrefused opt only if it's supported by curl. - ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh' - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh - ExecStart=/opt/kubernetes/bin/configure.sh - - [Install] - WantedBy=kubernetes.target - - name: kube-node-configuration.service - command: start - content: | - [Unit] - Description=Configure kubernetes master - After=kube-node-installation.service - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh - ExecStart=/opt/kubernetes/bin/configure-helper.sh - - [Install] - WantedBy=kubernetes.target - - name: kubernetes.target - enable: true - command: start - content: | - [Unit] - Description=Kubernetes - - [Install] - WantedBy=multi-user.target - - name: docker.service - drop-ins: - - name: "use-cgroupfs-driver.conf" - # This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl - content: | - [Service] - Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver=" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 46178dba693..5c4f48d12cd 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -25,14 +25,14 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}" source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/hack/lib/util.sh" -if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "container-linux" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then +if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh" else echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2 exit 1 fi 
-if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then +if [[ "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh" else echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2 From 1ddd5efaa0f50bf021958e62700966cdf2703729 Mon Sep 17 00:00:00 2001 From: Lee Verberne Date: Wed, 10 Jan 2018 18:55:50 +0100 Subject: [PATCH 107/264] Create a feature flag for sharing PID namespace This feature is described in https://features.k8s.io/495. --- pkg/features/kube_features.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index a80f86a756d..c6cbc6cd586 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -118,11 +118,17 @@ const ( ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes" // owner: @verb - // alpha: v1.8 + // alpha: v1.10 // // Allows running a "debug container" in a pod namespaces to troubleshoot a running pod. DebugContainers utilfeature.Feature = "DebugContainers" + // owner: @verb + // alpha: v1.10 + // + // Allows all containers in a pod to share a process namespace. + PodShareProcessNamespace utilfeature.Feature = "PodShareProcessNamespace" + // owner: @bsalamat // alpha: v1.8 // @@ -239,6 +245,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS LocalStorageCapacityIsolation: {Default: false, PreRelease: utilfeature.Alpha}, HugePages: {Default: true, PreRelease: utilfeature.Beta}, DebugContainers: {Default: false, PreRelease: utilfeature.Alpha}, + PodShareProcessNamespace: {Default: false, PreRelease: utilfeature.Alpha}, PodPriority: {Default: false, PreRelease: utilfeature.Alpha}, EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha}, TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha}, From 010a127314a935d8d038f8dd4559fc5b249813e4 Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Wed, 10 Jan 2018 16:36:01 -0500 Subject: [PATCH 108/264] Fix quota controller worker deadlock The resource quota controller worker pool can deadlock when: * Worker goroutines are idle waiting for work from queues * The Sync() method detects discovery updates to apply The problem is workers acquire a read lock while idle, making write lock acquisition dependent upon the presence of work in the queues. The Sync() method blocks on a pending write lock acquisition and won't unblock until every existing worker processes one item from their queue and releases their read lock. While the Sync() method's lock is pending, all new read lock acquisitions will block; if a worker does process work and release its lock, it will then become blocked on a read lock acquisition; they become blocked on Sync(). This can easily deadlock all the workers processing from one queue while any workers on the other queue remain blocked waiting for work. Fix the deadlock by refactoring workers to acquire a read lock *after* work is popped from the queue. This allows writers to get locks while workers are idle, while preserving the worker pause semantics necessary to allow safe sync. 
--- pkg/controller/resourcequota/resource_quota_controller.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index b2ae6d1f6e2..e341e1cde55 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -237,15 +237,13 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) { // worker runs a worker thread that just dequeues items, processes them, and marks them done. func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) func() { workFunc := func() bool { - - rq.workerLock.RLock() - defer rq.workerLock.RUnlock() - key, quit := queue.Get() if quit { return true } defer queue.Done(key) + rq.workerLock.RLock() + defer rq.workerLock.RUnlock() err := rq.syncHandler(key.(string)) if err == nil { queue.Forget(key) From ac48b1b075efa31f2109f818159bcaff95947609 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Wed, 10 Jan 2018 12:15:44 -0800 Subject: [PATCH 109/264] Add `cloud` for the generated GCE interfaces, support structs Note: this does not wire the generated code. --- pkg/cloudprovider/providers/gce/BUILD | 3 + pkg/cloudprovider/providers/gce/gce.go | 35 ++++++++---- pkg/cloudprovider/providers/gce/support.go | 66 ++++++++++++++++++++++ 3 files changed, 92 insertions(+), 12 deletions(-) create mode 100644 pkg/cloudprovider/providers/gce/support.go diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index b112c95912c..c6583798dc8 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -41,12 +41,15 @@ go_library( "gce_util.go", "gce_zones.go", "metrics.go", + "support.go", "token_source.go", ], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/master/ports:go_default_library", diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index b515c8ff67a..b734a9b8d8e 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -30,6 +30,13 @@ import ( gcfg "gopkg.in/gcfg.v1" "cloud.google.com/go/compute/metadata" + "github.com/golang/glog" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + computealpha "google.golang.org/api/compute/v0.alpha" + computebeta "google.golang.org/api/compute/v0.beta" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -41,18 +48,12 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/controller" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/version" - - "github.com/golang/glog" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - computealpha "google.golang.org/api/compute/v0.alpha" - computebeta "google.golang.org/api/compute/v0.beta" - compute "google.golang.org/api/compute/v1" - container "google.golang.org/api/container/v1" ) 
const ( @@ -147,6 +148,9 @@ type GCECloud struct { // the corresponding api is enabled. // If not enabled, it should return error. AlphaFeatureGate *AlphaFeatureGate + + // New code generated interface to the GCE compute library. + c cloud.Cloud } // TODO: replace gcfg with json @@ -243,7 +247,6 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { return nil, err } return CreateGCECloud(cloudConfig) - } func readConfig(reader io.Reader) (*ConfigFile, error) { @@ -363,11 +366,12 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed. func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { - // Remove any pre-release version and build metadata from the semver, leaving only the MAJOR.MINOR.PATCH portion. - // See http://semver.org/. + // Remove any pre-release version and build metadata from the semver, + // leaving only the MAJOR.MINOR.PATCH portion. See http://semver.org/. version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v") - // Create a user-agent header append string to supply to the Google API clients, to identify Kubernetes as the origin of the GCP API calls. + // Create a user-agent header append string to supply to the Google API + // clients, to identify Kubernetes as the origin of the GCP API calls. userAgent := fmt.Sprintf("Kubernetes/%s (%s %s)", version, runtime.GOOS, runtime.GOARCH) // Use ProjectID for NetworkProjectID, if it wasn't explicitly set. @@ -506,6 +510,13 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { } gce.manager = &gceServiceManager{gce} + gce.c = cloud.NewGCE(&cloud.Service{ + GA: service, + Alpha: serviceAlpha, + Beta: serviceBeta, + ProjectRouter: &gceProjectRouter{gce}, + RateLimiter: &gceRateLimiter{gce}, + }) return gce, nil } diff --git a/pkg/cloudprovider/providers/gce/support.go b/pkg/cloudprovider/providers/gce/support.go new file mode 100644 index 00000000000..42903af4579 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/support.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "context" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// gceProjectRouter sends requests to the appropriate project ID. +type gceProjectRouter struct { + gce *GCECloud +} + +// ProjectID returns the project ID to be used for the given operation. +func (r *gceProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string { + switch service { + case "Firewalls", "Routes": + return r.gce.NetworkProjectID() + default: + return r.gce.projectID + } +} + +// gceRateLimiter implements cloud.RateLimiter. +type gceRateLimiter struct { + gce *GCECloud +} + +// Accept blocks until the operation can be performed. 
+// +// TODO: the current cloud provider policy doesn't seem to be correct as it +// only rate limits the polling operations, but not the /submission/ of +// operations. +func (l *gceRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) error { + if key.Operation == "Get" && key.Service == "Operations" { + ch := make(chan struct{}) + go func() { + l.gce.operationPollRateLimiter.Accept() + close(ch) + }() + select { + case <-ch: + break + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} From ce0a8303d6f20f20de774bb5e32dcb6f972ffce0 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Tue, 9 Jan 2018 13:44:55 -0800 Subject: [PATCH 110/264] integration: add retries to node authorizer tests --- test/integration/auth/BUILD | 1 + test/integration/auth/node_test.go | 267 +++++++++++++++++------------ 2 files changed, 158 insertions(+), 110 deletions(-) diff --git a/test/integration/auth/BUILD b/test/integration/auth/BUILD index ccd4ec402c4..897557a21a9 100644 --- a/test/integration/auth/BUILD +++ b/test/integration/auth/BUILD @@ -58,6 +58,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library", diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index 32a699b12c4..1199ad9cbb1 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -20,8 +20,6 @@ import ( "fmt" "net/http" "net/http/httptest" - "path/filepath" - "runtime" "testing" "time" @@ -29,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/request/bearertoken" "k8s.io/apiserver/pkg/authentication/token/tokenfile" "k8s.io/apiserver/pkg/authentication/user" @@ -149,125 +148,159 @@ func TestNodeAuthorizer(t *testing.T) { t.Fatal(err) } - getSecret := func(client clientset.Interface) error { - _, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{}) - return err + getSecret := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{}) + return err + } } - getPVSecret := func(client clientset.Interface) error { - _, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{}) - return err + getPVSecret := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{}) + return err + } } - getConfigMap := func(client clientset.Interface) error { - _, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{}) - return err + getConfigMap := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{}) + return err + } } - getPVC := func(client clientset.Interface) error { - _, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{}) - return err + getPVC := func(client clientset.Interface) func() error { + return func() error { + _, err := 
client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{}) + return err + } } - getPV := func(client clientset.Interface) error { - _, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{}) - return err + getPV := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{}) + return err + } } - createNode2NormalPod := func(client clientset.Interface) error { - _, err := client.Core().Pods("ns").Create(&api.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, - Spec: api.PodSpec{ - NodeName: "node2", - Containers: []api.Container{{Name: "image", Image: "busybox"}}, - Volumes: []api.Volume{ - {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}}, - {Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}}, - {Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}}, + createNode2NormalPod := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Pods("ns").Create(&api.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, + Spec: api.PodSpec{ + NodeName: "node2", + Containers: []api.Container{{Name: "image", Image: "busybox"}}, + Volumes: []api.Volume{ + {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}}, + {Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}}, + {Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}}, + }, }, - }, - }) - return err + }) + return err + } } - updateNode2NormalPodStatus := func(client clientset.Interface) error { - startTime := metav1.NewTime(time.Now()) - _, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, - Status: api.PodStatus{StartTime: &startTime}, - }) - return err + updateNode2NormalPodStatus := func(client clientset.Interface) func() error { + return func() error { + startTime := metav1.NewTime(time.Now()) + _, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, + Status: api.PodStatus{StartTime: &startTime}, + }) + return err + } } - deleteNode2NormalPod := func(client clientset.Interface) error { - zero := int64(0) - return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + deleteNode2NormalPod := func(client clientset.Interface) func() error { + return func() error { + zero := int64(0) + return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + } } - createNode2MirrorPod := func(client clientset.Interface) error { - _, err := client.Core().Pods("ns").Create(&api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node2mirrorpod", - Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"}, - }, - Spec: api.PodSpec{ - NodeName: "node2", - Containers: []api.Container{{Name: "image", Image: "busybox"}}, - }, - }) - return err + createNode2MirrorPod := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Pods("ns").Create(&api.Pod{ + ObjectMeta: metav1.ObjectMeta{ 
+ Name: "node2mirrorpod", + Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"}, + }, + Spec: api.PodSpec{ + NodeName: "node2", + Containers: []api.Container{{Name: "image", Image: "busybox"}}, + }, + }) + return err + } } - deleteNode2MirrorPod := func(client clientset.Interface) error { - zero := int64(0) - return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + deleteNode2MirrorPod := func(client clientset.Interface) func() error { + return func() error { + zero := int64(0) + return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + } } - createNode2 := func(client clientset.Interface) error { - _, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}) - return err + createNode2 := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}) + return err + } } - updateNode2Status := func(client clientset.Interface) error { - _, err := client.Core().Nodes().UpdateStatus(&api.Node{ - ObjectMeta: metav1.ObjectMeta{Name: "node2"}, - Status: api.NodeStatus{}, - }) - return err + updateNode2Status := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Nodes().UpdateStatus(&api.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "node2"}, + Status: api.NodeStatus{}, + }) + return err + } } - deleteNode2 := func(client clientset.Interface) error { - return client.Core().Nodes().Delete("node2", nil) + deleteNode2 := func(client clientset.Interface) func() error { + return func() error { + return client.Core().Nodes().Delete("node2", nil) + } } - createNode2NormalPodEviction := func(client clientset.Interface) error { - return client.Policy().Evictions("ns").Evict(&policy.Eviction{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "policy/v1beta1", - Kind: "Eviction", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "node2normalpod", - Namespace: "ns", - }, - }) + createNode2NormalPodEviction := func(client clientset.Interface) func() error { + return func() error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2normalpod", + Namespace: "ns", + }, + }) + } } - createNode2MirrorPodEviction := func(client clientset.Interface) error { - return client.Policy().Evictions("ns").Evict(&policy.Eviction{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "policy/v1beta1", - Kind: "Eviction", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "node2mirrorpod", - Namespace: "ns", - }, - }) + createNode2MirrorPodEviction := func(client clientset.Interface) func() error { + return func() error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2mirrorpod", + Namespace: "ns", + }, + }) + } } capacity := 50 - updatePVCCapacity := func(client clientset.Interface) error { - capacity++ - statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity) - patchBytes := []byte(statusString) - _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") - return err + updatePVCCapacity := func(client clientset.Interface) func() error { + return func() error { + 
capacity++ + statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity) + patchBytes := []byte(statusString) + _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") + return err + } } - updatePVCPhase := func(client clientset.Interface) error { - patchBytes := []byte(`{"status":{"phase": "Bound"}}`) - _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") - return err + updatePVCPhase := func(client clientset.Interface) func() error { + return func() error { + patchBytes := []byte(`{"status":{"phase": "Bound"}}`) + _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") + return err + } } nodeanonClient := clientsetForToken(tokenNodeUnknown, clientConfig) @@ -386,23 +419,37 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, updatePVCPhase(node2Client)) } -func expectForbidden(t *testing.T, err error) { - if !errors.IsForbidden(err) { - _, file, line, _ := runtime.Caller(1) - t.Errorf("%s:%d: Expected forbidden error, got %v", filepath.Base(file), line, err) +// expect executes a function a set number of times until it either returns the +// expected error or executes too many times. It returns if the retries timed +// out and the last error returned by the method. +func expect(f func() error, wantErr func(error) bool) (timeout bool, lastErr error) { + err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) { + lastErr = f() + if wantErr(lastErr) { + return true, nil + } + return false, nil + }) + return err == nil, lastErr +} + +func expectForbidden(t *testing.T, f func() error) { + t.Helper() + if ok, err := expect(f, errors.IsForbidden); !ok { + t.Errorf("Expected forbidden error, got %v", err) } } -func expectNotFound(t *testing.T, err error) { - if !errors.IsNotFound(err) { - _, file, line, _ := runtime.Caller(1) - t.Errorf("%s:%d: Expected notfound error, got %v", filepath.Base(file), line, err) +func expectNotFound(t *testing.T, f func() error) { + t.Helper() + if ok, err := expect(f, errors.IsNotFound); !ok { + t.Errorf("Expected notfound error, got %v", err) } } -func expectAllowed(t *testing.T, err error) { - if err != nil { - _, file, line, _ := runtime.Caller(1) - t.Errorf("%s:%d: Expected no error, got %v", filepath.Base(file), line, err) +func expectAllowed(t *testing.T, f func() error) { + t.Helper() + if ok, err := expect(f, func(e error) bool { return e == nil }); !ok { + t.Errorf("Expected no error, got %v", err) } } From 9abb3160061c95a516ecc855518f1102225fdd87 Mon Sep 17 00:00:00 2001 From: Lion-Wei Date: Fri, 5 Jan 2018 11:42:39 +0800 Subject: [PATCH 111/264] fix ipvs proxy mode kubeadm usage --- pkg/proxy/ipvs/README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/proxy/ipvs/README.md b/pkg/proxy/ipvs/README.md index 52297b7c90b..6a89697772f 100644 --- a/pkg/proxy/ipvs/README.md +++ b/pkg/proxy/ipvs/README.md @@ -39,9 +39,11 @@ Then the configuration file is similar to: kind: MasterConfiguration apiVersion: kubeadm.k8s.io/v1alpha1 ... -featureGates: - SupportIPVSProxyMode: true -mode: ipvs +kubeProxy: + config: + featureGates: SupportIPVSProxyMode=true + mode: ipvs +... 
``` ## Debug From dc5384a139cc05ac31f76758c2c0923ce5cb88ff Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Fri, 17 Nov 2017 13:10:25 +0800 Subject: [PATCH 112/264] Don't rewrite device health --- pkg/kubelet/cm/deviceplugin/device_plugin_stub.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index 01f08c15987..9969e99989b 100644 --- a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go +++ b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -115,16 +115,8 @@ func (m *Stub) Register(kubeletEndpoint, resourceName string) error { // ListAndWatch lists devices and update that list according to the Update call func (m *Stub) ListAndWatch(e *pluginapi.Empty, s pluginapi.DevicePlugin_ListAndWatchServer) error { log.Println("ListAndWatch") - var devs []*pluginapi.Device - for _, d := range m.devs { - devs = append(devs, &pluginapi.Device{ - ID: d.ID, - Health: pluginapi.Healthy, - }) - } - - s.Send(&pluginapi.ListAndWatchResponse{Devices: devs}) + s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs}) for { select { From 1c73497c7e35f6f7596b127b75688879dd245f23 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Tue, 9 Jan 2018 23:17:12 -0800 Subject: [PATCH 113/264] Add zouyee as a reviewer for the cluster/centos directory. --- cluster/centos/OWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 cluster/centos/OWNERS diff --git a/cluster/centos/OWNERS b/cluster/centos/OWNERS new file mode 100644 index 00000000000..0edb92d5f9c --- /dev/null +++ b/cluster/centos/OWNERS @@ -0,0 +1,2 @@ +reviewers: + - zouyee From 671c4eb2b79941983d89ef5b07b25b0d546504ad Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Thu, 11 Jan 2018 14:41:45 +0800 Subject: [PATCH 114/264] Add e2e test logic for device plugin --- .../cm/deviceplugin/device_plugin_stub.go | 28 ++- test/e2e_node/device_plugin.go | 162 +++++++++++++++++- test/e2e_node/util.go | 1 + 3 files changed, 188 insertions(+), 3 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index 01f08c15987..a04389cc192 100644 --- a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go +++ b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -38,6 +38,18 @@ type Stub struct { update chan []*pluginapi.Device server *grpc.Server + + // allocFunc is used for handling allocation request + allocFunc stubAllocFunc +} + +// stubAllocFunc is the function called when receive an allocation request from Kubelet +type stubAllocFunc func(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) + +func defaultAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) { + var response pluginapi.AllocateResponse + + return &response, nil } // NewDevicePluginStub returns an initialized DevicePlugin Stub. 
@@ -48,9 +60,16 @@ func NewDevicePluginStub(devs []*pluginapi.Device, socket string) *Stub { stop: make(chan interface{}), update: make(chan []*pluginapi.Device), + + allocFunc: defaultAllocFunc, } } +// SetAllocFunc sets allocFunc of the device plugin +func (m *Stub) SetAllocFunc(f stubAllocFunc) { + m.allocFunc = f +} + // Start starts the gRPC server of the device plugin func (m *Stub) Start() error { err := m.cleanup() @@ -145,8 +164,13 @@ func (m *Stub) Update(devs []*pluginapi.Device) { func (m *Stub) Allocate(ctx context.Context, r *pluginapi.AllocateRequest) (*pluginapi.AllocateResponse, error) { log.Printf("Allocate, %+v", r) - var response pluginapi.AllocateResponse - return &response, nil + devs := make(map[string]pluginapi.Device) + + for _, dev := range m.devs { + devs[dev.ID] = *dev + } + + return m.allocFunc(r, devs) } func (m *Stub) cleanup() error { diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go index 9748d31d68a..826d3b66989 100644 --- a/test/e2e_node/device_plugin.go +++ b/test/e2e_node/device_plugin.go @@ -40,7 +40,130 @@ import ( . "github.com/onsi/gomega" ) -// makeBusyboxPod returns a simple Pod spec with a pause container +const ( + // fake resource name + resourceName = "fake.com/resource" +) + +// Serial because the test restarts Kubelet +var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin] [Serial] [Disruptive]", func() { + f := framework.NewDefaultFramework("device-plugin-errors") + + Context("DevicePlugin", func() { + By("Enabling support for Device Plugin") + tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + initialConfig.FeatureGates[string(features.DevicePlugins)] = true + }) + + It("Verifies the Kubelet device plugin functionality.", func() { + + By("Wait for node is ready") + framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout) + + By("Start stub device plugin") + // fake devices for e2e test + devs := []*pluginapi.Device{ + {ID: "Dev-1", Health: pluginapi.Healthy}, + {ID: "Dev-2", Health: pluginapi.Healthy}, + } + + socketPath := pluginapi.DevicePluginPath + "dp." 
+ fmt.Sprintf("%d", time.Now().Unix()) + + dp1 := dp.NewDevicePluginStub(devs, socketPath) + dp1.SetAllocFunc(stubAllocFunc) + err := dp1.Start() + framework.ExpectNoError(err) + + By("Register resources") + err = dp1.Register(pluginapi.KubeletSocket, resourceName) + framework.ExpectNoError(err) + + By("Waiting for the resource exported by the stub device plugin to become available on the local node") + devsLen := int64(len(devs)) + Eventually(func() int64 { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + return numberOfDevices(node, resourceName) + }, 30*time.Second, framework.Poll).Should(Equal(devsLen)) + + By("Creating one pod on node with at least one fake-device") + podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs" + pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + deviceIDRE := "stub devices: (Dev-[0-9]+)" + count1, devId1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, 0, deviceIDRE) + Expect(devId1).To(Not(Equal(""))) + + pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + + By("Restarting Kubelet and waiting for the current running pod to restart") + restartKubelet() + + By("Confirming that after a kubelet and pod restart, fake-device assignement is kept") + count1, devIdRestart1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE) + Expect(devIdRestart1).To(Equal(devId1)) + + By("Wait for node is ready") + framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout) + + By("Re-Register resources") + dp1 = dp.NewDevicePluginStub(devs, socketPath) + dp1.SetAllocFunc(stubAllocFunc) + err = dp1.Start() + framework.ExpectNoError(err) + + err = dp1.Register(pluginapi.KubeletSocket, resourceName) + framework.ExpectNoError(err) + + By("Waiting for resource to become available on the local node after re-registration") + Eventually(func() int64 { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + return numberOfDevices(node, resourceName) + }, 30*time.Second, framework.Poll).Should(Equal(devsLen)) + + By("Creating another pod") + pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + + By("Checking that pods got a different GPU") + count2, devId2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, 1, deviceIDRE) + + Expect(devId1).To(Not(Equal(devId2))) + + By("Deleting device plugin.") + err = dp1.Stop() + framework.ExpectNoError(err) + + By("Waiting for stub device plugin to become unavailable on the local node") + Eventually(func() bool { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + return numberOfDevices(node, resourceName) <= 0 + }, 10*time.Minute, framework.Poll).Should(BeTrue()) + + By("Checking that scheduled pods can continue to run even after we delete device plugin.") + count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE) + Expect(devIdRestart1).To(Equal(devId1)) + count2, devIdRestart2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+1, deviceIDRE) + Expect(devIdRestart2).To(Equal(devId2)) + + By("Restarting Kubelet.") + restartKubelet() + + By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.") + count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, 
count1+2, deviceIDRE) + Expect(devIdRestart1).To(Equal(devId1)) + count2, devIdRestart2 = parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+2, deviceIDRE) + Expect(devIdRestart2).To(Equal(devId2)) + + // Cleanup + f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + }) + }) +}) + +// makeBusyboxPod returns a simple Pod spec with a busybox container // that requests resourceName and runs the specified command. func makeBusyboxPod(resourceName, cmd string) *v1.Pod { podName := "device-plugin-test-" + string(uuid.NewUUID()) @@ -78,16 +201,19 @@ func parseLogFromNRuns(f *framework.Framework, podName string, contName string, count = p.Status.ContainerStatuses[0].RestartCount return count >= restartCount }, 5*time.Minute, framework.Poll).Should(BeTrue()) + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } + framework.Logf("got pod logs: %v", logs) regex := regexp.MustCompile(re) matches := regex.FindStringSubmatch(logs) if len(matches) < 2 { return count, "" } + return count, matches[1] } @@ -100,3 +226,37 @@ func numberOfDevices(node *v1.Node, resourceName string) int64 { return val.Value() } + +// stubAllocFunc will pass to stub device plugin +func stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) { + var response pluginapi.AllocateResponse + for _, requestID := range r.DevicesIDs { + dev, ok := devs[requestID] + if !ok { + return nil, fmt.Errorf("invalid allocation request with non-existing device %s", requestID) + } + + if dev.Health != pluginapi.Healthy { + return nil, fmt.Errorf("invalid allocation request with unhealthy device: %s", requestID) + } + + // create fake device file + fpath := filepath.Join("/tmp", dev.ID) + + // clean first + os.RemoveAll(fpath) + f, err := os.Create(fpath) + if err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("failed to create fake device file: %s", err) + } + + f.Close() + + response.Mounts = append(response.Mounts, &pluginapi.Mount{ + ContainerPath: fpath, + HostPath: fpath, + }) + } + + return &response, nil +} diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index f81ab6f5d8b..9a9e39b91eb 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -24,6 +24,7 @@ import ( "net/http" "os/exec" "reflect" + "regexp" "strings" "time" From 8d44e0b38a122809b86c3d24e1ee9d0a9f289e46 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Wed, 10 Jan 2018 16:24:46 -0800 Subject: [PATCH 115/264] Remove the deprecated vagrant kube-up implementation. 
--- Vagrantfile | 325 ---------------- build/lib/release.sh | 1 - build/release-tars/BUILD | 1 - cluster/saltbase/README.md | 9 +- cluster/vagrant/OWNERS | 36 -- cluster/vagrant/config-default.sh | 122 ------ cluster/vagrant/config-test.sh | 29 -- cluster/vagrant/pod-ip-test.sh | 105 ------ cluster/vagrant/provision-master.sh | 122 ------ cluster/vagrant/provision-network-master.sh | 91 ----- cluster/vagrant/provision-network-node.sh | 51 --- cluster/vagrant/provision-node.sh | 88 ----- cluster/vagrant/provision-utils.sh | 222 ----------- cluster/vagrant/util.sh | 389 -------------------- test/e2e/framework/test_context.go | 2 +- test/e2e/framework/util.go | 6 - 16 files changed, 5 insertions(+), 1594 deletions(-) delete mode 100644 Vagrantfile delete mode 100644 cluster/vagrant/OWNERS delete mode 100755 cluster/vagrant/config-default.sh delete mode 100644 cluster/vagrant/config-test.sh delete mode 100755 cluster/vagrant/pod-ip-test.sh delete mode 100755 cluster/vagrant/provision-master.sh delete mode 100644 cluster/vagrant/provision-network-master.sh delete mode 100644 cluster/vagrant/provision-network-node.sh delete mode 100755 cluster/vagrant/provision-node.sh delete mode 100755 cluster/vagrant/provision-utils.sh delete mode 100755 cluster/vagrant/util.sh diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 8743a6f34af..00000000000 --- a/Vagrantfile +++ /dev/null @@ -1,325 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! -VAGRANTFILE_API_VERSION = "2" - -# Require a recent version of vagrant otherwise some have reported errors setting host names on boxes -Vagrant.require_version ">= 1.7.4" - -if ARGV.first == "up" && ENV['USING_KUBE_SCRIPTS'] != 'true' - raise Vagrant::Errors::VagrantError.new, < { - 'fedora' => { - # :box_url and :box_version are optional (and mutually exclusive); - # if :box_url is omitted the box will be retrieved by :box_name (and - # :box_version if provided) from - # http://atlas.hashicorp.com/boxes/search (formerly - # http://vagrantcloud.com/); this allows you override :box_name with - # your own value so long as you provide :box_url; for example, the - # "official" name of this box is "rickard-von-essen/ - # opscode_fedora-20", but by providing the URL and our own name, we - # make it appear as yet another provider under the "kube-fedora22" - # box - :box_name => 'kube-fedora23', - :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/parallels/opscode_fedora-23_chef-provisionerless.box' - } - }, - :virtualbox => { - 'fedora' => { - :box_name => 'kube-fedora23', - :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-23_chef-provisionerless.box' - } - }, - :libvirt => { - 'fedora' => { - :box_name => 'kube-fedora23', - :box_url => 'https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box' - } - }, - :vmware_desktop => { - 'fedora' => { - :box_name => 'kube-fedora23', - :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-23_chef-provisionerless.box' - } - }, - :vsphere => { - 'fedora' => { - :box_name => 'vsphere-dummy', - :box_url => 'https://github.com/deromka/vagrant-vsphere/blob/master/vsphere-dummy.box?raw=true' - } - } -} - -# Give access to all physical cpu cores -# Previously cargo-culted from here: -# http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck -# 
Rewritten to actually determine the number of hardware cores instead of assuming -# that the host has hyperthreading enabled. -host = RbConfig::CONFIG['host_os'] -if host =~ /darwin/ - $vm_cpus = `sysctl -n hw.physicalcpu`.to_i -elsif host =~ /linux/ - #This should work on most processors, however it will fail on ones without the core id field. - #So far i have only seen this on a raspberry pi. which you probably don't want to run vagrant on anyhow... - #But just in case we'll default to the result of nproc if we get 0 just to be safe. - $vm_cpus = `cat /proc/cpuinfo | grep 'core id' | sort -u | wc -l`.to_i - if $vm_cpus < 1 - $vm_cpus = `nproc`.to_i - end -else # sorry Windows folks, I can't help you - $vm_cpus = 2 -end - -# Give VM 1024MB of RAM by default -# In Fedora VM, tmpfs device is mapped to /tmp. tmpfs is given 50% of RAM allocation. -# When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens. -# This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.) -$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i -$vm_node_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 2048).to_i - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - if Vagrant.has_plugin?("vagrant-proxyconf") - $http_proxy = ENV['KUBERNETES_HTTP_PROXY'] || "" - $https_proxy = ENV['KUBERNETES_HTTPS_PROXY'] || "" - $no_proxy = ENV['KUBERNETES_NO_PROXY'] || "127.0.0.1" - config.proxy.http = $http_proxy - config.proxy.https = $https_proxy - config.proxy.no_proxy = $no_proxy - end - - # this corrects a bug in 1.8.5 where an invalid SSH key is inserted. - if Vagrant::VERSION == "1.8.5" - config.ssh.insert_key = false - end - - def setvmboxandurl(config, provider) - if ENV['KUBERNETES_BOX_NAME'] then - config.vm.box = ENV['KUBERNETES_BOX_NAME'] - - if ENV['KUBERNETES_BOX_URL'] then - config.vm.box_url = ENV['KUBERNETES_BOX_URL'] - end - - if ENV['KUBERNETES_BOX_VERSION'] then - config.vm.box_version = ENV['KUBERNETES_BOX_VERSION'] - end - else - config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name] - - if $kube_provider_boxes[provider][$kube_os][:box_url] then - config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url] - end - - if $kube_provider_boxes[provider][$kube_os][:box_version] then - config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version] - end - end - end - - def customize_vm(config, vm_mem) - - if $use_nfs then - config.vm.synced_folder ".", "/vagrant", nfs: true - elsif $use_rsync then - opts = {} - if ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'] then - opts[:rsync__args] = ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'].split(" ") - end - if ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'] then - opts[:rsync__exclude] = ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'].split(" ") - end - config.vm.synced_folder ".", "/vagrant", opts - end - - # Try VMWare Fusion first (see - # https://docs.vagrantup.com/v2/providers/basic_usage.html) - config.vm.provider :vmware_fusion do |v, override| - setvmboxandurl(override, :vmware_desktop) - v.vmx['memsize'] = vm_mem - v.vmx['numvcpus'] = $vm_cpus - end - - # configure libvirt provider - config.vm.provider :libvirt do |v, override| - setvmboxandurl(override, :libvirt) - v.memory = vm_mem - v.cpus = $vm_cpus - v.nested = true - v.volume_cache = 'none' - end - - # Then try VMWare Workstation - config.vm.provider :vmware_workstation do |v, override| - setvmboxandurl(override, 
:vmware_desktop) - v.vmx['memsize'] = vm_mem - v.vmx['numvcpus'] = $vm_cpus - end - - # Then try Parallels - config.vm.provider :parallels do |v, override| - setvmboxandurl(override, :parallels) - v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem] - v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', $vm_cpus] - - # Don't attempt to update the Parallels tools on the image (this can - # be done manually if necessary) - v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off'] - - # Set up Parallels folder sharing to behave like VirtualBox (i.e., - # mount the current directory as /vagrant and that's it) - v.customize ['set', :id, '--shf-guest', 'off'] - v.customize ['set', :id, '--shf-guest-automount', 'off'] - v.customize ['set', :id, '--shf-host', 'on'] - - # Synchronize VM clocks to host clock (Avoid certificate invalid issue) - v.customize ['set', :id, '--time-sync', 'on'] - - # Remove all auto-mounted "shared folders"; the result seems to - # persist between runs (i.e., vagrant halt && vagrant up) - override.vm.provision :shell, :inline => (%q{ - set -ex - if [ -d /media/psf ]; then - for i in /media/psf/*; do - if [ -d "${i}" ]; then - umount "${i}" || true - rmdir -v "${i}" - fi - done - rmdir -v /media/psf - fi - exit - }).strip - end - - # Then try vsphere - config.vm.provider :vsphere do |vsphere, override| - setvmboxandurl(override, :vsphere) - - #config.vm.hostname = ENV['MASTER_NAME'] - - config.ssh.username = ENV['MASTER_USER'] - config.ssh.password = ENV['MASTER_PASSWD'] - - config.ssh.pty = true - config.ssh.insert_key = true - #config.ssh.private_key_path = '~/.ssh/id_rsa_vsphere' - - # Don't attempt to update the tools on the image (this can - # be done manually if necessary) - # vsphere.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off'] - - # The vSphere host we're going to connect to - vsphere.host = ENV['VAGRANT_VSPHERE_URL'] - - # The ESX host for the new VM - vsphere.compute_resource_name = ENV['VAGRANT_VSPHERE_RESOURCE_POOL'] - - # The resource pool for the new VM - #vsphere.resource_pool_name = 'Comp' - - # path to folder where new VM should be created, if not specified template's parent folder will be used - vsphere.vm_base_path = ENV['VAGRANT_VSPHERE_BASE_PATH'] - - # The template we're going to clone - vsphere.template_name = ENV['VAGRANT_VSPHERE_TEMPLATE_NAME'] - - # The name of the new machine - #vsphere.name = ENV['MASTER_NAME'] - - # vSphere login - vsphere.user = ENV['VAGRANT_VSPHERE_USERNAME'] - - # vSphere password - vsphere.password = ENV['VAGRANT_VSPHERE_PASSWORD'] - - # cpu count - vsphere.cpu_count = $vm_cpus - - # memory in MB - vsphere.memory_mb = vm_mem - - # If you don't have SSL configured correctly, set this to 'true' - vsphere.insecure = ENV['VAGRANT_VSPHERE_INSECURE'] - end - - - # Don't attempt to update Virtualbox Guest Additions (requires gcc) - if Vagrant.has_plugin?("vagrant-vbguest") then - config.vbguest.auto_update = false - end - # Finally, fall back to VirtualBox - config.vm.provider :virtualbox do |v, override| - setvmboxandurl(override, :virtualbox) - v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem] - v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus] - - # Use faster paravirtualized networking - v.customize ["modifyvm", :id, "--nictype1", "virtio"] - v.customize ["modifyvm", :id, "--nictype2", "virtio"] - end - end - - # Kubernetes master - config.vm.define "master" do |c| - customize_vm c, $vm_master_mem - if 
ENV['KUBE_TEMP'] then - script = "#{ENV['KUBE_TEMP']}/master-start.sh" - c.vm.provision "shell", run: "always", path: script - end - c.vm.network "private_network", ip: "#{$master_ip}" - end - - # Kubernetes node - $num_node.times do |n| - node_vm_name = "node-#{n+1}" - - config.vm.define node_vm_name do |node| - customize_vm node, $vm_node_mem - - node_ip = $node_ips[n] - if ENV['KUBE_TEMP'] then - script = "#{ENV['KUBE_TEMP']}/node-start-#{n}.sh" - node.vm.provision "shell", run: "always", path: script - end - node.vm.network "private_network", ip: "#{node_ip}" - end - end -end diff --git a/build/lib/release.sh b/build/lib/release.sh index 870451601f6..a34f4162a0e 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -517,7 +517,6 @@ EOF cp -R "${KUBE_ROOT}/docs" "${release_stage}/" cp "${KUBE_ROOT}/README.md" "${release_stage}/" cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/" - cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/" echo "${KUBE_GIT_VERSION}" > "${release_stage}/version" diff --git a/build/release-tars/BUILD b/build/release-tars/BUILD index 39f588e9518..27773468028 100644 --- a/build/release-tars/BUILD +++ b/build/release-tars/BUILD @@ -193,7 +193,6 @@ pkg_tar( files = [ "//:Godeps/LICENSES", "//:README.md", - "//:Vagrantfile", "//:version", "//cluster:all-srcs", "//docs:all-srcs", diff --git a/cluster/saltbase/README.md b/cluster/saltbase/README.md index 765d801ff7d..d3d53792838 100644 --- a/cluster/saltbase/README.md +++ b/cluster/saltbase/README.md @@ -4,11 +4,10 @@ This is the root of the SaltStack configuration for Kubernetes. A high level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](https://kubernetes.io/docs/admin/salt/) This SaltStack configuration currently applies to default -configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and -Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an -arbitrary configuration, but those are only the in-tree OS/IaaS -combinations supported today.) As you peruse the configuration, these -are shorthanded as `gce`, `vagrant`, `aws`, `azure-legacy` in `grains.cloud`; +configurations for Debian-on-GCE. (That doesn't mean it can't +be made to apply to an arbitrary configuration, but those are +only the in-tree OS/IaaS combinations supported today.) As you +peruse the configuration, this is shorthanded as `gce`, in `grains.cloud`; the documentation in this tree uses this same shorthand for convenience. See more: diff --git a/cluster/vagrant/OWNERS b/cluster/vagrant/OWNERS deleted file mode 100644 index 3be25134ef5..00000000000 --- a/cluster/vagrant/OWNERS +++ /dev/null @@ -1,36 +0,0 @@ -approvers: -- derekwaynecarr -reviewers: -- ArtfulCoder -- thockin -- lavalamp -- smarterclayton -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- nikhiljindal -- erictune -- dchen1107 -- zmerlynn -- justinsb -- roberthbailey -- eparis -- jlowdermilk -- piosz -- jsafrane -- jbeda -- madhusudancs -- jayunit100 -- cjcullen -- david-mcmahon -- mfojtik -- pweil- -- dcbw -- ivan4th -- filbranden -- dshulyak -- k82cn -- caseydavenport -- johscheuer diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh deleted file mode 100755 index 63b49146db2..00000000000 --- a/cluster/vagrant/config-default.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for interacting with the Vagrant cluster - -# Number of nodes in the cluster -NUM_NODES=${NUM_NODES-"1"} -export NUM_NODES - -# The IP of the master -export MASTER_IP=${MASTER_IP-"10.245.1.2"} -export KUBE_MASTER_IP=${MASTER_IP} - -export INSTANCE_PREFIX="kubernetes" -export MASTER_NAME="${INSTANCE_PREFIX}-master" - -# Should the master serve as a node -REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} - -# Map out the IPs, names and container subnets of each node -export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."} -NODE_CONTAINER_SUBNET_BASE="10.246" -MASTER_CONTAINER_NETMASK="255.255.255.0" -MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" -MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" -CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" -for ((i=0; i < NUM_NODES; i++)) do - NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))" - NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))" - NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" - NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" - NODE_CONTAINER_NETMASKS[$i]="255.255.255.0" - VAGRANT_NODE_NAMES[$i]="node-$((i+1))" -done - -CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.246.0.0/16}" - -SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET - -# Since this isn't exposed on the network, default to a simple user/passwd -MASTER_USER="${MASTER_USER:-vagrant}" -MASTER_PASSWD="${MASTER_PASSWD:-vagrant}" - -# Admission Controllers to invoke prior to persisting objects in cluster -# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely. -ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,PVCProtection,ResourceQuota - -# Optional: Enable node logging. -ENABLE_NODE_LOGGING=false -LOGGING_DESTINATION=elasticsearch - -# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up. -ENABLE_CLUSTER_LOGGING=false -ELASTICSEARCH_LOGGING_REPLICAS=1 - -# Optional: Cluster monitoring to setup as part of the cluster bring up: -# none - No cluster monitoring setup -# influxdb - Heapster, InfluxDB, and Grafana -# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging -ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" - -# Extra options to set on the Docker command line. This is useful for setting -# --insecure-registry for local registries, or globally configuring selinux options -# TODO Enable selinux when Fedora 21 repositories get an updated docker package -# see https://bugzilla.redhat.com/show_bug.cgi?id=1216151 -#EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} -b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8" -EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} --insecure-registry 10.0.0.0/8 -s overlay" - -# Flag to tell the kubelet to enable CFS quota support -ENABLE_CPU_CFS_QUOTA="${KUBE_ENABLE_CPU_CFS_QUOTA:-true}" - -# Optional: Install cluster DNS. 
-ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -DNS_SERVER_IP="10.247.0.10" -DNS_DOMAIN="cluster.local" - -# Optional: Enable DNS horizontal autoscaler -ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" - -# Optional: Install Kubernetes UI -ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" - -# Optional: Enable setting flags for kube-apiserver to turn on behavior in active-dev -RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" - -# Determine extra certificate names for master -octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g')) -((octets[3]+=1)) -service_ip=$(echo "${octets[*]}" | sed 's/ /./g') -MASTER_EXTRA_SANS="IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}" - -NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, kubenet, etc -if [ "${NETWORK_PROVIDER}" == "kubenet" ]; then - CLUSTER_IP_RANGE="${CONTAINER_SUBNET}" -fi - -# If enabled kube-controller-manager will be started with the --enable-hostpath-provisioner flag -ENABLE_HOSTPATH_PROVISIONER="${ENABLE_HOSTPATH_PROVISIONER:-true}" - -# OpenContrail networking plugin specific settings -OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}" -OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}" -OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}" - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} - -# Default fallback NETWORK_IF_NAME, will be used in case when no 'VAGRANT-BEGIN' comments were defined in network-script -export DEFAULT_NETWORK_IF_NAME="eth0" diff --git a/cluster/vagrant/config-test.sh b/cluster/vagrant/config-test.sh deleted file mode 100644 index d5458c2861b..00000000000 --- a/cluster/vagrant/config-test.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for interacting with the Vagrant cluster in test mode -#Set NUM_NODES to minimum required for testing. -NUM_NODES=2 - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -source "${KUBE_ROOT}/cluster/vagrant/config-default.sh" - -# Do not register the master kubelet during testing -REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} - diff --git a/cluster/vagrant/pod-ip-test.sh b/cluster/vagrant/pod-ip-test.sh deleted file mode 100755 index 83ed59b3c86..00000000000 --- a/cluster/vagrant/pod-ip-test.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -echoOK() { - TC='\e[' - RegB="${TC}0m" - if [ "$1" -eq "0" ]; then - Green="${TC}32m" - echo -e "[${Green}OK${RegB}]" - else - Red="${TC}31m" - echo -e "[${Red}FAIL${RegB}]" - echo "Check log file." - exit 1 - fi -} - -usage() { - echo "Usage options: [--logfile ]" -} - -logfile=/dev/null -while [[ $# > 0 ]]; do - key="$1" - shift - case $key in - -l|--logfile) - logfile="$1" - if [ "$logfile" == "" ]; then - usage - exit 1 - fi - shift - ;; - *) - # unknown option - usage - exit 1 - ;; - esac -done - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -cd "${KUBE_ROOT}" - -echo All verbose output will be redirected to $logfile, use --logfile option to change. - -printf "Start the cluster with 2 nodes .. " -export NUM_NODES=2 -export KUBERNETES_PROVIDER=vagrant - -(cluster/kube-up.sh >>"$logfile" 2>&1) || true -echoOK $? - -printf "Check if node-1 can reach kubernetes master .. " -vagrant ssh node-1 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1 -echoOK $? -printf "Check if node-2 can reach kubernetes master .. " -vagrant ssh node-2 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1 -echoOK $? - -printf "Pull an image that runs a web server on node-1 .. " -vagrant ssh node-1 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1 -echoOK $? -printf "Pull an image that runs a web server on node-2 .. " -vagrant ssh node-2 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1 -echoOK $? - -printf "Run the server on node-1 .. " -vagrant ssh node-1 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1 -echoOK $? -printf "Run the server on node-2 .. " -vagrant ssh node-2 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1 -echoOK $? - -printf "Run ping from node-1 to docker bridges and to the containers on both nodes .. " -vagrant ssh node-1 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1 -echoOK $? -printf "Same pinch from node-2 .. " -vagrant ssh node-2 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1 -echoOK $? - -printf "tcp check, curl to both the running webservers from node-1 .. " -vagrant ssh node-1 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1 -echoOK $? -printf "tcp check, curl to both the running webservers from node-2 .. " -vagrant ssh node-2 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1 -echoOK $? - -printf "All good, destroy the cluster .. " -vagrant destroy -f >>"$logfile" 2>&1 -echoOK $? diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh deleted file mode 100755 index eeff6ed8a91..00000000000 --- a/cluster/vagrant/provision-master.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Set the host name explicitly -# See: https://github.com/mitchellh/vagrant/issues/2430 -hostnamectl set-hostname ${MASTER_NAME} -# Set the variable to empty value explicitly -if_to_edit="" - -if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then - # Disable network interface being managed by Network Manager (needed for Fedora 21+) - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - for if_conf in ${if_to_edit}; do - grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf} - sed -i 's/#^NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf} - done; - systemctl restart network -fi - -# needed for vsphere support -# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts -# set the NETWORK_IF_NAME to have a default value in such case -NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` -if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} -fi - -# Setup hosts file to support ping by hostname to each node in the cluster from apiserver -for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - node=${NODE_NAMES[$i]} - ip=${NODE_IPS[$i]} - if [ ! "$(cat /etc/hosts | grep $node)" ]; then - echo "Adding $node to hosts file" - echo "$ip $node" >> /etc/hosts - fi -done -echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master. -echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts - -enable-accounting -prepare-package-manager - -# Configure the master network -if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then - provision-network-master -fi - -write-salt-config kubernetes-master - -# Generate and distribute a shared secret (bearer token) to -# apiserver and kubelet so that kubelet can authenticate to -# apiserver to send events. -known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" -if [[ ! -f "${known_tokens_file}" ]]; then - - mkdir -p /srv/salt-overlay/salt/kube-apiserver - known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" - (umask u=rw,go= ; - echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file; - echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file; - echo "$KUBE_BEARER_TOKEN,admin,admin" >> $known_tokens_file) - - mkdir -p /srv/salt-overlay/salt/kubelet - kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" - (umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file) - - create-salt-kubelet-auth - create-salt-kubeproxy-auth - # Generate tokens for other "service accounts". Append to known_tokens. - # - # NB: If this list ever changes, this script actually has to - # change to detect the existence of this file, kill any deleted - # old tokens and add any new tokens (to handle the upgrade case). 
- service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns") - for account in "${service_accounts[@]}"; do - token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - echo "${token},${account},${account}" >> "${known_tokens_file}" - done -fi - - -readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv" -if [ ! -e "${BASIC_AUTH_FILE}" ]; then - mkdir -p /srv/salt-overlay/salt/kube-apiserver - (umask 077; - echo "${MASTER_PASSWD},${MASTER_USER},admin" > "${BASIC_AUTH_FILE}") -fi - -# Enable Fedora Cockpit on host to support Kubernetes administration -# Access it by going to :9090 and login as vagrant/vagrant -if ! which /usr/libexec/cockpit-ws &>/dev/null; then - - pushd /etc/yum.repos.d - curl -OL https://copr.fedorainfracloud.org/coprs/g/cockpit/cockpit-preview/repo/fedora-23/msuchy-cockpit-preview-fedora-23.repo - dnf install -y cockpit cockpit-kubernetes docker socat ethtool - popd - - systemctl enable cockpit.socket - systemctl start cockpit.socket -fi - -install-salt - -run-salt diff --git a/cluster/vagrant/provision-network-master.sh b/cluster/vagrant/provision-network-master.sh deleted file mode 100644 index 14280cba072..00000000000 --- a/cluster/vagrant/provision-network-master.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# provision-network-master configures flannel on the master -function provision-network-master { - - echo "Provisioning network on master" - - FLANNEL_ETCD_URL="http://${MASTER_IP}:4379" - - # Install etcd for flannel data - if ! which etcd >/dev/null 2>&1; then - - dnf install -y etcd - - # Modify etcd configuration for flannel data - cat </etc/etcd/etcd.conf -ETCD_NAME=flannel -ETCD_DATA_DIR="/var/lib/etcd/flannel.etcd" -ETCD_LISTEN_PEER_URLS="http://${MASTER_IP}:4380" -ETCD_LISTEN_CLIENT_URLS="http://${MASTER_IP}:4379" -ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MASTER_IP}:4380" -ETCD_INITIAL_CLUSTER="flannel=http://${MASTER_IP}:4380" -ETCD_ADVERTISE_CLIENT_URLS="${FLANNEL_ETCD_URL}" -EOF - - # fix the etcd boot failure issue - sed -i '/^Restart/a RestartSec=10' /usr/lib/systemd/system/etcd.service - systemctl daemon-reload - - # Enable and start etcd - systemctl enable etcd - systemctl start etcd - - fi - - # Install flannel for overlay - if ! 
which flanneld >/dev/null 2>&1; then - - dnf install -y flannel - - cat </etc/flannel-config.json -{ - "Network": "${CONTAINER_SUBNET}", - "SubnetLen": 24, - "Backend": { - "Type": "udp", - "Port": 8285 - } -} -EOF - - # Import default configuration into etcd for master setup - etcdctl -C ${FLANNEL_ETCD_URL} set /coreos.com/network/config < /etc/flannel-config.json - - # Configure local daemon to speak to master - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` - # needed for vsphere support - # handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts - # set the NETWORK_IF_NAME to have a default value in such case - if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} - fi - cat </etc/sysconfig/flanneld -FLANNEL_ETCD="${FLANNEL_ETCD_URL}" -FLANNEL_ETCD_KEY="/coreos.com/network" -FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq" -EOF - - # Start flannel - systemctl enable flanneld - systemctl start flanneld - fi - - echo "Network configuration verified" -} diff --git a/cluster/vagrant/provision-network-node.sh b/cluster/vagrant/provision-network-node.sh deleted file mode 100644 index c8fd42252ef..00000000000 --- a/cluster/vagrant/provision-network-node.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# provision-network-node configures flannel on the node -function provision-network-node { - - echo "Provisioning network on node" - - FLANNEL_ETCD_URL="http://${MASTER_IP}:4379" - - # Install flannel for overlay - if ! which flanneld >/dev/null 2>&1; then - - dnf install -y flannel - - # Configure local daemon to speak to master - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` - # needed for vsphere support - # handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts - # set the NETWORK_IF_NAME to have a default value in such case - if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} - fi - cat </etc/sysconfig/flanneld -FLANNEL_ETCD="${FLANNEL_ETCD_URL}" -FLANNEL_ETCD_KEY="/coreos.com/network" -FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq" -EOF - - # Start flannel - systemctl enable flanneld - systemctl start flanneld - fi - - echo "Network configuration verified" -} diff --git a/cluster/vagrant/provision-node.sh b/cluster/vagrant/provision-node.sh deleted file mode 100755 index 8d43a63cada..00000000000 --- a/cluster/vagrant/provision-node.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Set the host name explicitly -# See: https://github.com/mitchellh/vagrant/issues/2430 -hostnamectl set-hostname ${NODE_NAME} -if_to_edit="" - -if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then - # Disable network interface being managed by Network Manager (needed for Fedora 21+) - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - for if_conf in ${if_to_edit}; do - grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf} - sed -i 's/#^NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf} - done; - systemctl restart network -fi - -# needed for vsphere support -# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts -# set the NETWORK_IF_NAME to have a default value in such case -NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` -if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} -fi - -# Setup hosts file to support ping by hostname to master -if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then - echo "Adding $MASTER_NAME to hosts file" - echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts -fi -echo "$NODE_IP $NODE_NAME" >> /etc/hosts - -# Setup hosts file to support ping by hostname to each node in the cluster -for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - node=${NODE_NAMES[$i]} - ip=${NODE_IPS[$i]} - if [ ! "$(cat /etc/hosts | grep $node)" ]; then - echo "Adding $node to hosts file" - echo "$ip $node" >> /etc/hosts - fi -done - -enable-accounting -prepare-package-manager - -# Configure network -if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then - provision-network-node -fi - -write-salt-config kubernetes-pool - -# Generate kubelet and kube-proxy auth file(kubeconfig) if there is not an existing one -known_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig" -if [[ ! -f "${known_kubeconfig_file}" ]]; then - create-salt-kubelet-auth - create-salt-kubeproxy-auth -else - # stop kubelet, let salt start it later - systemctl stop kubelet -fi - -install-salt -add-volume-support - -run-salt - -dnf install -y socat ethtool -dnf update -y docker diff --git a/cluster/vagrant/provision-utils.sh b/cluster/vagrant/provision-utils.sh deleted file mode 100755 index e719a830c8f..00000000000 --- a/cluster/vagrant/provision-utils.sh +++ /dev/null @@ -1,222 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -function enable-accounting() { - mkdir -p /etc/systemd/system.conf.d/ - cat </etc/systemd/system.conf.d/kubernetes-accounting.conf -[Manager] -DefaultCPUAccounting=yes -DefaultMemoryAccounting=yes -EOF - systemctl daemon-reload -} - -function prepare-package-manager() { - echo "Prepare package manager" - - # Useful if a mirror is broken or slow - if [ -z "$CUSTOM_FEDORA_REPOSITORY_URL" ]; then - echo "fastestmirror=True" >> /etc/dnf/dnf.conf - else - # remove trailing slash from URL if it's present - CUSTOM_FEDORA_REPOSITORY_URL="${CUSTOM_FEDORA_REPOSITORY_URL%/}" - sed -i -e "/^metalink=/d" /etc/yum.repos.d/*.repo - sed -i -e "s@^#baseurl=http://download.fedoraproject.org/pub/fedora@baseurl=$CUSTOM_FEDORA_REPOSITORY_URL@" /etc/yum.repos.d/*.repo - fi -} - - -function add-volume-support() { - echo "Adding nfs volume support" - - # we need nfs-utils to support volumes - dnf install -y nfs-utils -} - -function write-salt-config() { - local role="$1" - - # Update salt configuration - mkdir -p /etc/salt/minion.d - - mkdir -p /srv/salt-overlay/pillar - cat </srv/salt-overlay/pillar/cluster-params.sls -service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' -enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' -enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' -enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")' -enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")' -logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")' -elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")' -enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")' -dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")' -dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")' -instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' -admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' -enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")' -network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")' -cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")' -opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")' -opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")' -e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")' -enable_hostpath_provisioner: '$(echo "$ENABLE_HOSTPATH_PROVISIONER" | sed -e "s/'/''/g")' -EOF - -if [ -n "${EVICTION_HARD:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")' -EOF -fi - - cat </etc/salt/minion.d/log-level-debug.conf -log_level: warning -log_level_logfile: warning -EOF - - cat </etc/salt/minion.d/grains.conf -grains: - node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' - publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' - network_mode: openvswitch - networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")' - api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' - kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig - cloud: vagrant - roles: - - $role - runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")' - docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")' - master_extra_sans: 
'$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")' - keep_host_etcd: true - kube_user: '$(echo "$KUBE_USER" | sed -e "s/'/''/g")' -EOF -} - -function release_not_found() { - echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2 - echo "are running from a clone of the git repo, please run 'make quick-release'." >&2 - echo "Note that this requires having Docker installed. If you are running " >&2 - echo "from a release tarball, something is wrong. Look at " >&2 - echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2 - exit 1 -} - -function install-salt() { - server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz" - if [[ ! -f "$server_binary_tar" ]]; then - server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" - fi - if [[ ! -f "$server_binary_tar" ]]; then - release_not_found - fi - - salt_tar="/vagrant/server/kubernetes-salt.tar.gz" - if [[ ! -f "$salt_tar" ]]; then - salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz" - fi - if [[ ! -f "$salt_tar" ]]; then - release_not_found - fi - - echo "Running release install script" - rm -rf /kube-install - mkdir -p /kube-install - pushd /kube-install - tar xzf "$salt_tar" - cp "$server_binary_tar" . - ./kubernetes/saltbase/install.sh "${server_binary_tar##*/}" - popd - - if ! which salt-call >/dev/null 2>&1; then - # Install salt from official repositories. - # Need to enable testing-repos to get version of salt with fix for dnf-core-plugins - dnf config-manager --set-enabled updates-testing - dnf install -y salt-minion - - # Fedora >= 23 includes salt packages but the bootstrap is - # creating configuration for a (non-existent) salt repo anyway. - # Remove the invalid repo to prevent dnf from warning about it on - # every update. Assume this problem is specific to Fedora 23 and - # will fixed by the time another version of Fedora lands. - local fedora_version=$(grep 'VERSION_ID' /etc/os-release | sed 's+VERSION_ID=++') - if [[ "${fedora_version}" = '23' ]]; then - local repo_file='/etc/yum.repos.d/saltstack-salt-fedora-23.repo' - if [[ -f "${repo_file}" ]]; then - rm "${repo_file}" - fi - fi - - fi -} - -function run-salt() { - echo " Now waiting for the Salt provisioning process to complete on this machine." - echo " This can take some time based on your network, disk, and cpu speed." 
- salt-call --local state.highstate -} - -function create-salt-kubelet-auth() { - local -r kubelet_kubeconfig_folder="/srv/salt-overlay/salt/kubelet" - mkdir -p "${kubelet_kubeconfig_folder}" - (umask 077; - cat > "${kubelet_kubeconfig_folder}/kubeconfig" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - server: "https://${MASTER_IP}" - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kubelet - name: service-account-context -current-context: service-account-context -users: -- name: kubelet - user: - token: ${KUBELET_TOKEN} -EOF - ) -} - -function create-salt-kubeproxy-auth() { - kube_proxy_kubeconfig_folder="/srv/salt-overlay/salt/kube-proxy" - mkdir -p "${kube_proxy_kubeconfig_folder}" - (umask 077; - cat > "${kube_proxy_kubeconfig_folder}/kubeconfig" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kube-proxy - name: service-account-context -current-context: service-account-context -users: -- name: kube-proxy - user: - token: ${KUBE_PROXY_TOKEN} -EOF - ) -} diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh deleted file mode 100755 index 3d022576d00..00000000000 --- a/cluster/vagrant/util.sh +++ /dev/null @@ -1,389 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}" -source "${KUBE_ROOT}/cluster/common.sh" - -function detect-master () { - KUBE_MASTER_IP=$MASTER_IP - echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2 -} - -# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[] -function detect-nodes { - echo "Nodes already detected" 1>&2 - KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}") -} - -# Verify prereqs on host machine Also sets exports USING_KUBE_SCRIPTS=true so -# that our Vagrantfile doesn't error out. -function verify-prereqs { - for x in vagrant; do - if ! which "$x" >/dev/null; then - echo "Can't find $x in PATH, please fix and retry." 
- exit 1 - fi - done - - local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n') - local providers=( - # Format is: - # provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re - # either provider_ctl_executable or vagrant_provider_plugin_re can - # be blank (i.e., '') if none is needed by Vagrant (see, e.g., - # virtualbox entry) - '' vmware_fusion vagrant-vmware-fusion - '' vmware_workstation vagrant-vmware-workstation - prlctl parallels vagrant-parallels - VBoxManage virtualbox '' - virsh libvirt vagrant-libvirt - '' vsphere vagrant-vsphere - ) - local provider_found='' - local provider_bin - local provider_name - local provider_plugin_re - - while [ "${#providers[@]}" -gt 0 ]; do - provider_bin=${providers[0]} - provider_name=${providers[1]} - provider_plugin_re=${providers[2]} - providers=("${providers[@]:3}") - - # If the provider is explicitly set, look only for that provider - if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \ - && [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then - continue - fi - - if ([ -z "${provider_bin}" ] \ - || which "${provider_bin}" >/dev/null 2>&1) \ - && ([ -z "${provider_plugin_re}" ] \ - || [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then - provider_found="${provider_name}" - # Stop after finding the first viable provider - break - fi - done - - if [ -z "${provider_found}" ]; then - if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ]; then - echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider." - echo "Possible reasons could be: " - echo -e "\t- vmrun utility is not in your path" - echo -e "\t- Vagrant plugin was not found." - echo -e "\t- VAGRANT_DEFAULT_PROVIDER is set, but not found." - echo "Please fix and retry." - else - echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry." - fi - - exit 1 - fi - - # Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no - # matter what directory the tools are called from. - export VAGRANT_CWD="${KUBE_ROOT}" - - export USING_KUBE_SCRIPTS=true -} - -# Create a set of provision scripts for the master and each of the nodes -function create-provision-scripts { - kube::util::ensure-temp-dir - - ( - echo "#! /bin/bash" - echo-kube-env - echo "NODE_IP='${MASTER_IP}'" - echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'" - echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh" - ) > "${KUBE_TEMP}/master-start.sh" - - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - ( - echo "#! 
/bin/bash" - echo-kube-env - echo "NODE_NAME=(${NODE_NAMES[$i]})" - echo "NODE_IP='${NODE_IPS[$i]}'" - echo "NODE_ID='$i'" - echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'" - echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-node.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-node.sh" - ) > "${KUBE_TEMP}/node-start-${i}.sh" - done -} - -function echo-kube-env() { - echo "KUBE_ROOT=/vagrant" - echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'" - echo "MASTER_NAME='${INSTANCE_PREFIX}-master'" - echo "MASTER_IP='${MASTER_IP}'" - echo "NODE_NAMES=(${NODE_NAMES[@]})" - echo "NODE_IPS=(${NODE_IPS[@]})" - echo "DEFAULT_NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}" - echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" - echo "CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'" - echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" - echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'" - echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})" - echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" - echo "MASTER_USER='${MASTER_USER}'" - echo "MASTER_PASSWD='${MASTER_PASSWD}'" - echo "KUBE_USER='${KUBE_USER}'" - echo "KUBE_PASSWORD='${KUBE_PASSWORD}'" - echo "KUBE_BEARER_TOKEN='${KUBE_BEARER_TOKEN}'" - echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'" - echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'" - echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'" - echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'" - echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'" - echo "ENABLE_HOSTPATH_PROVISIONER='${ENABLE_HOSTPATH_PROVISIONER:-false}'" - echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'" - echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'" - echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'" - echo "DNS_DOMAIN='${DNS_DOMAIN:-}'" - echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'" - echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'" - echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'" - echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'" - echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'" - echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'" - echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'" - echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'" - echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'" - echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'" - echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'" - echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'" - echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'" - echo "CUSTOM_FEDORA_REPOSITORY_URL='${CUSTOM_FEDORA_REPOSITORY_URL:-}'" - echo "EVICTION_HARD='${EVICTION_HARD:-}'" -} - -function verify-cluster { - # TODO: How does the user know the difference between "tak[ing] some - # time" and "loop[ing] forever"? Can we give more specific feedback on - # whether "an error" has occurred? - echo "Each machine instance has been created/updated." - echo " Now waiting for the Salt provisioning process to complete on each machine." - echo " This can take some time based on your network, disk, and cpu speed." - echo " It is possible for an error to occur during Salt provision of cluster and this could loop forever." 
- - # verify master has all required daemons - echo "Validating master" - local machine="master" - local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker") - local validated="1" - until [[ "$validated" == "0" ]]; do - validated="0" - for process in "${required_processes[@]}"; do - vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || { - printf "." - validated="1" - sleep 2 - } - done - done - - # verify each node has all required daemons - local i - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - echo "Validating ${VAGRANT_NODE_NAMES[$i]}" - local machine=${VAGRANT_NODE_NAMES[$i]} - local -a required_processes=("kube-proxy" "kubelet" "docker") - local validated="1" - until [[ "${validated}" == "0" ]]; do - validated="0" - for process in "${required_processes[@]}"; do - vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || { - printf "." - validated="1" - sleep 2 - } - done - done - done - - echo - echo "Waiting for each node to be registered with cloud provider" - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - local validated="0" - start="$(date +%s)" - until [[ "$validated" == "1" ]]; do - now="$(date +%s)" - # Timeout set to 3 minutes - if [ $((now - start)) -gt 180 ]; then - echo "Timeout while waiting for echo node to be registered with cloud provider" - exit 2 - fi - local nodes=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name) - validated=$(echo $nodes | grep -c "${NODE_NAMES[i]}") || { - printf "." - sleep 2 - validated="0" - } - done - done - - # By this time, all kube api calls should work, so no need to loop and retry. - echo "Validating we can run kubectl commands." - vagrant ssh master --command "kubectl get pods" || { - echo "WARNING: kubectl to localhost failed. This could mean localhost is not bound to an IP" - } - - ( - # ensures KUBECONFIG is set - get-kubeconfig-basicauth - get-kubeconfig-bearertoken - echo - echo "Kubernetes cluster is running." - echo - echo "The master is running at:" - echo - echo " https://${MASTER_IP}" - echo - echo "Administer and visualize its resources using Cockpit:" - echo - echo " https://${MASTER_IP}:9090" - echo - echo "For more information on Cockpit, visit http://cockpit-project.org" - echo - echo "The user name and password to use is located in ${KUBECONFIG}" - echo - ) -} - -# Instantiate a kubernetes cluster -function kube-up { - load-or-gen-kube-basicauth - load-or-gen-kube-bearertoken - get-tokens - create-provision-scripts - - vagrant up --no-parallel - - export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" - export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" - export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" - export CONTEXT="vagrant" - - ( - umask 077 - vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null - vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null - vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null - - # Update the user's kubeconfig to include credentials for this apiserver. 
- create-kubeconfig - ) - - verify-cluster -} - -# Delete a kubernetes cluster -function kube-down { - vagrant destroy -f -} - -# Update a kubernetes cluster with latest source -function kube-push { - get-kubeconfig-basicauth - get-kubeconfig-bearertoken - create-provision-scripts - vagrant provision -} - -# Execute prior to running tests to build a release if required for env -function test-build-release { - # Make a release - "${KUBE_ROOT}/build/release.sh" -} - -# Execute prior to running tests to initialize required structure -function test-setup { - "${KUBE_ROOT}/cluster/kube-up.sh" - echo "Vagrant test setup complete" 1>&2 -} - -# Execute after running tests to perform any required clean-up -function test-teardown { - kube-down -} - -# Find the node name based on the IP address -function find-vagrant-name-by-ip { - local ip="$1" - local ip_pattern="${NODE_IP_BASE}(.*)" - - # This is subtle. We map 10.245.2.2 -> node-1. We do this by matching a - # regexp and using the capture to construct the name. - [[ $ip =~ $ip_pattern ]] || { - return 1 - } - - echo "node-$((${BASH_REMATCH[1]} - 1))" -} - -# Find the vagrant machine name based on the host name of the node -function find-vagrant-name-by-node-name { - local ip="$1" - if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then - echo "master" - return $? - fi - local ip_pattern="${INSTANCE_PREFIX}-node-(.*)" - - [[ $ip =~ $ip_pattern ]] || { - return 1 - } - - echo "node-${BASH_REMATCH[1]}" -} - - -# SSH to a node by name or IP ($1) and run a command ($2). -function ssh-to-node { - local node="$1" - local cmd="$2" - local machine - - machine=$(find-vagrant-name-by-ip $node) || true - [[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-node-name $node) || true - [[ -n ${machine-} ]] || { - echo "Cannot find machine to ssh to: $1" - return 1 - } - - vagrant ssh "${machine}" -c "${cmd}" -} - -# Perform preparations required to run e2e tests -function prepare-e2e() { - echo "Vagrant doesn't need special preparations for e2e tests" 1>&2 -} - -function get-tokens() { - KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -} diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 31b2dde1855..b6e87ef444a 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -225,7 +225,7 @@ func RegisterClusterFlags() { flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.") flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.") flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.") - flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, vagrant, etc.)") + flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, etc.)") flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. 
For development, you might use 'cluster/kubectl.sh' here.") flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.") flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.") diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 637a9eace0b..62fa4c9abb3 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3454,12 +3454,6 @@ func GetSigner(provider string) (ssh.Signer, error) { } // Otherwise revert to home dir keyfile = "kube_aws_rsa" - case "vagrant": - keyfile = os.Getenv("VAGRANT_SSH_KEY") - if len(keyfile) != 0 { - return sshutil.MakePrivateKeySignerFromFile(keyfile) - } - return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided") case "local", "vsphere": keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe? if len(keyfile) == 0 { From e826a77919785e651d3c5bad3deb65af8243b319 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Thu, 11 Jan 2018 13:51:03 +0530 Subject: [PATCH 116/264] Add custom volumename option to GlusterFS dynamic PVs. At present glusterfs dynamic PVs are created with random names. However an admin would like to have some handle on the volume names created dynamically for various purposes. One example would be having a filter for sorting out PVs created for a particular storage class. This patch enables the functionality by having a custom volume name as a prefix to dynamic PVs. This is an optional parameter in SC and if set, the dynamic volumes are created in below format where `_` is the field seperator/delimiter: customvolumeprefix_PVCname_randomUUID Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/glusterfs.go | 45 ++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 9afadd1f050..8f2618c765d 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" @@ -406,17 +407,18 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio } type provisionerConfig struct { - url string - user string - userKey string - secretNamespace string - secretName string - secretValue string - clusterID string - gidMin int - gidMax int - volumeType gapi.VolumeDurabilityInfo - volumeOptions []string + url string + user string + userKey string + secretNamespace string + secretName string + secretValue string + clusterID string + gidMin int + gidMax int + volumeType gapi.VolumeDurabilityInfo + volumeOptions []string + volumeNamePrefix string } type glusterfsVolumeProvisioner struct { @@ -743,6 +745,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, volID string, err error) { var clusterIDs []string + customVolumeName := "" capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Glusterfs creates volumes in units of GiB, but heketi documentation incorrectly reports GBs sz := int(volume.RoundUpToGiB(capacity)) @@ -760,8 +763,13 @@ func 
(p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum clusterIDs = dstrings.Split(p.clusterID, ",") glog.V(4).Infof("provided clusterIDs: %v", clusterIDs) } + + if p.provisionerConfig.volumeNamePrefix != "" { + customVolumeName = fmt.Sprintf("%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Name, uuid.NewUUID()) + } + gid64 := int64(gid) - volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions} + volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions} volume, err := cli.VolumeCreate(volumeReq) if err != nil { glog.Errorf("error creating volume %v ", err) @@ -927,6 +935,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa authEnabled := true parseVolumeType := "" parseVolumeOptions := "" + parseVolumeNamePrefix := "" for k, v := range params { switch dstrings.ToLower(k) { @@ -977,7 +986,10 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa if len(v) != 0 { parseVolumeOptions = v } - + case "volumenameprefix": + if len(v) != 0 { + parseVolumeNamePrefix = v + } default: return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName) } @@ -1057,6 +1069,13 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa cfg.volumeOptions = volOptions } + + if len(parseVolumeNamePrefix) != 0 { + if dstrings.Contains(parseVolumeNamePrefix, "_") { + return nil, fmt.Errorf("Storageclass parameter 'volumenameprefix' should not contain '_' in its value") + } + cfg.volumeNamePrefix = parseVolumeNamePrefix + } return &cfg, nil } From 66c7fdb8920cca5b095a29f078950747fb29798a Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Thu, 11 Jan 2018 14:14:29 +0530 Subject: [PATCH 117/264] Update bazel. 
Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/volume/glusterfs/BUILD b/pkg/volume/glusterfs/BUILD index a57114c412e..19638dd3d16 100644 --- a/pkg/volume/glusterfs/BUILD +++ b/pkg/volume/glusterfs/BUILD @@ -32,6 +32,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) From 988c4ae6baed8fcf5e06fc55e1ec031bf3d3815c Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Wed, 10 Jan 2018 10:06:33 +0800 Subject: [PATCH 118/264] fix windows ut for proxy mode --- .../apis/kubeproxyconfig/validation/validation_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go index aaf5e9f4a91..8815e0af077 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go @@ -30,6 +30,12 @@ import ( ) func TestValidateKubeProxyConfiguration(t *testing.T) { + var proxyMode kubeproxyconfig.ProxyMode + if runtime.GOOS == "windows" { + proxyMode = kubeproxyconfig.ProxyModeKernelspace + } else { + proxyMode = kubeproxyconfig.ProxyModeIPVS + } successCases := []kubeproxyconfig.KubeProxyConfiguration{ { BindAddress: "192.168.59.103", @@ -43,7 +49,7 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, - Mode: kubeproxyconfig.ProxyModeIPVS, + Mode: proxyMode, IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, From 68eaf536711b9ef8d0ad12fa5c89684750d17c4b Mon Sep 17 00:00:00 2001 From: zoues Date: Thu, 11 Jan 2018 21:52:47 +0800 Subject: [PATCH 119/264] remove provides which has been deleted --- cluster/kube-up.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cluster/kube-up.sh b/cluster/kube-up.sh index a2813f99e2f..81a33d3f3fa 100755 --- a/cluster/kube-up.sh +++ b/cluster/kube-up.sh @@ -34,13 +34,7 @@ source "${KUBE_ROOT}/cluster/kube-util.sh" DEPRECATED_PROVIDERS=( "centos" - "libvirt-coreos" "local" - "openstack-heat" - "photon-controller" - "vagrant" - "vsphere" - "windows" ) for provider in "${DEPRECATED_PROVIDERS[@]}"; do From f91858d86883b6eab21efa0f6cb6dbee1fa8074a Mon Sep 17 00:00:00 2001 From: Di Xu Date: Thu, 21 Dec 2017 17:34:34 +0800 Subject: [PATCH 120/264] add hostPorts to pod describer --- pkg/printers/internalversion/describe.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index 43bc2b98113..7a7401e42a4 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -1315,11 +1315,13 @@ func describeContainerBasicInfo(container api.Container, status api.ContainerSta if strings.Contains(portString, ",") { w.Write(LEVEL_2, "Ports:\t%s\n", portString) } else { - if len(portString) == 0 { - w.Write(LEVEL_2, "Port:\t\n") - } else { - w.Write(LEVEL_2, "Port:\t%s\n", portString) - } + w.Write(LEVEL_2, "Port:\t%s\n", stringOrNone(portString)) 
+ } + hostPortString := describeContainerHostPorts(container.Ports) + if strings.Contains(hostPortString, ",") { + w.Write(LEVEL_2, "Host Ports:\t%s\n", hostPortString) + } else { + w.Write(LEVEL_2, "Host Port:\t%s\n", stringOrNone(hostPortString)) } } @@ -1331,6 +1333,14 @@ func describeContainerPorts(cPorts []api.ContainerPort) string { return strings.Join(ports, ", ") } +func describeContainerHostPorts(cPorts []api.ContainerPort) string { + ports := make([]string, 0, len(cPorts)) + for _, cPort := range cPorts { + ports = append(ports, fmt.Sprintf("%d/%s", cPort.HostPort, cPort.Protocol)) + } + return strings.Join(ports, ", ") +} + func describeContainerCommand(container api.Container, w PrefixWriter) { if len(container.Command) > 0 { w.Write(LEVEL_2, "Command:\n") From 41cb533ad683fbf5ddc61a293e5d1da026cae7f6 Mon Sep 17 00:00:00 2001 From: mtanino Date: Wed, 10 Jan 2018 10:16:43 -0500 Subject: [PATCH 121/264] [FC Plugin] Create proper volumeSpec during ConstructVolumeSpec Currently, FC plugin returns volume name and empty FCVolumeSource during ConstrutVolumeSpec during filesystem volume's reconstruction. In this fix, ConstructVolumeSpec retrieves global mount path, analyzes volume parameters such as WWN, LUN, WWID from the path. Fixes #58085 --- pkg/volume/fc/fc.go | 62 +++++++++++++++++++++++++++++---- pkg/volume/fc/fc_test.go | 74 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 7 deletions(-) diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index 8772ce91558..5aee9ba92b3 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -235,11 +235,59 @@ func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, ma } func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { - fcVolume := &v1.Volume{ - Name: volumeName, - VolumeSource: v1.VolumeSource{ - FC: &v1.FCVolumeSource{}, - }, + // Find globalPDPath from pod volume directory(mountPath) + // examples: + // mountPath: pods/{podUid}/volumes/kubernetes.io~fc/{volumeName} + // globalPDPath : plugins/kubernetes.io/fc/50060e801049cfd1-lun-0 + var globalPDPath string + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + paths, err := mount.GetMountRefs(mounter, mountPath) + if err != nil { + return nil, err + } + for _, path := range paths { + if strings.Contains(path, plugin.host.GetPluginDir(fcPluginName)) { + globalPDPath = path + break + } + } + // Couldn't fetch globalPDPath + if len(globalPDPath) == 0 { + return nil, fmt.Errorf("couldn't fetch globalPDPath. failed to obtain volume spec") + } + arr := strings.Split(globalPDPath, "/") + if len(arr) < 1 { + return nil, fmt.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath) + } + volumeInfo := arr[len(arr)-1] + // Create volume from wwn+lun or wwid + var fcVolume *v1.Volume + if strings.Contains(volumeInfo, "-lun-") { + wwnLun := strings.Split(volumeInfo, "-lun-") + if len(wwnLun) < 2 { + return nil, fmt.Errorf("failed to retrieve TargetWWN and Lun. 
volumeInfo is invalid: %v", volumeInfo) + } + lun, err := strconv.Atoi(wwnLun[1]) + if err != nil { + return nil, err + } + lun32 := int32(lun) + fcVolume = &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + FC: &v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}, + }, + } + glog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v", + fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun) + } else { + fcVolume = &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + FC: &v1.FCVolumeSource{WWIDs: []string{volumeInfo}}, + }, + } + glog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs) } return volume.NewSpecFromVolume(fcVolume), nil } @@ -249,7 +297,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu // - If a file is found, then retreives volumePluginDependentPath from globalMapPathUUID. // - Once volumePluginDependentPath is obtained, store volume information to VolumeSource // examples: -// mapPath: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} +// mapPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} // globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName) @@ -284,7 +332,7 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}) glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v", fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs, - fcPV.Spec.PersistentVolumeSource.FC.Lun) + *fcPV.Spec.PersistentVolumeSource.FC.Lun) } else { fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, v1.FCVolumeSource{WWIDs: []string{volumeInfo}}) diff --git a/pkg/volume/fc/fc_test.go b/pkg/volume/fc/fc_test.go index 42a530bc4a5..0f12042432c 100644 --- a/pkg/volume/fc/fc_test.go +++ b/pkg/volume/fc/fc_test.go @@ -19,6 +19,8 @@ package fc import ( "fmt" "os" + "strconv" + "strings" "testing" "k8s.io/api/core/v1" @@ -412,3 +414,75 @@ func Test_getWwnsLunWwidsError(t *testing.T) { t.Errorf("unexpected fc disk found") } } + +func Test_ConstructVolumeSpec(t *testing.T) { + fm := &mount.FakeMounter{ + MountPoints: []mount.MountPoint{ + {Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"}, + {Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"}, + {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"}, + {Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"}, + }, + } + mountPaths := []string{ + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1", + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2", + } + for _, path := range mountPaths { + refs, _ := mount.GetMountRefs(fm, path) + var globalPDPath string + for _, ref := range refs { + if strings.Contains(ref, "kubernetes.io/fc") { + globalPDPath = ref + break + } + } + if len(globalPDPath) == 0 { + t.Errorf("couldn't fetch mountrefs") + } + arr := strings.Split(globalPDPath, "/") + if len(arr) < 1 { + t.Errorf("failed to retrieve volume plugin information from 
globalPDPath: %v", globalPDPath) + } + volumeInfo := arr[len(arr)-1] + if strings.Contains(volumeInfo, "-lun-") { + wwnLun := strings.Split(volumeInfo, "-lun-") + if len(wwnLun) < 2 { + t.Errorf("failed to retrieve TargetWWN and Lun. volumeInfo is invalid: %v", volumeInfo) + } + lun, _ := strconv.Atoi(wwnLun[1]) + lun32 := int32(lun) + if wwnLun[0] != "50060e801049cfd1" || lun32 != 0 { + t.Errorf("failed to retrieve TargetWWN and Lun") + } + } else { + if volumeInfo != "3600508b400105e210000900000490000" { + t.Errorf("failed to retrieve WWIDs") + } + } + } +} + +func Test_ConstructVolumeSpecNoRefs(t *testing.T) { + fm := &mount.FakeMounter{ + MountPoints: []mount.MountPoint{ + {Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"}, + }, + } + mountPaths := []string{ + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1", + } + for _, path := range mountPaths { + refs, _ := mount.GetMountRefs(fm, path) + var globalPDPath string + for _, ref := range refs { + if strings.Contains(ref, "kubernetes.io/fc") { + globalPDPath = ref + break + } + } + if len(globalPDPath) != 0 { + t.Errorf("invalid globalPDPath") + } + } +} From eb0ac60175d50340a11fdb731b2c3320f92d4993 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Wed, 10 Jan 2018 15:15:50 -0800 Subject: [PATCH 122/264] remove OpenAPI import from types --- pkg/api/unversioned/BUILD | 6 +---- pkg/api/unversioned/time.go | 22 ++++++++----------- .../apimachinery/pkg/api/resource/BUILD | 2 -- .../apimachinery/pkg/api/resource/quantity.go | 22 ++++++++----------- .../apimachinery/pkg/apis/meta/v1/BUILD | 2 -- .../pkg/apis/meta/v1/micro_time.go | 22 ++++++++----------- .../apimachinery/pkg/apis/meta/v1/time.go | 22 ++++++++----------- .../k8s.io/apimachinery/pkg/util/intstr/BUILD | 2 -- .../apimachinery/pkg/util/intstr/intstr.go | 22 ++++++++----------- 9 files changed, 46 insertions(+), 76 deletions(-) diff --git a/pkg/api/unversioned/BUILD b/pkg/api/unversioned/BUILD index 874384b90fb..c0d661e8f8e 100644 --- a/pkg/api/unversioned/BUILD +++ b/pkg/api/unversioned/BUILD @@ -13,11 +13,7 @@ go_library( "types.go", ], importpath = "k8s.io/kubernetes/pkg/api/unversioned", - deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - ], + deps = ["//vendor/github.com/google/gofuzz:go_default_library"], ) filegroup( diff --git a/pkg/api/unversioned/time.go b/pkg/api/unversioned/time.go index 32f9edb7453..34da5ba3985 100644 --- a/pkg/api/unversioned/time.go +++ b/pkg/api/unversioned/time.go @@ -20,9 +20,6 @@ import ( "encoding/json" "time" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/google/gofuzz" ) @@ -141,16 +138,15 @@ func (t Time) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(time.RFC3339)) } -func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "date-time", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+func (_ Time) OpenAPISchemaFormat() string { return "date-time" } // MarshalQueryParameter converts to a URL query parameter value func (t Time) MarshalQueryParameter() (string, error) { diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD index fab98203507..2ae7638537e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD @@ -38,11 +38,9 @@ go_library( ], importpath = "k8s.io/apimachinery/pkg/api/resource", deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/gopkg.in/inf.v0:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go index 682ee9aa646..6a8bb997218 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -27,9 +27,7 @@ import ( flag "github.com/spf13/pflag" - "github.com/go-openapi/spec" inf "gopkg.in/inf.v0" - openapi "k8s.io/kube-openapi/pkg/common" ) // Quantity is a fixed-point representation of a number. @@ -399,17 +397,15 @@ func (q Quantity) DeepCopy() Quantity { return q } -// OpenAPIDefinition returns openAPI definition for this type. -func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Quantity) OpenAPISchemaFormat() string { return "" } // CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). 
// diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index c851816d782..1c49035bbc2 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -53,7 +53,6 @@ go_library( ], importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", @@ -67,7 +66,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index a09d79571c3..7e5bc2d4e7f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -20,9 +20,6 @@ import ( "encoding/json" "time" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/google/gofuzz" ) @@ -149,16 +146,15 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } -func (_ MicroTime) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "date-time", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ MicroTime) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ MicroTime) OpenAPISchemaFormat() string { return "date-time" } // MarshalQueryParameter converts to a URL query parameter value func (t MicroTime) MarshalQueryParameter() (string, error) { diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 0a9f2a37756..5041954f763 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,9 +20,6 @@ import ( "encoding/json" "time" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/google/gofuzz" ) @@ -151,16 +148,15 @@ func (t Time) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(time.RFC3339)) } -func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "date-time", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+func (_ Time) OpenAPISchemaFormat() string { return "date-time" } // MarshalQueryParameter converts to a URL query parameter value func (t Time) MarshalQueryParameter() (string, error) { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD index 8c66be54fc8..b4fe3922fff 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -22,11 +22,9 @@ go_library( ], importpath = "k8s.io/apimachinery/pkg/util/intstr", deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 04a77bb6b4b..231498ca032 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -24,9 +24,6 @@ import ( "strconv" "strings" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/golang/glog" "github.com/google/gofuzz" ) @@ -120,16 +117,15 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } -func (_ IntOrString) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "int-or-string", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { From e1dda7e3be5cded77116cc48370372cb10992c17 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Wed, 10 Jan 2018 15:17:37 -0800 Subject: [PATCH 123/264] bump(k8s.io/kube-openapi): a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3 --- Godeps/Godeps.json | 16 ++-- staging/src/k8s.io/api/Godeps/Godeps.json | 80 ----------------- .../Godeps/Godeps.json | 10 +-- .../k8s.io/apimachinery/Godeps/Godeps.json | 78 +--------------- .../src/k8s.io/apiserver/Godeps/Godeps.json | 10 +-- .../src/k8s.io/client-go/Godeps/Godeps.json | 78 +--------------- .../k8s.io/code-generator/Godeps/Godeps.json | 4 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 12 +-- staging/src/k8s.io/metrics/Godeps/Godeps.json | 76 ---------------- .../sample-apiserver/Godeps/Godeps.json | 10 +-- .../sample-controller/Godeps/Godeps.json | 78 +--------------- .../k8s.io/kube-openapi/pkg/generators/README | 31 +++++++ .../kube-openapi/pkg/generators/openapi.go | 90 +++++++++++-------- .../kube-openapi/pkg/util/proto/document.go | 14 ++- .../kube-openapi/pkg/util/proto/openapi.go | 25 ++++++ .../pkg/util/proto/validation/types.go | 13 ++- 16 files changed, 165 insertions(+), 460 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 45b60e8306b..71f3a14418e 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -3212,35 +3212,35 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/utils/clock", diff --git a/staging/src/k8s.io/api/Godeps/Godeps.json b/staging/src/k8s.io/api/Godeps/Godeps.json index ffa35e2d980..2a066b367ce 100644 --- a/staging/src/k8s.io/api/Godeps/Godeps.json +++ b/staging/src/k8s.io/api/Godeps/Godeps.json @@ -6,38 +6,6 @@ "./..." 
], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -54,18 +22,6 @@ "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" @@ -86,34 +42,10 @@ "ImportPath": "golang.org/x/net/lex/httplex", "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -126,18 +58,10 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" - }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -218,10 +142,6 @@ "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 
770352cf7d1..c4bd7aa4cc2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -1632,23 +1632,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/equality", diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index a264f944b4e..bf8fe584f6f 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -6,14 +6,6 @@ "./..." ], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -30,14 +22,6 @@ "ImportPath": "github.com/elazarl/goproxy", "Rev": "c4fc26588b6ef8af07a191fcb6476387bdd46711" }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/evanphx/json-patch", "Rev": "944e07253867aacae43c04b2e6a239005443f33a" @@ -46,22 +30,6 @@ "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -126,18 +94,6 @@ "ImportPath": "github.com/json-iterator/go", "Rev": "13f86432b882000a51c6e610c620974462691a97" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/mxk/go-flowrate/flowrate", "Rev": "cca7078d478f8520f85629ad7c68962d31ed7682" @@ -190,34 +146,10 @@ "ImportPath": "golang.org/x/net/websocket", "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, 
- { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -230,10 +162,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -242,13 +170,9 @@ "ImportPath": "gopkg.in/yaml.v2", "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index b1fcb0a32f1..68ec6fbdf25 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -1764,23 +1764,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/client-go/discovery", diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index 4fae7ddb897..efc5a5dacba 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -30,14 +30,6 @@ "ImportPath": "github.com/Azure/go-autorest/autorest/date", "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" }, - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, { "ImportPath": "github.com/coreos/go-oidc/http", "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" @@ -86,34 +78,10 @@ "ImportPath": "github.com/docker/spdystream/spdy", "Rev": "449fdfce4d962303d702fec724ef0ad181c92528" }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": 
"github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -222,18 +190,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" @@ -302,34 +258,10 @@ "ImportPath": "golang.org/x/sys/windows", "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -342,10 +274,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -682,13 +610,9 @@ "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/code-generator/Godeps/Godeps.json b/staging/src/k8s.io/code-generator/Godeps/Godeps.json index 506e4b88556..c30a706ba89 100644 --- a/staging/src/k8s.io/code-generator/Godeps/Godeps.json +++ b/staging/src/k8s.io/code-generator/Godeps/Godeps.json @@ -260,11 +260,11 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": 
"39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index abddac483d5..c28bf8966ae 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -1620,27 +1620,27 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index ce359bb79d8..60300dfceee 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -6,42 +6,10 @@ "./..." ], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -98,18 +66,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" @@ -130,34 +86,10 @@ "ImportPath": "golang.org/x/net/lex/httplex", "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": 
"golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -170,10 +102,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -497,10 +425,6 @@ { "ImportPath": "k8s.io/client-go/util/integer", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" - }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" } ] } diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index ce731cb79fd..c7366214eff 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -1608,23 +1608,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index ce34ffc7dfd..a6be9f90028 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -6,46 +6,14 @@ "./..." 
], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -122,18 +90,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" @@ -170,34 +126,10 @@ "ImportPath": "golang.org/x/sys/windows", "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -210,10 +142,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -966,13 +894,9 @@ "ImportPath": "k8s.io/client-go/util/workqueue", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/README b/vendor/k8s.io/kube-openapi/pkg/generators/README index 
35660a40da7..feb19b401a9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/README +++ b/vendor/k8s.io/kube-openapi/pkg/generators/README @@ -11,5 +11,36 @@ escape or quote the value string. Extensions can be used to pass more informatio documentation generators. For example a type might have a friendly name to be displayed in documentation or being used in a client's fluent interface. +# Custom OpenAPI type definitions + +Custom types which otherwise don't map directly to OpenAPI can override their +OpenAPI definition by implementing a function named "OpenAPIDefinition" with +the following signature: + + import openapi "k8s.io/kube-openapi/pkg/common" + + // ... + + type Time struct { + time.Time + } + + func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } + } + +Alternatively, the type can avoid the "openapi" import by defining the following +methods. The following example produces the same OpenAPI definition as the +example above: + + func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + func (_ Time) OpenAPISchemaFormat() string { return "date-time" } TODO(mehdy): Make k8s:openapi-gen a parameter to the generator now that OpenAPI has its own repo. diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 5efb3f45c6f..d9b0980abb4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -118,35 +118,13 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat `)...) - outputPath := arguments.OutputPackagePath - - if err := context.AddDir(outputPath); err != nil { - glog.Fatalf("Failed to load output package: %v", err) - } - - // Compute the canonical output path to allow retrieval of the - // package for a vendored output path. - const vendorPath = "/vendor/" - canonicalOutputPath := outputPath - if strings.Contains(outputPath, vendorPath) { - canonicalOutputPath = outputPath[strings.Index(outputPath, vendorPath)+len(vendorPath):] - } - - // The package for outputPath is mapped to the canonical path - pkg := context.Universe[canonicalOutputPath] - if pkg == nil { - glog.Fatalf("Got nil output package: %v", err) - } return generator.Packages{ &generator.DefaultPackage{ - PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0], - // Use the supplied output path rather than the canonical - // one to allow generation into the path of a - // vendored package. - PackagePath: outputPath, + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, HeaderText: header, GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, pkg, context)} + return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, arguments.OutputPackagePath, context)} }, FilterFunc: func(c *generator.Context, t *types.Type) bool { // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen @@ -175,12 +153,12 @@ const ( type openAPIGen struct { generator.DefaultGen // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. 
- targetPackage *types.Package + targetPackage string imports namer.ImportTracker context *generator.Context } -func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context *generator.Context) generator.Generator { +func NewOpenAPIGen(sanitizedName string, targetPackage string, context *generator.Context) generator.Generator { return &openAPIGen{ DefaultGen: generator.DefaultGen{ OptionalName: sanitizedName, @@ -194,7 +172,7 @@ func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context * func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { // Have the raw namer for this file track what it imports. return namer.NameSystems{ - "raw": namer.NewRawNamer(g.targetPackage.Path, g.imports), + "raw": namer.NewRawNamer(g.targetPackage, g.imports), } } @@ -207,10 +185,10 @@ func (g *openAPIGen) Filter(c *generator.Context, t *types.Type) bool { } func (g *openAPIGen) isOtherPackage(pkg string) bool { - if pkg == g.targetPackage.Path { + if pkg == g.targetPackage { return false } - if strings.HasSuffix(pkg, "\""+g.targetPackage.Path+"\"") { + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { return false } return true @@ -300,23 +278,37 @@ func newOpenAPITypeWriter(sw *generator.SnippetWriter) openAPITypeWriter { } } +func methodReturnsValue(mt *types.Type, pkg, name string) bool { + if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { + return false + } + r := mt.Signature.Results[0] + return r.Name.Name == name && r.Name.Package == pkg +} + func hasOpenAPIDefinitionMethod(t *types.Type) bool { for mn, mt := range t.Methods { if mn != "OpenAPIDefinition" { continue } - if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { - return false - } - r := mt.Signature.Results[0] - if r.Name.Name != "OpenAPIDefinition" || r.Name.Package != openAPICommonPackagePath { - return false - } - return true + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") } return false } +func hasOpenAPIDefinitionMethods(t *types.Type) bool { + var hasSchemaTypeMethod, hasOpenAPISchemaFormat bool + for mn, mt := range t.Methods { + switch mn { + case "OpenAPISchemaType": + hasSchemaTypeMethod = methodReturnsValue(mt, "", "[]string") + case "OpenAPISchemaFormat": + hasOpenAPISchemaFormat = methodReturnsValue(mt, "", "string") + } + } + return hasSchemaTypeMethod && hasOpenAPISchemaFormat +} + // typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name. func typeShortName(t *types.Type) string { return filepath.Base(t.Name.Package) + "." + t.Name.Name @@ -360,6 +352,28 @@ func (g openAPITypeWriter) generate(t *types.Type) error { g.Do("$.type|raw${}.OpenAPIDefinition(),\n", args) return nil } + if hasOpenAPIDefinitionMethods(t) { + // Since this generated snippet is part of a map: + // + // map[string]common.OpenAPIDefinition: { + // "TYPE_NAME": { + // Schema: spec.Schema{ ... }, + // }, + // } + // + // For compliance with gofmt -s it's important we elide the + // struct type. The type is implied by the map and will be + // removed otherwise. 
+ g.Do("{\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n"+ + "Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "},\n", args) + return nil + } g.Do("{\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) g.generateDescription(t.CommentLines) g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5f607c76701..61dbf4fc0e4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -210,11 +210,18 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error }, nil } +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + // ParseSchema creates a walkable Schema from an openapi schema. While // this function is public, it doesn't leak through the interface. func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) == 1 { - t := s.GetType().GetValue()[0] + objectTypes := s.GetType().GetValue() + if len(objectTypes) == 1 { + t := objectTypes[0] switch t { case object: return d.parseMap(s, path) @@ -229,6 +236,9 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err if s.GetProperties() != nil { return d.parseKind(s, path) } + if len(objectTypes) == 0 || (len(objectTypes) == 1 && objectTypes[0] == "") { + return d.parseArbitrary(s, path) + } return d.parsePrimitive(s, path) } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 02ab06d6d53..b48e62c3bf9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -58,6 +58,14 @@ type SchemaVisitor interface { VisitReference(Reference) } +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatability, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + // Schema is the base definition of an openapi type. type Schema interface { // Giving a visitor here will let you visit the actual type. @@ -242,6 +250,23 @@ func (p *Primitive) GetName() string { return fmt.Sprintf("%s (%s)", p.Type, p.Format) } +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + // Reference implementation depends on the type of document. 
type Reference interface { Schema diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go index 0be7a5302f1..bbbdd4f61c9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -127,6 +127,9 @@ func (item *mapItem) VisitKind(schema *proto.Kind) { } } +func (item *mapItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *mapItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) @@ -163,11 +166,14 @@ func (item *arrayItem) VisitArray(schema *proto.Array) { } func (item *arrayItem) VisitMap(schema *proto.Map) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) } func (item *arrayItem) VisitKind(schema *proto.Kind) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitArbitrary(schema *proto.Arbitrary) { } func (item *arrayItem) VisitReference(schema proto.Reference) { @@ -226,6 +232,9 @@ func (item *primitiveItem) VisitKind(schema *proto.Kind) { item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) } +func (item *primitiveItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *primitiveItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) From 3dd6e98ea011ff891a08668f7916f090f04e5652 Mon Sep 17 00:00:00 2001 From: abhi Date: Mon, 11 Dec 2017 13:20:23 -0800 Subject: [PATCH 124/264] Fixing logs for cri stats Signed-off-by: abhi --- pkg/kubelet/stats/cri_stats_provider.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index 5934c2962bf..152344c3c31 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -123,14 +123,14 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { containerID := stats.Attributes.Id container, found := containerMap[containerID] if !found { - glog.Errorf("Unknown id %q in container map.", containerID) + glog.Errorf("Unable to find container id %q in container stats list", containerID) continue } podSandboxID := container.PodSandboxId podSandbox, found := podSandboxMap[podSandboxID] if !found { - glog.Errorf("Unknown id %q in pod sandbox map.", podSandboxID) + glog.Errorf("Unable to find pod sandbox id %q in pod stats list", podSandboxID) continue } @@ -142,7 +142,7 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // Fill stats from cadvisor is available for full set of required pod stats caPodSandbox, found := caInfos[podSandboxID] if !found { - glog.V(4).Info("Unable to find cadvisor stats for sandbox %q", podSandboxID) + glog.V(4).Infof("Unable to find cadvisor stats for sandbox %q", podSandboxID) } else { p.addCadvisorPodStats(ps, &caPodSandbox) } @@ -153,7 +153,7 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // container stats caStats, caFound := caInfos[containerID] if !caFound { - glog.V(4).Info("Unable to find cadvisor stats for %q", containerID) 
+ glog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) } else { p.addCadvisorContainerStats(cs, &caStats) } From 28465d8b39f55ae1fb926905e493caaa39d8e05a Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 11 Jan 2018 10:13:10 -0800 Subject: [PATCH 125/264] Fix golint errors on test/e2e/e2e.go When running golint on test/e2e/e2e.go, the following erros were faced: $ golint e2e.go e2e.go:329:2: var metricsJson should be metricsJSON e2e.go:342:1: comment on exported function RunE2ETests should be of the form "RunE2ETests ..." This PR fixes them. --- test/e2e/e2e.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 63211b6892e..bb57e7e6b19 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -288,20 +288,20 @@ func gatherTestSuiteMetrics() error { } metricsForE2E := (*framework.MetricsForE2E)(&received) - metricsJson := metricsForE2E.PrintJSON() + metricsJSON := metricsForE2E.PrintJSON() if framework.TestContext.ReportDir != "" { filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json") - if err := ioutil.WriteFile(filePath, []byte(metricsJson), 0644); err != nil { + if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil { return fmt.Errorf("error writing to %q: %v", filePath, err) } } else { - framework.Logf("\n\nTest Suite Metrics:\n%s\n\n", metricsJson) + framework.Logf("\n\nTest Suite Metrics:\n%s\n\n", metricsJSON) } return nil } -// TestE2E checks configuration parameters (specified through flags) and then runs +// RunE2ETests checks configuration parameters (specified through flags) and then runs // E2E tests using the Ginkgo runner. // If a "report directory" is specified, one or more JUnit test reports will be // generated in this directory, and cluster logs will also be saved. 
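A short, hypothetical sketch of the two golint rules the patch above fixes; the names below are illustrative only and do not appear in test/e2e/e2e.go. The rules: initialisms such as JSON stay fully upper-case in identifiers, and the doc comment of an exported function must begin with that function's own name.

    package main

    import "fmt"

    // ReportMetrics prints the collected metrics as JSON. golint accepts
    // this doc comment because it starts with the exported name, and it
    // accepts the parameter name because the JSON initialism is upper-case.
    func ReportMetrics(metricsJSON string) {
    	fmt.Printf("Test Suite Metrics:\n%s\n", metricsJSON)
    }

    func main() {
    	ReportMetrics(`{"apiserver_request_count": 42}`)
    }

With the older spellings (a metricsJson variable, or a comment beginning "// TestE2E checks ..." above RunE2ETests), golint reports exactly the two warnings quoted in the commit message above.
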
From fb56f679aa50c783bf21bb2d102bbeebe2e4dc3d Mon Sep 17 00:00:00 2001 From: mbohlool Date: Thu, 11 Jan 2018 13:40:26 -0800 Subject: [PATCH 126/264] Fix CHANGELOG urls for release 1.9.1 --- CHANGELOG-1.9.md | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/CHANGELOG-1.9.md b/CHANGELOG-1.9.md index 0094d62b279..ee713833ff5 100644 --- a/CHANGELOG-1.9.md +++ b/CHANGELOG-1.9.md @@ -149,44 +149,44 @@ filename | sha256 hash -------- | ----------- -[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes.tar.gz) | `0eece0e6c1f68535ea71b58b87e239019bb57fdd61118f3d7defa6bbf4fad5ee` -[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-src.tar.gz) | `625ebb79412bd12feccf12e8b6a15d9c71ea681b571f34deaa59fe6c9ba55935` +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes.tar.gz) | `0eece0e6c1f68535ea71b58b87e239019bb57fdd61118f3d7defa6bbf4fad5ee` +[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-src.tar.gz) | `625ebb79412bd12feccf12e8b6a15d9c71ea681b571f34deaa59fe6c9ba55935` ### Client Binaries filename | sha256 hash -------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-386.tar.gz) | `909556ed9b8445703d0124f2d8c1901b00afaba63a9123a4296be8663c3a2b2d` -[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-amd64.tar.gz) | `71e191d99d3ac1426e23e087b8d0875e793e5615d3aa7ac1e175b250f9707c48` -[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-386.tar.gz) | `1c4e60c0c056a3300c7fcc9faccd1b1ea2b337e1360c20c5b1c25fdc47923cf0` -[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-amd64.tar.gz) | `fe8fe40148df404b33069931ea30937699758ed4611ef6baddb4c21b7b19db5e` -[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm64.tar.gz) | `921f5711b97f0b4de69784d9c79f95e80f75a550f28fc1f26597aa0ef6faa471` -[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm.tar.gz) | `77b010cadef98dc832a2f560afe15e57a675ed9fbc59ffad5e19878510997874` -[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-ppc64le.tar.gz) | `02aa71ddcbe8b711814af7287aac79de5d99c1c143c0d3af5e14b1ff195b8bdc` -[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-s390x.tar.gz) | `7e315024267306a620045d003785ecc8d7f2e763a6108ae806d5d384aa7552cc` -[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-386.tar.gz) | `99b2a81b7876498e119db4cb34c434b3790bc41cd882384037c1c1b18cba9f99` -[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-amd64.tar.gz) | `d89d303cbbf9e57e5a540277158e4d83ad18ca7402b5b54665f1378bb4528599` 
+[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-darwin-386.tar.gz) | `909556ed9b8445703d0124f2d8c1901b00afaba63a9123a4296be8663c3a2b2d` +[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-darwin-amd64.tar.gz) | `71e191d99d3ac1426e23e087b8d0875e793e5615d3aa7ac1e175b250f9707c48` +[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-386.tar.gz) | `1c4e60c0c056a3300c7fcc9faccd1b1ea2b337e1360c20c5b1c25fdc47923cf0` +[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-amd64.tar.gz) | `fe8fe40148df404b33069931ea30937699758ed4611ef6baddb4c21b7b19db5e` +[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-arm64.tar.gz) | `921f5711b97f0b4de69784d9c79f95e80f75a550f28fc1f26597aa0ef6faa471` +[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-arm.tar.gz) | `77b010cadef98dc832a2f560afe15e57a675ed9fbc59ffad5e19878510997874` +[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-ppc64le.tar.gz) | `02aa71ddcbe8b711814af7287aac79de5d99c1c143c0d3af5e14b1ff195b8bdc` +[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-s390x.tar.gz) | `7e315024267306a620045d003785ecc8d7f2e763a6108ae806d5d384aa7552cc` +[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-windows-386.tar.gz) | `99b2a81b7876498e119db4cb34c434b3790bc41cd882384037c1c1b18cba9f99` +[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-windows-amd64.tar.gz) | `d89d303cbbf9e57e5a540277158e4d83ad18ca7402b5b54665f1378bb4528599` ### Server Binaries filename | sha256 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-amd64.tar.gz) | `5acf2527461419ba883ac352f7c36c3fa0b86a618dbede187054ad90fa233b0e` -[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm64.tar.gz) | `e1f61b4dc6e0c9986e95ec25f876f9a89966215ee8cc7f4a3539ec391b217587` -[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm.tar.gz) | `441c45e16e63e9bdf99887a896a99b3a376af778cb778cc1d0e6afc505237200` -[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-ppc64le.tar.gz) | `c0175f02180d9c88028ee5ad4e3ea04af8a6741a97f4900b02615f7f83c4d1c5` -[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-s390x.tar.gz) | `2178150d31197ad7f59d44ffea37d682c2675b3a4ea2fc3fa1eaa0e768b993f7` +[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-amd64.tar.gz) | `5acf2527461419ba883ac352f7c36c3fa0b86a618dbede187054ad90fa233b0e` 
+[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-arm64.tar.gz) | `e1f61b4dc6e0c9986e95ec25f876f9a89966215ee8cc7f4a3539ec391b217587` +[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-arm.tar.gz) | `441c45e16e63e9bdf99887a896a99b3a376af778cb778cc1d0e6afc505237200` +[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-ppc64le.tar.gz) | `c0175f02180d9c88028ee5ad4e3ea04af8a6741a97f4900b02615f7f83c4d1c5` +[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-s390x.tar.gz) | `2178150d31197ad7f59d44ffea37d682c2675b3a4ea2fc3fa1eaa0e768b993f7` ### Node Binaries filename | sha256 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-amd64.tar.gz) | `b8ff0ae693ecca4d55669c66786d6c585f8c77b41a270d65f8175eba8729663a` -[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm64.tar.gz) | `f0f63baaace463dc663c98cbc9a41e52233d1ef33410571ce3f3e78bd485787e` -[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm.tar.gz) | `554bdd11deaf390de85830c7c888dfd4d75d9de8ac147799df12993f27bde905` -[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-ppc64le.tar.gz) | `913af8ca8b258930e76fd3368acc83608e36e7e270638fa01a6e3be4f682d8bd` -[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-s390x.tar.gz) | `8192c1c80563230d727fab71514105571afa52cde8520b3d90af58e6daf0e19c` -[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-windows-amd64.tar.gz) | `4408e6d741c6008044584c0d7235e608c596e836d51346ee773589d9b4589fdc` +[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-amd64.tar.gz) | `b8ff0ae693ecca4d55669c66786d6c585f8c77b41a270d65f8175eba8729663a` +[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-arm64.tar.gz) | `f0f63baaace463dc663c98cbc9a41e52233d1ef33410571ce3f3e78bd485787e` +[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-arm.tar.gz) | `554bdd11deaf390de85830c7c888dfd4d75d9de8ac147799df12993f27bde905` +[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-ppc64le.tar.gz) | `913af8ca8b258930e76fd3368acc83608e36e7e270638fa01a6e3be4f682d8bd` +[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-s390x.tar.gz) | `8192c1c80563230d727fab71514105571afa52cde8520b3d90af58e6daf0e19c` +[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-windows-amd64.tar.gz) | `4408e6d741c6008044584c0d7235e608c596e836d51346ee773589d9b4589fdc` ## Changelog since v1.9.0 From c7988bae61f123c6d35ddaaf2dd54def42e02612 Mon Sep 17 00:00:00 2001 From: 
Nic Cope Date: Thu, 11 Jan 2018 17:50:07 -0800 Subject: [PATCH 127/264] Get the node before attempting to get its Alias IP ranges This allows us to fail fast if the node doesn't exist, and to record node status changes if we fail to 'allocate' a CIDR. --- .../nodeipam/ipam/cloud_cidr_allocator.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go index 8d6ef878dac..6f4e4cfcc7a 100644 --- a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go @@ -191,10 +191,14 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { // updateCIDRAllocation assigns CIDR to Node and sends an update to the API server. func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { - var err error - var node *v1.Node defer ca.removeNodeFromProcessing(nodeName) + node, err := ca.nodeLister.Get(nodeName) + if err != nil { + glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err) + return err + } + cidrs, err := ca.cloud.AliasRanges(types.NodeName(nodeName)) if err != nil { nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") @@ -210,12 +214,6 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { } podCIDR := cidr.String() - node, err = ca.nodeLister.Get(nodeName) - if err != nil { - glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err) - return err - } - if node.Spec.PodCIDR == podCIDR { glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) // We don't return here, in order to set the NetworkUnavailable condition later below. 
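The reordering above amounts to a fail-fast pattern: look up the object being reconciled before doing any work that can fail, so a deleted node returns immediately and every later error path already has the node in hand for event recording. A self-contained sketch of that ordering, with getNode, aliasRanges and recordStatusChange as hypothetical stand-ins for the allocator's node lister, cloud provider and event recorder:

    package main

    import (
        "errors"
        "fmt"
    )

    var nodes = map[string]bool{"node-a": true}

    // getNode stands in for the allocator's node lister.
    func getNode(name string) (string, error) {
        if !nodes[name] {
            return "", fmt.Errorf("node %q not found", name)
        }
        return name, nil
    }

    // aliasRanges stands in for the cloud provider call; it always fails here to
    // exercise the recording path.
    func aliasRanges(name string) ([]string, error) {
        return nil, errors.New("no alias range configured")
    }

    // recordStatusChange stands in for the event recorder.
    func recordStatusChange(node, reason string) {
        fmt.Printf("event on %s: %s\n", node, reason)
    }

    func updateCIDRAllocation(name string) error {
        // Fail fast: a missing node means there is nothing to annotate.
        node, err := getNode(name)
        if err != nil {
            return err
        }
        cidrs, err := aliasRanges(name)
        if err != nil {
            // The node is already in hand, so the failure can be recorded on it.
            recordStatusChange(node, "CIDRNotAvailable")
            return err
        }
        _ = cidrs // continue with CIDR validation and patching PodCIDR
        return nil
    }

    func main() {
        fmt.Println(updateCIDRAllocation("node-a")) // records CIDRNotAvailable, then errors
        fmt.Println(updateCIDRAllocation("node-b")) // fails fast, no event
    }
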
From 9d1b687914226514992d9f47c639847930d315b2 Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Thu, 11 Jan 2018 18:24:24 -0800 Subject: [PATCH 128/264] fix a typo --- .../pkg/admission/plugin/webhook/mutating/admission.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go index ec0ae942b69..6d62a36f629 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go @@ -174,7 +174,7 @@ func (a *MutatingWebhook) ValidateInitialization() error { return fmt.Errorf("MutatingWebhook.convertor is not properly setup: %v", err) } if a.defaulter == nil { - return fmt.Errorf("MutatingWebhook.defaulter is not properly setup: %v") + return fmt.Errorf("MutatingWebhook.defaulter is not properly setup") } go a.hookSource.Run(wait.NeverStop) return nil From 0ae647bf333f5123bb470c3866084b699ecaafb0 Mon Sep 17 00:00:00 2001 From: wackxu Date: Tue, 14 Nov 2017 21:43:20 +0800 Subject: [PATCH 129/264] use shared informers for BootstrapSigner controller --- cmd/kube-controller-manager/app/bootstrap.go | 2 + pkg/controller/bootstrap/BUILD | 7 + pkg/controller/bootstrap/bootstrapsigner.go | 148 ++++++++++-------- .../bootstrap/bootstrapsigner_test.go | 38 +++-- 4 files changed, 114 insertions(+), 81 deletions(-) diff --git a/cmd/kube-controller-manager/app/bootstrap.go b/cmd/kube-controller-manager/app/bootstrap.go index 046070ecb27..38e066523fd 100644 --- a/cmd/kube-controller-manager/app/bootstrap.go +++ b/cmd/kube-controller-manager/app/bootstrap.go @@ -25,6 +25,8 @@ import ( func startBootstrapSignerController(ctx ControllerContext) (bool, error) { bsc, err := bootstrap.NewBootstrapSigner( ctx.ClientBuilder.ClientGoClientOrDie("bootstrap-signer"), + ctx.InformerFactory.Core().V1().Secrets(), + ctx.InformerFactory.Core().V1().ConfigMaps(), bootstrap.DefaultBootstrapSignerOptions(), ) if err != nil { diff --git a/pkg/controller/bootstrap/BUILD b/pkg/controller/bootstrap/BUILD index 6dfcd204c9d..dccc9637a58 100644 --- a/pkg/controller/bootstrap/BUILD +++ b/pkg/controller/bootstrap/BUILD @@ -21,11 +21,14 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", "//pkg/bootstrap/api:go_default_library", + "//pkg/controller:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", ], @@ -44,6 +47,7 @@ go_library( deps = [ "//pkg/apis/core:go_default_library", "//pkg/bootstrap/api:go_default_library", + "//pkg/controller:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/square/go-jose:go_default_library", @@ -51,11 +55,14 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", ], diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go index 5bb53a44837..5db870aa49d 100644 --- a/pkg/controller/bootstrap/bootstrapsigner.go +++ b/pkg/controller/bootstrap/bootstrapsigner.go @@ -22,25 +22,25 @@ import ( "github.com/golang/glog" + "fmt" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" + informers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" ) // BootstrapSignerOptions contains options for the BootstrapSigner type BootstrapSignerOptions struct { - // ConfigMapNamespace is the namespace of the ConfigMap ConfigMapNamespace string @@ -71,88 +71,101 @@ func DefaultBootstrapSignerOptions() BootstrapSignerOptions { // BootstrapSigner is a controller that signs a ConfigMap with a set of tokens. type BootstrapSigner struct { - client clientset.Interface - configMapKey string - secretNamespace string - - configMaps cache.Store - secrets cache.Store + client clientset.Interface + configMapKey string + configMapName string + configMapNamespace string + secretNamespace string // syncQueue handles synchronizing updates to the ConfigMap. We'll only ever // have one item (Named ) in this queue. We are using it // serializes and collapses updates as they can come from both the ConfigMap // and Secrets controllers. - syncQueue workqueue.Interface + syncQueue workqueue.RateLimitingInterface - // Since we join two objects, we'll watch both of them with controllers. - configMapsController cache.Controller - secretsController cache.Controller + secretLister corelisters.SecretLister + secretSynced cache.InformerSynced + + configMapLister corelisters.ConfigMapLister + configMapSynced cache.InformerSynced } // NewBootstrapSigner returns a new *BootstrapSigner. 
-// -// TODO: Switch to shared informers -func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) (*BootstrapSigner, error) { +func NewBootstrapSigner(cl clientset.Interface, secrets informers.SecretInformer, configMaps informers.ConfigMapInformer, options BootstrapSignerOptions) (*BootstrapSigner, error) { e := &BootstrapSigner{ - client: cl, - configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName, - secretNamespace: options.TokenSecretNamespace, - syncQueue: workqueue.NewNamed("bootstrap_signer_queue"), + client: cl, + configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName, + configMapName: options.ConfigMapName, + configMapNamespace: options.ConfigMapNamespace, + secretNamespace: options.TokenSecretNamespace, + secretLister: secrets.Lister(), + secretSynced: secrets.Informer().HasSynced, + configMapLister: configMaps.Lister(), + configMapSynced: configMaps.Informer().HasSynced, + syncQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue"), } if cl.CoreV1().RESTClient().GetRateLimiter() != nil { if err := metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { return nil, err } } - configMapSelector := fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ConfigMapName}) - e.configMaps, e.configMapsController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { - lo.FieldSelector = configMapSelector.String() - return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).List(lo) + + configMaps.Informer().AddEventHandlerWithResyncPeriod( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + switch t := obj.(type) { + case *v1.ConfigMap: + return t.Name == options.ConfigMapName && t.Namespace == options.ConfigMapNamespace + default: + utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj)) + return false + } }, - WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { - lo.FieldSelector = configMapSelector.String() - return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).Watch(lo) + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, + UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, }, }, - &v1.ConfigMap{}, options.ConfigMapResync, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, - UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, - }, ) - secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)}) - e.secrets, e.secretsController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { - lo.FieldSelector = secretSelector.String() - return e.client.CoreV1().Secrets(e.secretNamespace).List(lo) + secrets.Informer().AddEventHandlerWithResyncPeriod( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + switch t := obj.(type) { + case *v1.Secret: + return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.secretNamespace + default: + utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj)) + return false + } }, - WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { - lo.FieldSelector = secretSelector.String() - return 
e.client.CoreV1().Secrets(e.secretNamespace).Watch(lo) + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, + UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, + DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() }, }, }, - &v1.Secret{}, options.SecretResync, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, - UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, - DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() }, - }, ) + return e, nil } // Run runs controller loops and returns when they are done func (e *BootstrapSigner) Run(stopCh <-chan struct{}) { - go e.configMapsController.Run(stopCh) - go e.secretsController.Run(stopCh) + // Shut down queues + defer utilruntime.HandleCrash() + defer e.syncQueue.ShutDown() + + if !controller.WaitForCacheSync("bootstrap_signer", stopCh, e.configMapSynced, e.secretSynced) { + return + } + + glog.V(5).Infof("Starting workers") go wait.Until(e.serviceConfigMapQueue, 0, stopCh) <-stopCh + glog.V(1).Infof("Shutting down") } func (e *BootstrapSigner) pokeConfigMapSync() { @@ -237,27 +250,32 @@ func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) { // getConfigMap gets the ConfigMap we are interested in func (e *BootstrapSigner) getConfigMap() *v1.ConfigMap { - configMap, exists, err := e.configMaps.GetByKey(e.configMapKey) + configMap, err := e.configMapLister.ConfigMaps(e.configMapNamespace).Get(e.configMapName) // If we can't get the configmap just return nil. The resync will eventually // sync things up. + if err != nil { + if !apierrors.IsNotFound(err) { + utilruntime.HandleError(err) + } + return nil + } + + return configMap +} + +func (e *BootstrapSigner) listSecrets() []*v1.Secret { + secrets, err := e.secretLister.Secrets(e.secretNamespace).List(labels.Everything()) if err != nil { utilruntime.HandleError(err) return nil } - if exists { - return configMap.(*v1.ConfigMap) - } - return nil -} - -func (e *BootstrapSigner) listSecrets() []*v1.Secret { - secrets := e.secrets.List() - items := []*v1.Secret{} - for _, obj := range secrets { - items = append(items, obj.(*v1.Secret)) + for _, secret := range secrets { + if secret.Type == bootstrapapi.SecretTypeBootstrapToken { + items = append(items, secret) + } } return items } diff --git a/pkg/controller/bootstrap/bootstrapsigner_test.go b/pkg/controller/bootstrap/bootstrapsigner_test.go index ba92382300d..15fab2e26b0 100644 --- a/pkg/controller/bootstrap/bootstrapsigner_test.go +++ b/pkg/controller/bootstrap/bootstrapsigner_test.go @@ -24,10 +24,13 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" + "k8s.io/kubernetes/pkg/controller" ) func init() { @@ -36,14 +39,17 @@ func init() { const testTokenID = "abc123" -func newBootstrapSigner() (*BootstrapSigner, *fake.Clientset, error) { +func newBootstrapSigner() (*BootstrapSigner, *fake.Clientset, coreinformers.SecretInformer, coreinformers.ConfigMapInformer, error) { options := DefaultBootstrapSignerOptions() cl := fake.NewSimpleClientset() - bsc, err := NewBootstrapSigner(cl, options) + informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc()) + secrets := 
informers.Core().V1().Secrets() + configMaps := informers.Core().V1().ConfigMaps() + bsc, err := NewBootstrapSigner(cl, secrets, configMaps, options) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err } - return bsc, cl, nil + return bsc, cl, secrets, configMaps, nil } func newConfigMap(tokenID, signature string) *v1.ConfigMap { @@ -64,7 +70,7 @@ func newConfigMap(tokenID, signature string) *v1.ConfigMap { } func TestNoConfigMap(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, _, _, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } @@ -73,17 +79,17 @@ func TestNoConfigMap(t *testing.T) { } func TestSimpleSign(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, secrets, configMaps, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap("", "") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) secret := newTokenSecret(testTokenID, "tokenSecret") addSecretSigningUsage(secret, "true") - signer.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) signer.signConfigMap() @@ -97,17 +103,17 @@ func TestSimpleSign(t *testing.T) { } func TestNoSignNeeded(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, secrets, configMaps, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap(testTokenID, "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiYzEyMyJ9..QSxpUG7Q542CirTI2ECPSZjvBOJURUW5a7XqFpNI958") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) secret := newTokenSecret(testTokenID, "tokenSecret") addSecretSigningUsage(secret, "true") - signer.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) signer.signConfigMap() @@ -115,17 +121,17 @@ func TestNoSignNeeded(t *testing.T) { } func TestUpdateSignature(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, secrets, configMaps, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap(testTokenID, "old signature") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) secret := newTokenSecret(testTokenID, "tokenSecret") addSecretSigningUsage(secret, "true") - signer.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) signer.signConfigMap() @@ -139,13 +145,13 @@ func TestUpdateSignature(t *testing.T) { } func TestRemoveSignature(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, _, configMaps, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap(testTokenID, "old signature") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) signer.signConfigMap() From 252ff1e5a606a3517d6d1e2743a2c023cd40267a Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Tue, 9 Jan 2018 10:42:07 +0530 Subject: [PATCH 130/264] Metrics for predicate and priority evaluation --- pkg/scheduler/core/generic_scheduler.go | 7 ++++++- pkg/scheduler/metrics/metrics.go | 21 +++++++++++++++++++-- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index e1128c01cb8..f147d534958 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" 
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/schedulercache" "k8s.io/kubernetes/pkg/scheduler/util" @@ -131,6 +132,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister } trace.Step("Computing predicates") + startPredicateEvalTime := time.Now() filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache, g.schedulingQueue) if err != nil { return "", err @@ -143,11 +145,13 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister FailedPredicates: failedPredicateMap, } } + metrics.SchedulingAlgorithmPredicateEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPredicateEvalTime)) trace.Step("Prioritizing") - + startPriorityEvalTime := time.Now() // When only one node after predicate, just use it. if len(filteredNodes) == 1 { + metrics.SchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime)) return filteredNodes[0].Name, nil } @@ -156,6 +160,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister if err != nil { return "", err } + metrics.SchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime)) trace.Step("Selecting host") return g.selectHost(priorityList) diff --git a/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go index cd50ceddc9a..c0a87f319ae 100644 --- a/pkg/scheduler/metrics/metrics.go +++ b/pkg/scheduler/metrics/metrics.go @@ -25,8 +25,7 @@ import ( const schedulerSubsystem = "scheduler" -var BindingSaturationReportInterval = 1 * time.Second - +// All the histogram based metrics have 1ms as size for the smallest bucket. 
var ( E2eSchedulingLatency = prometheus.NewHistogram( prometheus.HistogramOpts{ @@ -44,6 +43,22 @@ var ( Buckets: prometheus.ExponentialBuckets(1000, 2, 15), }, ) + SchedulingAlgorithmPredicateEvaluationDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Subsystem: schedulerSubsystem, + Name: "scheduling_algorithm_predicate_evaluation", + Help: "Scheduling algorithm predicate evaluation duration", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + ) + SchedulingAlgorithmPriorityEvaluationDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Subsystem: schedulerSubsystem, + Name: "scheduling_algorithm_priority_evaluation", + Help: "Scheduling algorithm priority evaluation duration", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + ) BindingLatency = prometheus.NewHistogram( prometheus.HistogramOpts{ Subsystem: schedulerSubsystem, @@ -63,6 +78,8 @@ func Register() { prometheus.MustRegister(E2eSchedulingLatency) prometheus.MustRegister(SchedulingAlgorithmLatency) prometheus.MustRegister(BindingLatency) + prometheus.MustRegister(SchedulingAlgorithmPredicateEvaluationDuration) + prometheus.MustRegister(SchedulingAlgorithmPriorityEvaluationDuration) }) } From b3c57a880ce42222ede01878df2cb595f0aad571 Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Tue, 9 Jan 2018 10:50:50 +0530 Subject: [PATCH 131/264] Build files generated --- pkg/scheduler/core/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD index 6b652164e8c..c04be5f4821 100644 --- a/pkg/scheduler/core/BUILD +++ b/pkg/scheduler/core/BUILD @@ -50,6 +50,7 @@ go_library( "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/schedulercache:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", From 41c75969748162118708c56ff56627f1980efe09 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Thu, 11 Jan 2018 01:56:16 +0000 Subject: [PATCH 132/264] Use linux commands instead of docker commands. --- test/e2e/framework/util.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 637a9eace0b..6f311e5c73e 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3900,9 +3900,7 @@ func sshRestartMaster() error { } var command string if ProviderIs("gce") { - // `kube-apiserver_kube-apiserver` matches the name of the apiserver - // container. 
- command = "sudo docker ps | grep kube-apiserver_kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill" + command = "pidof kube-apiserver | xargs sudo kill" } else { command = "sudo /etc/init.d/kube-apiserver restart" } @@ -3933,9 +3931,9 @@ func RestartControllerManager() error { if ProviderIs("gce") && !MasterOSDistroIs("gci") { return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro) } - cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1 | xargs sudo docker kill" + cmd := "pidof kube-controller-manager | xargs sudo kill" Logf("Restarting controller-manager via ssh, running: %v", cmd) - result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider) + result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart controller-manager: %v", err) @@ -3946,7 +3944,7 @@ func RestartControllerManager() error { func WaitForControllerManagerUp() error { cmd := "curl http://localhost:" + strconv.Itoa(ports.ControllerManagerPort) + "/healthz" for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider) + result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) } @@ -3960,9 +3958,9 @@ func WaitForControllerManagerUp() error { // CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration" func CheckForControllerManagerHealthy(duration time.Duration) error { var PID string - cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1" + cmd := "pidof kube-controller-manager" for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) { - result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider) + result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil { // We don't necessarily know that it crashed, pipe could just be broken LogSSHResult(result) From 4d6817dd71fb6b7703aaa0b5e74bcdfdaa44b1a6 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Fri, 12 Jan 2018 06:33:18 +0000 Subject: [PATCH 133/264] Use GinkgoRecover to avoid panic. --- test/e2e_node/resource_collector.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index 53028aebfad..ec256e32f1f 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -45,6 +45,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e_node/perftype" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) @@ -373,6 +374,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { for _, pod := range pods { wg.Add(1) go func(pod *v1.Pod) { + defer GinkgoRecover() defer wg.Done() err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) From dd9de90b0ad1aa78c2a8dd7d5238d8f769ffe771 Mon Sep 17 00:00:00 2001 From: p0lyn0mial Date: Thu, 4 Jan 2018 14:52:25 +0100 Subject: [PATCH 134/264] the changes introduced in this commit plumbs in the generic scaler into kubectl. note that we don't change the behaviour of kubectl. For example it won't scale new resources. That's the end goal. The first step is to retrofit existing code to use the generic scaler. 
--- pkg/kubectl/cmd/util/BUILD | 1 + .../cmd/util/factory_object_mapping.go | 19 ++++++- pkg/kubectl/scale.go | 12 ++-- staging/src/k8s.io/client-go/scale/client.go | 1 - test/e2e/apps/daemon_restart.go | 6 +- test/e2e/examples.go | 4 +- test/e2e/framework/BUILD | 3 + test/e2e/framework/deployment_util.go | 7 ++- test/e2e/framework/framework.go | 25 +++++++++ test/e2e/framework/rc_util.go | 13 +++-- test/e2e/framework/util.go | 12 +++- test/e2e/network/service.go | 2 +- test/e2e/scalability/BUILD | 15 +++-- test/e2e/scalability/density.go | 5 +- test/e2e/scalability/load.go | 55 ++++++++++++++++--- .../equivalence_cache_predicates.go | 2 +- test/e2e/scheduling/priorities.go | 2 +- test/e2e/scheduling/rescheduler.go | 6 +- test/integration/framework/BUILD | 3 - test/integration/framework/util.go | 54 ------------------ test/utils/BUILD | 1 + test/utils/runners.go | 29 ++++++++++ 22 files changed, 177 insertions(+), 100 deletions(-) diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 6541d3953e9..ed3d59475c3 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -78,6 +78,7 @@ go_library( "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/util/homedir:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/pkg/kubectl/cmd/util/factory_object_mapping.go b/pkg/kubectl/cmd/util/factory_object_mapping.go index dfd82d406c5..5c9f01f7d50 100644 --- a/pkg/kubectl/cmd/util/factory_object_mapping.go +++ b/pkg/kubectl/cmd/util/factory_object_mapping.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" @@ -286,7 +287,23 @@ func (f *ring1Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) if err != nil { return nil, err } - return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset) + + // create scales getter + // TODO(p0lyn0mial): put scalesGetter to a factory + discoClient, err := f.clientAccessFactory.DiscoveryClient() + if err != nil { + return nil, err + } + restClient, err := f.clientAccessFactory.RESTClient() + if err != nil { + return nil, err + } + mapper, _ := f.Object() + resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient) + scalesGetter := scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver) + gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource) + + return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset, scalesGetter, gvk.GroupResource()) } func (f *ring1Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { diff --git a/pkg/kubectl/scale.go b/pkg/kubectl/scale.go index 1d4165f9626..511514df6b8 100644 --- a/pkg/kubectl/scale.go +++ b/pkg/kubectl/scale.go @@ -53,7 +53,10 @@ type Scaler interface { ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) } -func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) { +// ScalerFor gets a scaler for a given resource +// TODO(p0lyn0mial): remove kind and internalclientset +// 
TODO(p0lyn0mial): once we have only one scaler, there is no need to return an error anymore. +func ScalerFor(kind schema.GroupKind, c internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) (Scaler, error) { switch kind { case api.Kind("ReplicationController"): return &ReplicationControllerScaler{c.Core()}, nil @@ -63,10 +66,9 @@ func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, er return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface. case apps.Kind("StatefulSet"): return &StatefulSetScaler{c.Apps()}, nil - case extensions.Kind("Deployment"), apps.Kind("Deployment"): - return &DeploymentScaler{c.Extensions()}, nil + default: + return &GenericScaler{scalesGetter, gr}, nil } - return nil, fmt.Errorf("no scaler has been implemented for %q", kind) } // ScalePrecondition describes a condition that must be true for the scale to take place @@ -533,7 +535,7 @@ func (precondition *ScalePrecondition) validateGeneric(scale *autoscalingapi.Sca } // GenericScaler can update scales for resources in a particular namespace -// TODO(o0lyn0mial): when the work on GenericScaler is done, don't +// TODO(po0lyn0mial): when the work on GenericScaler is done, don't // export the GenericScaler. Instead use ScalerFor method for getting the Scaler // also update the UTs type GenericScaler struct { diff --git a/staging/src/k8s.io/client-go/scale/client.go b/staging/src/k8s.io/client-go/scale/client.go index 07c6098620b..a8c903d9eab 100644 --- a/staging/src/k8s.io/client-go/scale/client.go +++ b/staging/src/k8s.io/client-go/scale/client.go @@ -196,7 +196,6 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut Body(scaleUpdateBytes). Do() if err := result.Error(); err != nil { - panic(err) return nil, fmt.Errorf("could not update the scale for %s %s: %v", resource.String(), scale.Name, err) } diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 85266680ee7..2319dfe5731 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -257,7 +257,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // to the same size achieves this, because the scale operation advances the RC's sequence number // and awaits it to be observed and reported back in the RC's status. - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true) // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? @@ -288,9 +288,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { restarter.kill() // This is best effort to try and create pods while the scheduler is down, // since we don't know exactly when it is restarted after the kill signal. 
- framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false)) restarter.waitUp() - framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true)) }) It("Kubelet should not restart containers across restart", func() { diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 7e377e203e4..6fa937c89f6 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -521,7 +521,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling rethinkdb") - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "rethinkdb-rc", 2, true) checkDbInstances() By("starting admin") @@ -564,7 +564,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling hazelcast") - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "hazelcast", 2, true) forEachPod("name", "hazelcast", func(pod v1.Pod) { _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 0ecad57686d..6a773a3957b 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -110,6 +110,7 @@ go_library( "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", @@ -132,6 +133,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/discovery/cached:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -139,6 +141,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index 23feda770d4..d5544e1998e 100644 --- a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + 
scaleclient "k8s.io/client-go/scale" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -178,8 +179,10 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er return err } -func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error { - return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment")) +//TODO(p0lyn0mial): remove internalClientset and kind. +//TODO(p0lyn0mial): update the callers. +func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { + return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments")) } func RunDeployment(config testutils.DeploymentConfig) error { diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index e628accaa28..f5341d79c00 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -28,14 +28,19 @@ import ( "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + cacheddiscovery "k8s.io/client-go/discovery/cached" "k8s.io/client-go/dynamic" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -67,6 +72,8 @@ type Framework struct { AggregatorClient *aggregatorclient.Clientset ClientPool dynamic.ClientPool + ScalesGetter scaleclient.ScalesGetter + SkipNamespaceCreation bool // Whether to skip creating a namespace Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped namespacesToDelete []*v1.Namespace // Some tests have more than one. @@ -161,6 +168,24 @@ func (f *Framework) BeforeEach() { f.AggregatorClient, err = aggregatorclient.NewForConfig(config) Expect(err).NotTo(HaveOccurred()) f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc) + + // create scales getter, set GroupVersion and NegotiatedSerializer to default values + // as they are required when creating a REST client. 
+ if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = legacyscheme.Codecs + } + restClient, err := rest.RESTClientFor(config) + Expect(err).NotTo(HaveOccurred()) + discoClient, err := discovery.NewDiscoveryClientForConfig(config) + Expect(err).NotTo(HaveOccurred()) + cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient) + restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured) + resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient) + f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) + if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil { externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig) externalConfig.QPS = f.Options.ClientQPS diff --git a/test/e2e/framework/rc_util.go b/test/e2e/framework/rc_util.go index d0d1982b535..8bbdb6f4a9b 100644 --- a/test/e2e/framework/rc_util.go +++ b/test/e2e/framework/rc_util.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/testapi" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -84,7 +85,9 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till // none are running, otherwise it does what a synchronous scale operation would do. -func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error { +//TODO(p0lyn0mial): remove internalClientset. +//TODO(p0lyn0mial): update the callers. +func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error { listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()} rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts) if err != nil { @@ -96,7 +99,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas) for _, labelRC := range rcs.Items { name := labelRC.Name - if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil { + if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil { return err } rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) @@ -156,8 +159,10 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name) } -func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error { - return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController")) +//TODO(p0lyn0mial): remove internalClientset. +//TODO(p0lyn0mial): update the callers. 
+func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { + return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers")) } func RunRC(config testutils.RCConfig) error { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 637a9eace0b..8384c774af0 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -74,6 +74,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -2682,20 +2683,25 @@ func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) { ExpectNoError(err) } -func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) { - return kubectl.ScalerFor(kind, internalClientset) +//TODO(p0lyn0mial): remove internalClientset and kind +func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind, scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) (kubectl.Scaler, error) { + return kubectl.ScalerFor(kind, internalClientset, scalesGetter, gr) } +//TODO(p0lyn0mial): remove internalClientset and kind. +//TODO(p0lyn0mial): update the callers. func ScaleResource( clientset clientset.Interface, internalClientset internalclientset.Interface, + scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool, kind schema.GroupKind, + gr schema.GroupResource, ) error { By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size)) - scaler, err := getScalerForKind(internalClientset, kind) + scaler, err := getScalerForKind(internalClientset, kind, scalesGetter, gr) if err != nil { return err } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 759599a9937..44712d0d4cf 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1265,7 +1265,7 @@ var _ = SIGDescribe("Services", func() { } By("Scaling down replication controller to zero") - framework.ScaleRC(f.ClientSet, f.InternalClientset, t.Namespace, rcSpec.Name, 0, false) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) By("Update service to not tolerate unready services") _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { diff --git a/test/e2e/scalability/BUILD b/test/e2e/scalability/BUILD index 34dff1f866a..fc6e3cee361 100644 --- a/test/e2e/scalability/BUILD +++ b/test/e2e/scalability/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -14,7 +9,9 @@ go_library( "load.go", ], importpath = "k8s.io/kubernetes/test/e2e/scalability", + visibility = ["//visibility:public"], deps = [ + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", @@ -26,6 +23,7 @@ go_library( "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", @@ -38,8 +36,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/discovery/cached:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/transport:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", @@ -57,4 +59,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index f10671e10b6..6e49d068599 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -528,7 +528,7 @@ var _ = SIGDescribe("Density", func() { podThroughput := 20 timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute // createClients is defined in load.go - clients, internalClients, err := createClients(numberOfCollections) + clients, internalClients, scalesClients, err := createClients(numberOfCollections) for i := 0; i < numberOfCollections; i++ { nsName := namespaces[i].Name secretNames := []string{} @@ -559,6 +559,7 @@ var _ = SIGDescribe("Density", func() { baseConfig := &testutils.RCConfig{ Client: clients[i], InternalClient: internalClients[i], + ScalesGetter: scalesClients[i], Image: framework.GetPauseImageName(f.ClientSet), Name: name, Namespace: nsName, @@ -590,7 +591,7 @@ var _ = SIGDescribe("Density", func() { } // Single client is running out of http2 connections in delete phase, hence we need more. - clients, internalClients, err = createClients(2) + clients, internalClients, _, err = createClients(2) dConfig := DensityTestConfig{ ClientSets: clients, diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index c696de42724..6e15dbc8803 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -28,14 +28,18 @@ import ( "time" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + cacheddiscovery "k8s.io/client-go/discovery/cached" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/transport" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/apis/batch" @@ -48,6 +52,8 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "k8s.io/client-go/dynamic" + "k8s.io/kubernetes/pkg/api/legacyscheme" ) const ( @@ -309,9 +315,11 @@ var _ = SIGDescribe("Load capacity", func() { } }) -func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, error) { +func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, []scaleclient.ScalesGetter, error) { clients := make([]clientset.Interface, numberOfClients) internalClients := make([]internalclientset.Interface, numberOfClients) + scalesClients := make([]scaleclient.ScalesGetter, numberOfClients) + for i := 0; i < numberOfClients; i++ { config, err := framework.LoadConfig() Expect(err).NotTo(HaveOccurred()) @@ -327,11 +335,11 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient // each client here. transportConfig, err := config.TransportConfig() if err != nil { - return nil, nil, err + return nil, nil, nil, err } tlsConfig, err := transport.TLSConfigFor(transportConfig) if err != nil { - return nil, nil, err + return nil, nil, nil, err } config.Transport = utilnet.SetTransportDefaults(&http.Transport{ Proxy: http.ProxyFromEnvironment, @@ -349,16 +357,37 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient c, err := clientset.NewForConfig(config) if err != nil { - return nil, nil, err + return nil, nil, nil, err } clients[i] = c internalClient, err := internalclientset.NewForConfig(config) if err != nil { - return nil, nil, err + return nil, nil, nil, err } internalClients[i] = internalClient + + // create scale client, if GroupVersion or NegotiatedSerializer are not set + // assign default values - these fields are mandatory (required by RESTClientFor). + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = legacyscheme.Codecs + } + restClient, err := restclient.RESTClientFor(config) + if err != nil { + return nil, nil, nil, err + } + discoClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, nil, nil, err + } + cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient) + restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured) + resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient) + scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) } - return clients, internalClients, nil + return clients, internalClients, scalesClients, nil } func computePodCounts(total int) (int, int, int) { @@ -405,12 +434,13 @@ func generateConfigs( // Create a number of clients to better simulate real usecase // where not everyone is using exactly the same client. 
rcsPerClient := 20 - clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient) + clients, internalClients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient) framework.ExpectNoError(err) for i := 0; i < len(configs); i++ { configs[i].SetClient(clients[i%len(clients)]) configs[i].SetInternalClient(internalClients[i%len(internalClients)]) + configs[i].SetScalesClient(scalesClients[i%len(clients)]) } for i := 0; i < len(secretConfigs); i++ { secretConfigs[i].Client = clients[i%len(clients)] @@ -590,7 +620,16 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling sleepUpTo(scalingTime) newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2) framework.ExpectNoError(framework.ScaleResource( - config.GetClient(), config.GetInternalClient(), config.GetNamespace(), config.GetName(), newSize, true, config.GetKind()), + config.GetClient(), + config.GetInternalClient(), + config.GetScalesGetter(), + config.GetNamespace(), + config.GetName(), + newSize, + true, + config.GetKind(), + config.GetGroupResource(), + ), fmt.Sprintf("scaling %v %v", config.GetKind(), config.GetName())) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()})) diff --git a/test/e2e/scheduling/equivalence_cache_predicates.go b/test/e2e/scheduling/equivalence_cache_predicates.go index 79aaf5d9d8b..3d551476399 100644 --- a/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/test/e2e/scheduling/equivalence_cache_predicates.go @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { By("Trying to schedule another equivalent Pod should fail due to node label has been removed.") // use scale to create another equivalent pod and wait for failure event WaitForSchedulerAfterAction(f, func() error { - err := framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, affinityRCName, uint(replica+1), false) + err := framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false) return err }, affinityRCName, false) // and this new pod should be rejected since node label has been updated diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 1c34ea8998c..f3643b6cb13 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -196,7 +196,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1)) - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rc.Name, uint(len(nodeList.Items)-1), true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true) testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{ LabelSelector: "name=scheduler-priority-avoid-pod", }) diff --git a/test/e2e/scheduling/rescheduler.go b/test/e2e/scheduling/rescheduler.go index 512e8b3c6f0..0d1107ccbe1 100644 --- a/test/e2e/scheduling/rescheduler.go +++ b/test/e2e/scheduling/rescheduler.go @@ -68,8 +68,8 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() { deployment := deployments.Items[0] replicas := uint(*(deployment.Spec.Replicas)) - err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas+1, true) - defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas, 
true)) + err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true) + defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true)) framework.ExpectNoError(err) }) @@ -80,7 +80,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error { replicas := millicores / 100 reserveCpu(f, id, 1, 100) - framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.Namespace.Name, id, uint(replicas), false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false)) for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels) diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 07da3bc1141..27a3fb8e412 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -22,13 +22,10 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/apis/batch:go_default_library", - "//pkg/apis/core:go_default_library", "//pkg/apis/policy/v1beta1:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/generated/openapi:go_default_library", - "//pkg/kubectl:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/master:go_default_library", "//pkg/util/env:go_default_library", diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go index afb1d68961e..c9d42a99c48 100644 --- a/test/integration/framework/util.go +++ b/test/integration/framework/util.go @@ -19,22 +19,13 @@ limitations under the License. package framework import ( - "io/ioutil" "net/http/httptest" "strings" "testing" - "time" - - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api/testapi" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/kubectl" ) const ( @@ -80,48 +71,3 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *test func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) { // TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace. } - -// RCFromManifest reads a .json file and returns the rc in it. 
-func RCFromManifest(fileName string) *v1.ReplicationController { - data, err := ioutil.ReadFile(fileName) - if err != nil { - glog.Fatalf("Unexpected error reading rc manifest %v", err) - } - var controller v1.ReplicationController - if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil { - glog.Fatalf("Unexpected error reading rc manifest %v", err) - } - return &controller -} - -// StopRC stops the rc via kubectl's stop library -func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error { - reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset) - if err != nil || reaper == nil { - return err - } - err = reaper.Stop(rc.Namespace, rc.Name, 0, nil) - if err != nil { - return err - } - return nil -} - -// ScaleRC scales the given rc to the given replicas. -func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) { - scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset) - if err != nil { - return nil, err - } - retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout} - waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout} - err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas) - if err != nil { - return nil, err - } - scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return scaled, nil -} diff --git a/test/utils/BUILD b/test/utils/BUILD index a2e6045933f..da7eeadab95 100644 --- a/test/utils/BUILD +++ b/test/utils/BUILD @@ -44,6 +44,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", ], diff --git a/test/utils/runners.go b/test/utils/runners.go index 2eaf28e48e2..1d71a3eeb62 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/util/workqueue" batchinternal "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" @@ -105,16 +106,20 @@ type RunObjectConfig interface { GetKind() schema.GroupKind GetClient() clientset.Interface GetInternalClient() internalclientset.Interface + GetScalesGetter() scaleclient.ScalesGetter SetClient(clientset.Interface) SetInternalClient(internalclientset.Interface) + SetScalesClient(scaleclient.ScalesGetter) GetReplicas() int GetLabelValue(string) (string, bool) + GetGroupResource() schema.GroupResource } type RCConfig struct { Affinity *v1.Affinity Client clientset.Interface InternalClient internalclientset.Interface + ScalesGetter scaleclient.ScalesGetter Image string Command []string Name string @@ -277,6 +282,10 @@ func (config *DeploymentConfig) GetKind() schema.GroupKind { return extensionsinternal.Kind("Deployment") } +func (config *DeploymentConfig) GetGroupResource() schema.GroupResource { + return extensionsinternal.Resource("deployments") +} + func (config *DeploymentConfig) create() error { deployment := &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ @@ -344,6 
+353,10 @@ func (config *ReplicaSetConfig) GetKind() schema.GroupKind { return extensionsinternal.Kind("ReplicaSet") } +func (config *ReplicaSetConfig) GetGroupResource() schema.GroupResource { + return extensionsinternal.Resource("replicasets") +} + func (config *ReplicaSetConfig) create() error { rs := &extensions.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ @@ -411,6 +424,10 @@ func (config *JobConfig) GetKind() schema.GroupKind { return batchinternal.Kind("Job") } +func (config *JobConfig) GetGroupResource() schema.GroupResource { + return batchinternal.Resource("jobs") +} + func (config *JobConfig) create() error { job := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -482,6 +499,10 @@ func (config *RCConfig) GetKind() schema.GroupKind { return api.Kind("ReplicationController") } +func (config *RCConfig) GetGroupResource() schema.GroupResource { + return api.Resource("replicationcontrollers") +} + func (config *RCConfig) GetClient() clientset.Interface { return config.Client } @@ -490,6 +511,10 @@ func (config *RCConfig) GetInternalClient() internalclientset.Interface { return config.InternalClient } +func (config *RCConfig) GetScalesGetter() scaleclient.ScalesGetter { + return config.ScalesGetter +} + func (config *RCConfig) SetClient(c clientset.Interface) { config.Client = c } @@ -498,6 +523,10 @@ func (config *RCConfig) SetInternalClient(c internalclientset.Interface) { config.InternalClient = c } +func (config *RCConfig) SetScalesClient(getter scaleclient.ScalesGetter) { + config.ScalesGetter = getter +} + func (config *RCConfig) GetReplicas() int { return config.Replicas } From 50444800b14c9841997619a54dc8eeae3a3ebef9 Mon Sep 17 00:00:00 2001 From: Cosmin Cojocar Date: Fri, 12 Jan 2018 11:10:40 +0100 Subject: [PATCH 135/264] Instrument the Azure API calls for Prometheus monitoring --- pkg/cloudprovider/providers/azure/BUILD | 3 + .../providers/azure/azure_client.go | 259 +++++++++++++++--- .../providers/azure/azure_metrics.go | 82 ++++++ .../providers/azure/azure_metrics_test.go | 39 +++ 4 files changed, 342 insertions(+), 41 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_metrics.go create mode 100644 pkg/cloudprovider/providers/azure/azure_metrics_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index d796860b662..8272b20219d 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -19,6 +19,7 @@ go_library( "azure_instances.go", "azure_loadbalancer.go", "azure_managedDiskController.go", + "azure_metrics.go", "azure_routes.go", "azure_storage.go", "azure_storageaccount.go", @@ -48,6 +49,7 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -63,6 +65,7 @@ go_test( name = "go_default_test", srcs = [ "azure_loadbalancer_test.go", + "azure_metrics_test.go", "azure_test.go", "azure_util_cache_test.go", "azure_util_test.go", diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index 7cf65fe13e9..e0e2697aef8 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ 
b/pkg/cloudprovider/providers/azure/azure_client.go @@ -158,7 +158,13 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMNa glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) }() - return az.client.CreateOrUpdate(resourceGroupName, VMName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, VMName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { @@ -168,7 +174,10 @@ func (az *azVirtualMachinesClient) Get(resourceGroupName string, VMName string, glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) }() - return az.client.Get(resourceGroupName, VMName, expand) + mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, VMName, expand) + mc.Observe(err) + return } func (az *azVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { @@ -178,7 +187,10 @@ func (az *azVirtualMachinesClient) List(resourceGroupName string) (result comput glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } func (az *azVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { @@ -217,7 +229,13 @@ func (az *azInterfacesClient) CreateOrUpdate(resourceGroupName string, networkIn glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) }() - return az.client.CreateOrUpdate(resourceGroupName, networkInterfaceName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, networkInterfaceName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { @@ -227,7 +245,10 @@ func (az *azInterfacesClient) Get(resourceGroupName string, networkInterfaceName glog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) }() - return az.client.Get(resourceGroupName, networkInterfaceName, expand) + mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, networkInterfaceName, expand) + mc.Observe(err) + return } func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { @@ -237,7 +258,10 @@ func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resource 
glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) }() - return az.client.GetVirtualMachineScaleSetNetworkInterface(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) + mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.GetVirtualMachineScaleSetNetworkInterface(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) + mc.Observe(err) + return } // azLoadBalancersClient implements LoadBalancersClient. @@ -266,7 +290,13 @@ func (az *azLoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBa glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) }() - return az.client.CreateOrUpdate(resourceGroupName, loadBalancerName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, loadBalancerName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azLoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -276,7 +306,13 @@ func (az *azLoadBalancersClient) Delete(resourceGroupName string, loadBalancerNa glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) }() - return az.client.Delete(resourceGroupName, loadBalancerName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, loadBalancerName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azLoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { @@ -286,7 +322,10 @@ func (az *azLoadBalancersClient) Get(resourceGroupName string, loadBalancerName glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) }() - return az.client.Get(resourceGroupName, loadBalancerName, expand) + mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, loadBalancerName, expand) + mc.Observe(err) + return } func (az *azLoadBalancersClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { @@ -296,7 +335,10 @@ func (az *azLoadBalancersClient) List(resourceGroupName string) (result network. 
glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } func (az *azLoadBalancersClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { @@ -335,7 +377,13 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, pu glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) }() - return az.client.CreateOrUpdate(resourceGroupName, publicIPAddressName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, publicIPAddressName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azPublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -345,7 +393,13 @@ func (az *azPublicIPAddressesClient) Delete(resourceGroupName string, publicIPAd glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) }() - return az.client.Delete(resourceGroupName, publicIPAddressName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, publicIPAddressName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azPublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { @@ -355,7 +409,10 @@ func (az *azPublicIPAddressesClient) Get(resourceGroupName string, publicIPAddre glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) }() - return az.client.Get(resourceGroupName, publicIPAddressName, expand) + mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, publicIPAddressName, expand) + mc.Observe(err) + return } func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { @@ -365,7 +422,10 @@ func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result netw glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } func (az *azPublicIPAddressesClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { @@ -404,7 +464,13 @@ func (az *azSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetwo glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - return az.client.CreateOrUpdate(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) + errChan := 
make(chan error, 1) + mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -414,7 +480,13 @@ func (az *azSubnetsClient) Delete(resourceGroupName string, virtualNetworkName s glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - return az.client.Delete(resourceGroupName, virtualNetworkName, subnetName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, virtualNetworkName, subnetName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { @@ -424,7 +496,10 @@ func (az *azSubnetsClient) Get(resourceGroupName string, virtualNetworkName stri glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - return az.client.Get(resourceGroupName, virtualNetworkName, subnetName, expand) + mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, virtualNetworkName, subnetName, expand) + mc.Observe(err) + return } func (az *azSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { @@ -434,7 +509,10 @@ func (az *azSubnetsClient) List(resourceGroupName string, virtualNetworkName str glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) }() - return az.client.List(resourceGroupName, virtualNetworkName) + mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName, virtualNetworkName) + mc.Observe(err) + return } // azSecurityGroupsClient implements SecurityGroupsClient. 
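[Editor's note: every synchronous wrapper in this file follows the same three steps — create a metric context before the SDK call, invoke the call, then record latency and any error via Observe. A minimal, runnable sketch of that shape, using a plain function in place of the real Azure SDK client and printing instead of updating Prometheus collectors (all names below are illustrative stand-ins, not the SDK's):

    package main

    import (
        "fmt"
        "time"
    )

    // metricContext is a simplified stand-in for the type introduced in
    // azure_metrics.go: it remembers when the call started and the
    // "<prefix>_<request>" label value to report against.
    type metricContext struct {
        start   time.Time
        request string
    }

    func newMetricContext(prefix, request string) *metricContext {
        return &metricContext{start: time.Now(), request: prefix + "_" + request}
    }

    // Observe would normally feed a Prometheus histogram and error counter;
    // here it just prints so the sketch runs without any dependencies.
    func (mc *metricContext) Observe(err error) {
        fmt.Printf("%s took %v (err=%v)\n", mc.request, time.Since(mc.start), err)
    }

    // getVM stands in for a synchronous SDK call such as az.client.Get(...).
    func getVM(name string) (string, error) {
        return "vm:" + name, nil
    }

    // instrumentedGet mirrors the wrapper shape used throughout azure_client.go:
    // measure, call, observe, return the SDK's result unchanged.
    func instrumentedGet(name string) (result string, err error) {
        mc := newMetricContext("vm", "get")
        result, err = getVM(name)
        mc.Observe(err)
        return
    }

    func main() {
        result, err := instrumentedGet("example")
        fmt.Println(result, err)
    }

End of editor's note.]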
@@ -463,7 +541,13 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(resourceGroupName string, netwo glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - return az.client.CreateOrUpdate(resourceGroupName, networkSecurityGroupName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, networkSecurityGroupName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -473,7 +557,13 @@ func (az *azSecurityGroupsClient) Delete(resourceGroupName string, networkSecuri glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - return az.client.Delete(resourceGroupName, networkSecurityGroupName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, networkSecurityGroupName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { @@ -483,7 +573,10 @@ func (az *azSecurityGroupsClient) Get(resourceGroupName string, networkSecurityG glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - return az.client.Get(resourceGroupName, networkSecurityGroupName, expand) + mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, networkSecurityGroupName, expand) + mc.Observe(err) + return } func (az *azSecurityGroupsClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { @@ -493,7 +586,10 @@ func (az *azSecurityGroupsClient) List(resourceGroupName string) (result network glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } // azVirtualMachineScaleSetsClient implements VirtualMachineScaleSetsClient. 
@@ -522,7 +618,13 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName stri glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) }() - return az.client.CreateOrUpdate(resourceGroupName, VMScaleSetName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, VMScaleSetName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { @@ -532,7 +634,10 @@ func (az *azVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScale glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) }() - return az.client.Get(resourceGroupName, VMScaleSetName) + mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, VMScaleSetName) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) { @@ -542,7 +647,10 @@ func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (resul glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { @@ -562,7 +670,13 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName str glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) }() - return az.client.UpdateInstances(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.UpdateInstances(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } // azVirtualMachineScaleSetVMsClient implements VirtualMachineScaleSetVMsClient. 
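[Editor's note: the asynchronous operations above (CreateOrUpdate, Delete, UpdateInstances) need one extra step, because the SDK reports completion on channels rather than returning an error directly: the wrapper drains the SDK's error channel, records the observation, and forwards the same error on its own buffered channel so callers still receive exactly one value. A rough, self-contained sketch of that proxying, again with placeholder functions instead of the real compute/network clients:

    package main

    import (
        "fmt"
        "time"
    )

    // observe stands in for metricContext.Observe from azure_metrics.go.
    func observe(request string, start time.Time, err error) {
        fmt.Printf("%s took %v (err=%v)\n", request, time.Since(start), err)
    }

    // asyncCreateOrUpdate imitates an SDK method that reports its result and
    // error on channels once the long-running operation finishes.
    func asyncCreateOrUpdate(name string) (<-chan string, <-chan error) {
        resultChan := make(chan string, 1)
        errChan := make(chan error, 1)
        go func() {
            resultChan <- "updated:" + name
            errChan <- nil // a failed operation would send a non-nil error
        }()
        return resultChan, errChan
    }

    // instrumentedCreateOrUpdate mirrors the proxy pattern in the diff: wait
    // for the SDK's error, record the metric, then re-send the error to the
    // caller on a fresh buffered channel so calling code is unchanged.
    func instrumentedCreateOrUpdate(name string) (<-chan string, <-chan error) {
        start := time.Now()
        errChan := make(chan error, 1)
        resultChan, proxyErrChan := asyncCreateOrUpdate(name)
        err := <-proxyErrChan
        observe("vmss_create_or_update", start, err)
        errChan <- err
        return resultChan, errChan
    }

    func main() {
        results, errs := instrumentedCreateOrUpdate("example")
        if err := <-errs; err != nil {
            fmt.Println("operation failed:", err)
            return
        }
        fmt.Println(<-results)
    }

End of editor's note.]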
@@ -591,7 +705,10 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMSca glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - return az.client.Get(resourceGroupName, VMScaleSetName, instanceID) + mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, VMScaleSetName, instanceID) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { @@ -601,7 +718,10 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName s glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - return az.client.GetInstanceView(resourceGroupName, VMScaleSetName, instanceID) + mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.GetInstanceView(resourceGroupName, VMScaleSetName, instanceID) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { @@ -611,7 +731,10 @@ func (az *azVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virt glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) }() - return az.client.List(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { @@ -650,7 +773,13 @@ func (az *azRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableNam glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() - return az.client.CreateOrUpdate(resourceGroupName, routeTableName, routeName, routeParameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, routeTableName, routeName, routeParameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -660,7 +789,13 @@ func (az *azRoutesClient) Delete(resourceGroupName string, routeTableName string glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() - return az.client.Delete(resourceGroupName, routeTableName, routeName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, routeTableName, routeName, cancel) 
+ err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } // azRouteTablesClient implements RouteTablesClient. @@ -689,7 +824,13 @@ func (az *azRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTab glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) }() - return az.client.CreateOrUpdate(resourceGroupName, routeTableName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, routeTableName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { @@ -699,7 +840,10 @@ func (az *azRouteTablesClient) Get(resourceGroupName string, routeTableName stri glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) }() - return az.client.Get(resourceGroupName, routeTableName, expand) + mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, routeTableName, expand) + mc.Observe(err) + return } // azStorageAccountClient implements StorageAccountClient. @@ -727,7 +871,13 @@ func (az *azStorageAccountClient) Create(resourceGroupName string, accountName s glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) }() - return az.client.Create(resourceGroupName, accountName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Create(resourceGroupName, accountName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { @@ -737,7 +887,10 @@ func (az *azStorageAccountClient) Delete(resourceGroupName string, accountName s glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) }() - return az.client.Delete(resourceGroupName, accountName) + mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Delete(resourceGroupName, accountName) + mc.Observe(err) + return } func (az *azStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { @@ -747,7 +900,10 @@ func (az *azStorageAccountClient) ListKeys(resourceGroupName string, accountName glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) }() - return az.client.ListKeys(resourceGroupName, accountName) + mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListKeys(resourceGroupName, accountName) + mc.Observe(err) + return } func (az *azStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { @@ -757,7 +913,10 @@ func (az *azStorageAccountClient) ListByResourceGroup(resourceGroupName string) glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) }() - return 
az.client.ListByResourceGroup(resourceGroupName) + mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListByResourceGroup(resourceGroupName) + mc.Observe(err) + return } func (az *azStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { @@ -767,7 +926,10 @@ func (az *azStorageAccountClient) GetProperties(resourceGroupName string, accoun glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) }() - return az.client.GetProperties(resourceGroupName, accountName) + mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.GetProperties(resourceGroupName, accountName) + mc.Observe(err) + return } // azDisksClient implements DisksClient. @@ -795,7 +957,13 @@ func (az *azDisksClient) CreateOrUpdate(resourceGroupName string, diskName strin glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) }() - return az.client.CreateOrUpdate(resourceGroupName, diskName, diskParameter, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, diskName, diskParameter, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { @@ -805,7 +973,13 @@ func (az *azDisksClient) Delete(resourceGroupName string, diskName string, cance glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) }() - return az.client.Delete(resourceGroupName, diskName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, diskName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { @@ -815,5 +989,8 @@ func (az *azDisksClient) Get(resourceGroupName string, diskName string) (result glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) }() - return az.client.Get(resourceGroupName, diskName) + mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, diskName) + mc.Observe(err) + return } diff --git a/pkg/cloudprovider/providers/azure/azure_metrics.go b/pkg/cloudprovider/providers/azure/azure_metrics.go new file mode 100644 index 00000000000..2ef21bb5a5c --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_metrics.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type apiCallMetrics struct { + latency *prometheus.HistogramVec + errors *prometheus.CounterVec +} + +var ( + metricLabels = []string{ + "request", // API function that is being invoked + "resource_group", // Resource group of the resource being monitored + "subscription_id", // Subscription ID of the resource being monitored + } + + apiMetrics = registerAPIMetrics(metricLabels...) +) + +type metricContext struct { + start time.Time + attributes []string +} + +func newMetricContext(prefix, request, resouceGroup, subscriptionID string) *metricContext { + return &metricContext{ + start: time.Now(), + attributes: []string{prefix + "_" + request, resouceGroup, subscriptionID}, + } +} + +func (mc *metricContext) Observe(err error) { + apiMetrics.latency.WithLabelValues(mc.attributes...).Observe( + time.Since(mc.start).Seconds()) + if err != nil { + apiMetrics.errors.WithLabelValues(mc.attributes...).Inc() + } +} + +func registerAPIMetrics(attributes ...string) *apiCallMetrics { + metrics := &apiCallMetrics{ + latency: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "cloudprovider_azure_api_request_duration_seconds", + Help: "Latency of an Azure API call", + }, + attributes, + ), + errors: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cloudprovider_azure_api_request_errors", + Help: "Number of errors for an Azure API call", + }, + attributes, + ), + } + + prometheus.MustRegister(metrics.latency) + prometheus.MustRegister(metrics.errors) + + return metrics +} diff --git a/pkg/cloudprovider/providers/azure/azure_metrics_test.go b/pkg/cloudprovider/providers/azure/azure_metrics_test.go new file mode 100644 index 00000000000..978c6b50540 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_metrics_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAzureMetricLabelCardinality(t *testing.T) { + mc := newMetricContext("test", "create", "resource_group", "subscription_id") + assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match") +} + +func TestAzureMetricLabelPrefix(t *testing.T) { + mc := newMetricContext("prefix", "request", "resource_group", "subscription_id") + found := false + for _, attribute := range mc.attributes { + if attribute == "prefix_request" { + found = true + } + } + assert.True(t, found, "request label must be prefixed") +} From 0601916d78fa0e5dac790ddf32041040f7c10916 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Fri, 12 Jan 2018 18:12:32 +0800 Subject: [PATCH 136/264] add KUBE_ROOT in directory --- build/common.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/common.sh b/build/common.sh index 8f8254ca228..f5716709b91 100755 --- a/build/common.sh +++ b/build/common.sh @@ -451,8 +451,8 @@ function kube::build::build_image() { cp /etc/localtime "${LOCAL_OUTPUT_BUILD_CONTEXT}/" - cp build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" - cp build/build-image/rsyncd.sh "${LOCAL_OUTPUT_BUILD_CONTEXT}/" + cp ${KUBE_ROOT}/build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" + cp ${KUBE_ROOT}/build/build-image/rsyncd.sh "${LOCAL_OUTPUT_BUILD_CONTEXT}/" dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" chmod go= "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" From 9958389eb9771bb964f6e01ce04c27129c6c4417 Mon Sep 17 00:00:00 2001 From: FengyunPan Date: Fri, 12 Jan 2018 18:57:46 +0800 Subject: [PATCH 137/264] The lbaas.opts.SubnetId should be set by subnet id. Fix #58145 The getSubnetIDForLB() should return subnet id rather than net id. 
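[Editor's note: to make the intent of this fix concrete — when a node's IP address is matched against the fixed IPs on its ports, the subnet handed to the load balancer must come from the matching fixed IP itself, not from the port's network. A hedged sketch of the corrected lookup, with simplified structs standing in for the gophercloud attach-interface types used by getSubnetIDForLB:

    package main

    import (
        "errors"
        "fmt"
    )

    // FixedIP and Interface are simplified stand-ins for the gophercloud types.
    type FixedIP struct {
        IPAddress string
        SubnetID  string
    }

    type Interface struct {
        NetID    string
        FixedIPs []FixedIP
    }

    // subnetIDForAddress returns the subnet of the fixed IP that matches the
    // node's address; before this fix the port's network ID (NetID) was
    // returned instead, which is not a valid subnet for the load balancer.
    func subnetIDForAddress(interfaces []Interface, ipAddress string) (string, error) {
        for _, intf := range interfaces {
            for _, fixedIP := range intf.FixedIPs {
                if fixedIP.IPAddress == ipAddress {
                    return fixedIP.SubnetID, nil
                }
            }
        }
        return "", errors.New("no matching fixed IP for address " + ipAddress)
    }

    func main() {
        ifaces := []Interface{{
            NetID:    "net-1234",
            FixedIPs: []FixedIP{{IPAddress: "10.0.0.5", SubnetID: "subnet-5678"}},
        }}
        id, err := subnetIDForAddress(ifaces, "10.0.0.5")
        fmt.Println(id, err) // subnet-5678 <nil>
    }

End of editor's note.]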
--- pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 036af670bc7..c605dd19e64 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -537,7 +537,7 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string, for _, intf := range interfaces { for _, fixedIP := range intf.FixedIPs { if fixedIP.IPAddress == ipAddress { - return intf.NetID, nil + return fixedIP.SubnetID, nil } } } From 90bc1265cf9fd9a5c98f91274be2a0c3f4d3f528 Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Fri, 12 Jan 2018 20:09:07 +0800 Subject: [PATCH 138/264] Fix endpoint not work issue --- pkg/kubelet/cm/deviceplugin/endpoint_test.go | 55 ++++++++++++++++---- 1 file changed, 44 insertions(+), 11 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/endpoint_test.go b/pkg/kubelet/cm/deviceplugin/endpoint_test.go index 226148a6b06..f4634db85f4 100644 --- a/pkg/kubelet/cm/deviceplugin/endpoint_test.go +++ b/pkg/kubelet/cm/deviceplugin/endpoint_test.go @@ -19,7 +19,6 @@ package deviceplugin import ( "path" "testing" - "time" "github.com/stretchr/testify/require" @@ -54,22 +53,56 @@ func TestRun(t *testing.T) { {ID: "AThirdDeviceId", Health: pluginapi.Healthy}, } - p, e := esetup(t, devs, socket, "mock", func(n string, a, u, r []pluginapi.Device) { - require.Len(t, a, 1) - require.Len(t, u, 1) - require.Len(t, r, 1) + callbackCount := 0 + callbackChan := make(chan int) + callback := func(n string, a, u, r []pluginapi.Device) { + // Should be called twice: + // one for plugin registration, one for plugin update. + if callbackCount > 2 { + t.FailNow() + } - require.Equal(t, a[0].ID, updated[1].ID) + // Check plugin registration + if callbackCount == 0 { + require.Len(t, a, 2) + require.Len(t, u, 0) + require.Len(t, r, 0) + } - require.Equal(t, u[0].ID, updated[0].ID) - require.Equal(t, u[0].Health, updated[0].Health) + // Check plugin update + if callbackCount == 1 { + require.Len(t, a, 1) + require.Len(t, u, 1) + require.Len(t, r, 1) - require.Equal(t, r[0].ID, devs[1].ID) - }) + require.Equal(t, a[0].ID, updated[1].ID) + require.Equal(t, u[0].ID, updated[0].ID) + require.Equal(t, u[0].Health, updated[0].Health) + require.Equal(t, r[0].ID, devs[1].ID) + } + + callbackCount++ + callbackChan <- callbackCount + } + + p, e := esetup(t, devs, socket, "mock", callback) defer ecleanup(t, p, e) go e.run() + // Wait for the first callback to be issued. + select { + case <-callbackChan: + break + } + p.Update(updated) + + // Wait for the second callback to be issued. + select { + case <-callbackChan: + break + } + time.Sleep(time.Second) e.mutex.Lock() @@ -102,7 +135,7 @@ func esetup(t *testing.T, devs []*pluginapi.Device, socket, resourceName string, err := p.Start() require.NoError(t, err) - e, err := newEndpointImpl(socket, "mock", make(map[string]pluginapi.Device), func(n string, a, u, r []pluginapi.Device) {}) + e, err := newEndpointImpl(socket, resourceName, make(map[string]pluginapi.Device), callback) require.NoError(t, err) return p, e From 296ae178d91de909d89005b8d495203f8bf97abd Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Fri, 12 Jan 2018 16:29:29 +0100 Subject: [PATCH 139/264] hack/generate-bindata.sh: make output cleanly by suppressing pushd/popd output. 
--- hack/generate-bindata.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/generate-bindata.sh b/hack/generate-bindata.sh index 4c843c5cdc8..40605fb419d 100755 --- a/hack/generate-bindata.sh +++ b/hack/generate-bindata.sh @@ -39,7 +39,7 @@ if ! which go-bindata &>/dev/null ; then fi # run the generation from the root directory for stable output -pushd "${KUBE_ROOT}" +pushd "${KUBE_ROOT}" >/dev/null # These are files for e2e tests. BINDATA_OUTPUT="test/e2e/generated/bindata.go" @@ -84,4 +84,4 @@ fi rm -f "${BINDATA_OUTPUT}.tmp" -popd +popd >/dev/null From 6831581f1c86003d584a6dd52591bf59035f9f7c Mon Sep 17 00:00:00 2001 From: Ross Light Date: Fri, 12 Jan 2018 10:16:13 -0800 Subject: [PATCH 140/264] Bump fluentd-gcp version --- cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index ac9fdcd0053..130e84aaccd 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -1,13 +1,13 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: - name: fluentd-gcp-v2.0.13 + name: fluentd-gcp-v2.0.14 namespace: kube-system labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v2.0.13 + version: v2.0.14 spec: updateStrategy: type: RollingUpdate @@ -16,7 +16,7 @@ spec: labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" - version: v2.0.13 + version: v2.0.14 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). 
@@ -27,7 +27,7 @@ spec: dnsPolicy: Default containers: - name: fluentd-gcp - image: gcr.io/google-containers/fluentd-gcp:2.0.13 + image: gcr.io/google-containers/fluentd-gcp:2.0.14 env: - name: FLUENTD_ARGS value: --no-supervisor -q From 4711bccd057d3934ffb265c4ca140d98426918e2 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Wed, 10 Jan 2018 19:20:21 -0500 Subject: [PATCH 141/264] Bump runc to d5b4a3e This fixes a race condition in runc/systemd at container creation time opencontainers/runc#1683 Signed-off-by: vikaschoudhary16 --- Godeps/Godeps.json | 274 ++--- Godeps/LICENSES | 1041 ++++++++++------- vendor/BUILD | 3 +- .../github.com/containerd/console/.travis.yml | 17 + vendor/github.com/containerd/console/BUILD | 71 ++ .../console/LICENSE} | 14 +- .../github.com/containerd/console/README.md | 17 + .../github.com/containerd/console/console.go | 62 + .../containerd/console/console_linux.go | 255 ++++ .../containerd/console/console_unix.go | 142 +++ .../containerd/console/console_windows.go | 200 ++++ .../containerd/console/tc_darwin.go | 37 + .../containerd/console/tc_freebsd.go | 29 + .../github.com/containerd/console/tc_linux.go | 37 + .../containerd/console/tc_solaris_cgo.go | 35 + .../containerd/console/tc_solaris_nocgo.go | 31 + .../github.com/containerd/console/tc_unix.go | 75 ++ .../cyphar/filepath-securejoin/.travis.yml | 19 + .../cyphar/filepath-securejoin/BUILD | 26 + .../filepath-securejoin/LICENSE} | 3 +- .../cyphar/filepath-securejoin/README.md | 65 + .../cyphar/filepath-securejoin/VERSION | 1 + .../cyphar/filepath-securejoin/join.go | 135 +++ .../cyphar/filepath-securejoin/vendor.conf | 1 + .../cyphar/filepath-securejoin/vfs.go | 41 + .../docker/docker/pkg/symlink/BUILD | 68 -- .../docker/docker/pkg/symlink/README.md | 6 - .../docker/docker/pkg/symlink/fs.go | 144 --- .../docker/docker/pkg/symlink/fs_unix.go | 15 - .../docker/docker/pkg/symlink/fs_windows.go | 169 --- .../cadvisor/container/docker/handler.go | 1 + vendor/github.com/google/cadvisor/fs/BUILD | 1 + vendor/github.com/google/cadvisor/fs/fs.go | 13 +- .../google/cadvisor/pages/static/assets.go | 4 +- .../google/cadvisor/pages/templates.go | 2 +- .../opencontainers/runc/libcontainer/BUILD | 27 +- .../opencontainers/runc/libcontainer/SPEC.md | 86 +- .../runc/libcontainer/apparmor/BUILD | 1 - .../runc/libcontainer/apparmor/apparmor.go | 37 +- .../runc/libcontainer/cgroups/BUILD | 1 - .../runc/libcontainer/cgroups/fs/apply_raw.go | 13 + .../runc/libcontainer/cgroups/fs/freezer.go | 13 +- .../libcontainer/cgroups/rootless/rootless.go | 128 -- .../cgroups/systemd/apply_nosystemd.go | 4 +- .../cgroups/systemd/apply_systemd.go | 14 +- .../runc/libcontainer/compat_1.5_linux.go | 10 - .../runc/libcontainer/configs/BUILD | 10 +- .../configs/cgroup_unsupported.go | 6 - .../runc/libcontainer/configs/config.go | 4 + .../libcontainer/configs/device_defaults.go | 2 +- .../runc/libcontainer/configs/intelrdt.go | 7 + .../runc/libcontainer/configs/validate/BUILD | 1 + .../libcontainer/configs/validate/rootless.go | 68 +- .../configs/validate/validator.go | 17 + .../runc/libcontainer/console.go | 17 - .../runc/libcontainer/console_freebsd.go | 13 - .../runc/libcontainer/console_linux.go | 129 +- .../runc/libcontainer/console_solaris.go | 11 - .../runc/libcontainer/console_windows.go | 30 - .../runc/libcontainer/container_linux.go | 149 ++- .../runc/libcontainer/container_solaris.go | 20 - .../runc/libcontainer/container_windows.go | 20 - .../runc/libcontainer/criu_opts_linux.go | 4 +- .../runc/libcontainer/criu_opts_windows.go 
| 6 - .../runc/libcontainer/factory_linux.go | 77 +- .../runc/libcontainer/init_linux.go | 103 +- .../{cgroups/rootless => intelrdt}/BUILD | 8 +- .../runc/libcontainer/intelrdt/intelrdt.go | 553 +++++++++ .../runc/libcontainer/intelrdt/stats.go | 24 + .../runc/libcontainer/keys/keyctl.go | 2 +- .../runc/libcontainer/message_linux.go | 2 + .../runc/libcontainer/mount/BUILD | 30 + .../runc/libcontainer/mount/mount.go | 23 + .../runc/libcontainer/mount/mount_linux.go | 82 ++ .../runc/libcontainer/mount/mountinfo.go | 40 + .../runc/libcontainer/process.go | 4 + .../runc/libcontainer/process_linux.go | 80 +- .../runc/libcontainer/rootfs_linux.go | 38 +- .../libcontainer/seccomp/seccomp_linux.go | 47 +- .../runc/libcontainer/setgroups_linux.go | 11 - .../runc/libcontainer/setns_init_linux.go | 13 +- .../runc/libcontainer/standard_init_linux.go | 42 +- .../runc/libcontainer/state_linux.go | 5 + .../runc/libcontainer/stats_freebsd.go | 5 - .../runc/libcontainer/stats_linux.go | 6 +- .../runc/libcontainer/stats_solaris.go | 7 - .../runc/libcontainer/stats_windows.go | 5 - .../runc/libcontainer/system/BUILD | 17 +- .../runc/libcontainer/system/linux.go | 11 + ...scall_linux_arm.go => syscall_linux_32.go} | 3 +- .../libcontainer/system/syscall_linux_386.go | 25 - .../libcontainer/system/syscall_linux_64.go | 3 +- .../runc/libcontainer/system/sysconfig.go | 2 +- .../runc/libcontainer/user/BUILD | 12 - .../libcontainer/user/lookup_unsupported.go | 38 - .../runc/libcontainer/utils/cmsg.go | 10 +- .../k8s.io/kube-openapi/pkg/generators/README | 31 + .../kube-openapi/pkg/generators/openapi.go | 90 +- .../kube-openapi/pkg/util/proto/document.go | 14 +- .../kube-openapi/pkg/util/proto/openapi.go | 25 + .../pkg/util/proto/validation/types.go | 13 +- 101 files changed, 3694 insertions(+), 1709 deletions(-) create mode 100644 vendor/github.com/containerd/console/.travis.yml create mode 100644 vendor/github.com/containerd/console/BUILD rename vendor/github.com/{docker/docker/pkg/symlink/LICENSE.APACHE => containerd/console/LICENSE} (94%) create mode 100644 vendor/github.com/containerd/console/README.md create mode 100644 vendor/github.com/containerd/console/console.go create mode 100644 vendor/github.com/containerd/console/console_linux.go create mode 100644 vendor/github.com/containerd/console/console_unix.go create mode 100644 vendor/github.com/containerd/console/console_windows.go create mode 100644 vendor/github.com/containerd/console/tc_darwin.go create mode 100644 vendor/github.com/containerd/console/tc_freebsd.go create mode 100644 vendor/github.com/containerd/console/tc_linux.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_cgo.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_nocgo.go create mode 100644 vendor/github.com/containerd/console/tc_unix.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/.travis.yml create mode 100644 vendor/github.com/cyphar/filepath-securejoin/BUILD rename vendor/github.com/{docker/docker/pkg/symlink/LICENSE.BSD => cyphar/filepath-securejoin/LICENSE} (92%) create mode 100644 vendor/github.com/cyphar/filepath-securejoin/README.md create mode 100644 vendor/github.com/cyphar/filepath-securejoin/VERSION create mode 100644 vendor/github.com/cyphar/filepath-securejoin/join.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vendor.conf create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vfs.go delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/BUILD delete mode 100644 
vendor/github.com/docker/docker/pkg/symlink/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs.go delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_windows.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_windows.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_windows.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go rename vendor/github.com/opencontainers/runc/libcontainer/{cgroups/rootless => intelrdt}/BUILD (65%) create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go rename vendor/github.com/opencontainers/runc/libcontainer/system/{syscall_linux_arm.go => syscall_linux_32.go} (93%) delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 45b60e8306b..44d80a60754 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -437,8 +437,13 @@ }, { "ImportPath": "github.com/container-storage-interface/spec/lib/go/csi", + "Comment": "v0.1.0", "Rev": "9e88e4bfabeca1b8e4810555815f112159292ada" }, + { + "ImportPath": "github.com/containerd/console", + "Rev": "84eeaae905fa414d03e07bcd6c8d3f19e7cf180e" + }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", "Comment": "v1.0.0-beta.2-159-g27d450a", @@ -968,6 +973,11 @@ "Comment": "v1.0.4", "Rev": "71acacd42f85e5e82f70a55327789582a5200a90" }, + { + "ImportPath": "github.com/cyphar/filepath-securejoin", + "Comment": "v0.2.1-1-gae69057", + "Rev": "ae69057f2299fb9e5ba2df738607e6a505b74ab6" + }, { "ImportPath": "github.com/d2g/dhcp4", "Rev": "a1d1b6c41b1ce8a71a5121a9cee31809c4707d9c" @@ -1119,11 +1129,6 @@ "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": 
"4f3616fb1c112e206b88cb7a9922bf49067a7756" }, - { - "ImportPath": "github.com/docker/docker/pkg/symlink", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", - "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" - }, { "ImportPath": "github.com/docker/docker/pkg/system", "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", @@ -1475,218 +1480,218 @@ }, { "ImportPath": "github.com/google/cadvisor/accelerators", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/client/v2", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/containerd", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/crio", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" 
+ "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/http/mux", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": 
"13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/pages/static", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/certificate-transparency/go", @@ -2339,78 +2344,83 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + 
"Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/rootless", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" + }, + { + "ImportPath": "github.com/opencontainers/runc/libcontainer/intelrdt", + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" + }, + { + "ImportPath": "github.com/opencontainers/runc/libcontainer/mount", + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": 
"v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runtime-spec/specs-go", @@ -3212,35 +3222,35 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/utils/clock", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 8021a07e90e..3f809458a4d 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -12724,6 +12724,215 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/containerd/console licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/containerd/console/LICENSE 86d3f3a95c324c9479bd8986968f4327 +================================================================================ + + ================================================================================ = vendor/github.com/containerd/containerd/api/services/containers/v1 licensed under: = @@ -34676,6 +34885,42 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/cyphar/filepath-securejoin licensed under: = + +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/github.com/cyphar/filepath-securejoin/LICENSE 8d322afab99e1998dbfcc712f94e824d +================================================================================ + + ================================================================================ = vendor/github.com/d2g/dhcp4 licensed under: = @@ -40204,205 +40449,6 @@ Apache License ================================================================================ -================================================================================ -= vendor/github.com/docker/docker/pkg/symlink licensed under: = - - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2017 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/docker/docker/LICENSE 9740d093a080530b5c5c6573df9af45a -================================================================================ - - ================================================================================ = vendor/github.com/docker/docker/pkg/system licensed under: = @@ -75650,205 +75696,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/opencontainers/runc/LICENSE 435b266b3899aa8a959f17d41c56def8 -================================================================================ - - ================================================================================ = vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd licensed under: = @@ -76645,6 +76492,205 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/opencontainers/runc/libcontainer/intelrdt licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/opencontainers/runc/LICENSE 435b266b3899aa8a959f17d41c56def8 +================================================================================ + + ================================================================================ = vendor/github.com/opencontainers/runc/libcontainer/keys licensed under: = @@ -76844,6 +76890,205 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================================================ +================================================================================ += vendor/github.com/opencontainers/runc/libcontainer/mount licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/opencontainers/runc/LICENSE 435b266b3899aa8a959f17d41c56def8 +================================================================================ + + ================================================================================ = vendor/github.com/opencontainers/runc/libcontainer/seccomp licensed under: = diff --git a/vendor/BUILD b/vendor/BUILD index 20495c2248a..f48108ef5a6 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -62,6 +62,7 @@ filegroup( "//vendor/github.com/codedellemc/goscaleio:all-srcs", "//vendor/github.com/codegangsta/negroni:all-srcs", "//vendor/github.com/container-storage-interface/spec/lib/go/csi:all-srcs", + "//vendor/github.com/containerd/console:all-srcs", "//vendor/github.com/containerd/containerd/api/services/containers/v1:all-srcs", "//vendor/github.com/containerd/containerd/api/services/tasks/v1:all-srcs", "//vendor/github.com/containerd/containerd/api/services/version/v1:all-srcs", @@ -135,6 +136,7 @@ filegroup( "//vendor/github.com/coreos/pkg/timeutil:all-srcs", "//vendor/github.com/coreos/rkt/api/v1alpha:all-srcs", "//vendor/github.com/cpuguy83/go-md2man/md2man:all-srcs", + "//vendor/github.com/cyphar/filepath-securejoin:all-srcs", "//vendor/github.com/d2g/dhcp4:all-srcs", "//vendor/github.com/d2g/dhcp4client:all-srcs", "//vendor/github.com/davecgh/go-spew/spew:all-srcs", @@ -151,7 +153,6 @@ filegroup( "//vendor/github.com/docker/docker/pkg/longpath:all-srcs", "//vendor/github.com/docker/docker/pkg/mount:all-srcs", "//vendor/github.com/docker/docker/pkg/stdcopy:all-srcs", - "//vendor/github.com/docker/docker/pkg/symlink:all-srcs", "//vendor/github.com/docker/docker/pkg/system:all-srcs", "//vendor/github.com/docker/docker/pkg/term:all-srcs", "//vendor/github.com/docker/docker/pkg/tlsconfig:all-srcs", diff --git a/vendor/github.com/containerd/console/.travis.yml b/vendor/github.com/containerd/console/.travis.yml new file mode 100644 index 00000000000..ba93012c767 --- /dev/null +++ b/vendor/github.com/containerd/console/.travis.yml @@ -0,0 +1,17 @@ +language: go +go: + - 1.9.x + - tip + +go_import_path: github.com/containerd/console + +install: + - go get -d + - GOOS=windows go get -d + - GOOS=solaris go get -d + +script: + - go test -race + - GOOS=windows go test + - GOOS=solaris go build + - GOOS=solaris go test -c diff --git a/vendor/github.com/containerd/console/BUILD b/vendor/github.com/containerd/console/BUILD new file mode 100644 index 00000000000..8b43101917a --- /dev/null +++ b/vendor/github.com/containerd/console/BUILD @@ -0,0 +1,71 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "console.go", + ] + select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "console_unix.go", + "tc_darwin.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "console_unix.go", + "tc_freebsd.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "console_linux.go", + "console_unix.go", + "tc_linux.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "console_unix.go", + "tc_solaris_cgo.go", + "tc_solaris_nocgo.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "console_windows.go", + ], + "//conditions:default": [], + }), + cgo = True, + importpath = "github.com/containerd/console", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + 
"//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/golang.org/x/sys/windows:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/containerd/console/LICENSE similarity index 94% rename from vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE rename to vendor/github.com/containerd/console/LICENSE index b9fbf3c98fb..261eeb9e9f8 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE +++ b/vendor/github.com/containerd/console/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -176,7 +175,18 @@ END OF TERMS AND CONDITIONS - Copyright 2014-2017 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md new file mode 100644 index 00000000000..4c56d9d134a --- /dev/null +++ b/vendor/github.com/containerd/console/README.md @@ -0,0 +1,17 @@ +# console + +[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console) + +Golang package for dealing with consoles. Light on deps and a simple API. 
+ +## Modifying the current process + +```go +current := console.Current() +defer current.Reset() + +if err := current.SetRaw(); err != nil { +} +ws, err := current.Size() +current.Resize(ws) +``` diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go new file mode 100644 index 00000000000..bf2798fda37 --- /dev/null +++ b/vendor/github.com/containerd/console/console.go @@ -0,0 +1,62 @@ +package console + +import ( + "errors" + "io" + "os" +) + +var ErrNotAConsole = errors.New("provided file is not a console") + +type Console interface { + io.Reader + io.Writer + io.Closer + + // Resize resizes the console to the provided window size + Resize(WinSize) error + // ResizeFrom resizes the calling console to the size of the + // provided console + ResizeFrom(Console) error + // SetRaw sets the console in raw mode + SetRaw() error + // DisableEcho disables echo on the console + DisableEcho() error + // Reset restores the console to its orignal state + Reset() error + // Size returns the window size of the console + Size() (WinSize, error) + // Fd returns the console's file descriptor + Fd() uintptr + // Name returns the console's file name + Name() string +} + +// WinSize specifies the window size of the console +type WinSize struct { + // Height of the console + Height uint16 + // Width of the console + Width uint16 + x uint16 + y uint16 +} + +// Current returns the current processes console +func Current() Console { + c, err := ConsoleFromFile(os.Stdin) + if err != nil { + // stdin should always be a console for the design + // of this function + panic(err) + } + return c +} + +// ConsoleFromFile returns a console using the provided file +func ConsoleFromFile(f *os.File) (Console, error) { + if err := checkConsole(f); err != nil { + return nil, err + } + return newMaster(f) +} diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go new file mode 100644 index 00000000000..c963729296d --- /dev/null +++ b/vendor/github.com/containerd/console/console_linux.go @@ -0,0 +1,255 @@ +// +build linux + +package console + +import ( + "io" + "os" + "sync" + + "golang.org/x/sys/unix" +) + +const ( + maxEvents = 128 +) + +// Epoller manages multiple epoll consoles using edge-triggered epoll api so we +// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP. +// For more details, see: +// - https://github.com/systemd/systemd/pull/4262 +// - https://github.com/moby/moby/issues/27202 +// +// Example usage of Epoller and EpollConsole can be as follow: +// +// epoller, _ := NewEpoller() +// epollConsole, _ := epoller.Add(console) +// go epoller.Wait() +// var ( +// b bytes.Buffer +// wg sync.WaitGroup +// ) +// wg.Add(1) +// go func() { +// io.Copy(&b, epollConsole) +// wg.Done() +// }() +// // perform I/O on the console +// epollConsole.Shutdown(epoller.CloseConsole) +// wg.Wait() +// epollConsole.Close() +type Epoller struct { + efd int + mu sync.Mutex + fdMapping map[int]*EpollConsole +} + +// NewEpoller returns an instance of epoller with a valid epoll fd. +func NewEpoller() (*Epoller, error) { + efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if err != nil { + return nil, err + } + return &Epoller{ + efd: efd, + fdMapping: make(map[int]*EpollConsole), + }, nil +} + +// Add creates a epoll console based on the provided console. The console will +// be registered with EPOLLET (i.e. using edge-triggered notification) and its +// file descriptor will be set to non-blocking mode. 
After this, user should use +// the return console to perform I/O. +func (e *Epoller) Add(console Console) (*EpollConsole, error) { + sysfd := int(console.Fd()) + // Set sysfd to non-blocking mode + if err := unix.SetNonblock(sysfd, true); err != nil { + return nil, err + } + + ev := unix.EpollEvent{ + Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET, + Fd: int32(sysfd), + } + if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil { + return nil, err + } + ef := &EpollConsole{ + Console: console, + sysfd: sysfd, + readc: sync.NewCond(&sync.Mutex{}), + writec: sync.NewCond(&sync.Mutex{}), + } + e.mu.Lock() + e.fdMapping[sysfd] = ef + e.mu.Unlock() + return ef, nil +} + +// Wait starts the loop to wait for its consoles' notifications and signal +// appropriate console that it can perform I/O. +func (e *Epoller) Wait() error { + events := make([]unix.EpollEvent, maxEvents) + for { + n, err := unix.EpollWait(e.efd, events, -1) + if err != nil { + // EINTR: The call was interrupted by a signal handler before either + // any of the requested events occurred or the timeout expired + if err == unix.EINTR { + continue + } + return err + } + for i := 0; i < n; i++ { + ev := &events[i] + // the console is ready to be read from + if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalRead() + } + } + // the console is ready to be written to + if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalWrite() + } + } + } + } +} + +// Close unregister the console's file descriptor from epoll interface +func (e *Epoller) CloseConsole(fd int) error { + e.mu.Lock() + defer e.mu.Unlock() + delete(e.fdMapping, fd) + return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{}) +} + +func (e *Epoller) getConsole(sysfd int) *EpollConsole { + e.mu.Lock() + f := e.fdMapping[sysfd] + e.mu.Unlock() + return f +} + +// Close the epoll fd +func (e *Epoller) Close() error { + return unix.Close(e.efd) +} + +// EpollConsole acts like a console but register its file descriptor with a +// epoll fd and uses epoll API to perform I/O. +type EpollConsole struct { + Console + readc *sync.Cond + writec *sync.Cond + sysfd int + closed bool +} + +// Read reads up to len(p) bytes into p. It returns the number of bytes read +// (0 <= n <= len(p)) and any error encountered. +// +// If the console's read returns EAGAIN or EIO, we assumes that its a +// temporary error because the other side went away and wait for the signal +// generated by epoll event to continue. +func (ec *EpollConsole) Read(p []byte) (n int, err error) { + var read int + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + for { + read, err = ec.Console.Read(p[n:]) + n += read + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappear, assume this is temporary and wait for the + // signal to continue again. 
Unless we didnt read anything and the + // console is already marked as closed then we should exit + if hangup && !(n == 0 && len(p) > 0 && ec.closed) { + ec.readc.Wait() + continue + } + } + break + } + // if we didnt read anything then return io.EOF to end gracefully + if n == 0 && len(p) > 0 && err == nil { + err = io.EOF + } + // signal for others that we finished the read + ec.readc.Signal() + return n, err +} + +// Writes len(p) bytes from p to the console. It returns the number of bytes +// written from p (0 <= n <= len(p)) and any error encountered that caused +// the write to stop early. +// +// If writes to the console returns EAGAIN or EIO, we assumes that its a +// temporary error because the other side went away and wait for the signal +// generated by epoll event to continue. +func (ec *EpollConsole) Write(p []byte) (n int, err error) { + var written int + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + for { + written, err = ec.Console.Write(p[n:]) + n += written + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappear, assume this is temporary and wait for the + // signal to continue again. + if hangup { + ec.writec.Wait() + continue + } + } + // unrecoverable error, break the loop and return the error + break + } + if n < len(p) && err == nil { + err = io.ErrShortWrite + } + // signal for others that we finished the write + ec.writec.Signal() + return n, err +} + +// Close closed the file descriptor and signal call waiters for this fd. +// It accepts a callback which will be called with the console's fd. The +// callback typically will be used to do further cleanup such as unregister the +// console's fd from the epoll interface. +// User should call Shutdown and wait for all I/O operation to be finished +// before closing the console. +func (ec *EpollConsole) Shutdown(close func(int) error) error { + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + + ec.readc.Broadcast() + ec.writec.Broadcast() + ec.closed = true + return close(ec.sysfd) +} + +// signalRead signals that the console is readable. +func (ec *EpollConsole) signalRead() { + ec.readc.Signal() +} + +// signalWrite signals that the console is writable. 
+func (ec *EpollConsole) signalWrite() { + ec.writec.Signal() +} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go new file mode 100644 index 00000000000..118c8c3abfd --- /dev/null +++ b/vendor/github.com/containerd/console/console_unix.go @@ -0,0 +1,142 @@ +// +build darwin freebsd linux solaris + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// NewPty creates a new pty pair +// The master is returned as the first console and a string +// with the path to the pty slave is returned as the second +func NewPty() (Console, string, error) { + f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + slave, err := ptsname(f) + if err != nil { + return nil, "", err + } + if err := unlockpt(f); err != nil { + return nil, "", err + } + m, err := newMaster(f) + if err != nil { + return nil, "", err + } + return m, slave, nil +} + +type master struct { + f *os.File + original *unix.Termios +} + +func (m *master) Read(b []byte) (int, error) { + return m.f.Read(b) +} + +func (m *master) Write(b []byte) (int, error) { + return m.f.Write(b) +} + +func (m *master) Close() error { + return m.f.Close() +} + +func (m *master) Resize(ws WinSize) error { + return tcswinsz(m.f.Fd(), ws) +} + +func (m *master) ResizeFrom(c Console) error { + ws, err := c.Size() + if err != nil { + return err + } + return m.Resize(ws) +} + +func (m *master) Reset() error { + if m.original == nil { + return nil + } + return tcset(m.f.Fd(), m.original) +} + +func (m *master) getCurrent() (unix.Termios, error) { + var termios unix.Termios + if err := tcget(m.f.Fd(), &termios); err != nil { + return unix.Termios{}, err + } + return termios, nil +} + +func (m *master) SetRaw() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState = cfmakeraw(rawState) + rawState.Oflag = rawState.Oflag | unix.OPOST + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + return tcgwinsz(m.f.Fd()) +} + +func (m *master) Fd() uintptr { + return m.f.Fd() +} + +func (m *master) Name() string { + return m.f.Name() +} + +// checkConsole checks if the provided file is a console +func checkConsole(f *os.File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + m := &master{ + f: f, + } + t, err := m.getCurrent() + if err != nil { + return nil, err + } + m.original = &t + return m, nil +} + +// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func ClearONLCR(fd uintptr) error { + return setONLCR(fd, false) +} + +// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts as intended for a terminal emulator. 
+func SetONLCR(fd uintptr) error { + return setONLCR(fd, true) +} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go new file mode 100644 index 00000000000..d78a0b8419b --- /dev/null +++ b/vendor/github.com/containerd/console/console_windows.go @@ -0,0 +1,200 @@ +package console + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var ( + vtInputSupported bool + ErrNotImplemented = errors.New("not implemented") +) + +func (m *master) initStdios() { + m.in = windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { + // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + windows.SetConsoleMode(m.in, m.inMode) + } else { + fmt.Printf("failed to get console mode for stdin: %v\n", err) + } + + m.out = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { + if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.out, m.outMode) + } + } else { + fmt.Printf("failed to get console mode for stdout: %v\n", err) + } + + m.err = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { + if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.err, m.errMode) + } + } else { + fmt.Printf("failed to get console mode for stderr: %v\n", err) + } +} + +type master struct { + in windows.Handle + inMode uint32 + + out windows.Handle + outMode uint32 + + err windows.Handle + errMode uint32 +} + +func (m *master) SetRaw() error { + if err := makeInputRaw(m.in, m.inMode); err != nil { + return err + } + + // Set StdOut and StdErr to raw mode, we ignore failures since + // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of + // Windows. 
+ + windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + return nil +} + +func (m *master) Reset() error { + for _, s := range []struct { + fd windows.Handle + mode uint32 + }{ + {m.in, m.inMode}, + {m.out, m.outMode}, + {m.err, m.errMode}, + } { + if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { + return errors.Wrap(err, "unable to restore console mode") + } + } + + return nil +} + +func (m *master) Size() (WinSize, error) { + var info windows.ConsoleScreenBufferInfo + err := windows.GetConsoleScreenBufferInfo(m.out, &info) + if err != nil { + return WinSize{}, errors.Wrap(err, "unable to get console info") + } + + winsize := WinSize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func (m *master) Resize(ws WinSize) error { + return ErrNotImplemented +} + +func (m *master) ResizeFrom(c Console) error { + return ErrNotImplemented +} + +func (m *master) DisableEcho() error { + mode := m.inMode &^ windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT + mode |= windows.ENABLE_LINE_INPUT + + if err := windows.SetConsoleMode(m.in, mode); err != nil { + return errors.Wrap(err, "unable to set console to disable echo") + } + + return nil +} + +func (m *master) Close() error { + return nil +} + +func (m *master) Read(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Write(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Fd() uintptr { + return uintptr(m.in) +} + +// on windows, console can only be made from os.Std{in,out,err}, hence there +// isnt a single name here we can use. Return a dummy "console" value in this +// case should be sufficient. 
+func (m *master) Name() string { + return "console" +} + +// makeInputRaw puts the terminal (Windows Console) connected to the given +// file descriptor into raw mode +func makeInputRaw(fd windows.Handle, mode uint32) error { + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + if err := windows.SetConsoleMode(fd, mode); err != nil { + return errors.Wrap(err, "unable to set console to raw mode") + } + + return nil +} + +func checkConsole(f *os.File) error { + var mode uint32 + if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { + return err + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + if f != os.Stdin && f != os.Stdout && f != os.Stderr { + return nil, errors.New("creating a console from a file is not supported on windows") + } + m := &master{} + m.initStdios() + return m, nil +} diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go new file mode 100644 index 00000000000..b102bad743a --- /dev/null +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go new file mode 100644 index 00000000000..e2a10e4413c --- /dev/null +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -0,0 +1,29 @@ +package console + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +// This does not exist on FreeBSD, it does not allocate controlling terminals on open +func unlockpt(f *os.File) error { + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. 
+func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go new file mode 100644 index 00000000000..80ef2f6fb39 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go new file mode 100644 index 00000000000..f8066d8e398 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go @@ -0,0 +1,35 @@ +// +build solaris,cgo + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +//#include +import "C" + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + ptspath, err := C.ptsname(C.int(f.Fd())) + if err != nil { + return "", err + } + return C.GoString(ptspath), nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + if _, err := C.grantpt(C.int(f.Fd())); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go new file mode 100644 index 00000000000..0aefa0d2bb1 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go @@ -0,0 +1,31 @@ +// +build solaris,!cgo + +// +// Implementing the functions below requires cgo support. Non-cgo stubs +// versions are defined below to enable cross-compilation of source code +// that depends on these functions, but the resultant cross-compiled +// binaries cannot actually be used. If the stub function(s) below are +// actually invoked they will display an error message and cause the +// calling process to exit. 
+// + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ptsname(f *os.File) (string, error) { + panic("ptsname() support requires cgo.") +} + +func unlockpt(f *os.File) error { + panic("unlockpt() support requires cgo.") +} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go new file mode 100644 index 00000000000..df7dcb93342 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_unix.go @@ -0,0 +1,75 @@ +// +build darwin freebsd linux solaris + +package console + +import ( + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *unix.Termios) error { + termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet) + if err != nil { + return err + } + *p = *termios + return nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), cmdTcSet, p) +} + +func tcgwinsz(fd uintptr) (WinSize, error) { + var ws WinSize + + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + if err != nil { + return ws, err + } + + // Translate from unix.Winsize to console.WinSize + ws.Height = uws.Row + ws.Width = uws.Col + ws.x = uws.Xpixel + ws.y = uws.Ypixel + return ws, nil +} + +func tcswinsz(fd uintptr, ws WinSize) error { + // Translate from console.WinSize to unix.Winsize + + var uws unix.Winsize + uws.Row = ws.Height + uws.Col = ws.Width + uws.Xpixel = ws.x + uws.Ypixel = ws.y + + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws) +} + +func setONLCR(fd uintptr, enable bool) error { + var termios unix.Termios + if err := tcget(fd, &termios); err != nil { + return err + } + if enable { + // Set +onlcr so we can act like a real terminal + termios.Oflag |= unix.ONLCR + } else { + // Set -onlcr so we don't have to deal with \r. + termios.Oflag &^= unix.ONLCR + } + return tcset(fd, &termios) +} + +func cfmakeraw(t unix.Termios) unix.Termios { + t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + t.Oflag &^= unix.OPOST + t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + t.Cflag &^= (unix.CSIZE | unix.PARENB) + t.Cflag &^= unix.CS8 + t.Cc[unix.VMIN] = 1 + t.Cc[unix.VTIME] = 0 + + return t +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml new file mode 100644 index 00000000000..3938f383494 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -0,0 +1,19 @@ +# Copyright (C) 2017 SUSE LLC. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +language: go +go: + - 1.7.x + - 1.8.x + - tip + +os: + - linux + - osx + +script: + - go test -cover -v ./... 
+ +notifications: + email: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/BUILD b/vendor/github.com/cyphar/filepath-securejoin/BUILD new file mode 100644 index 00000000000..aa508dc1f1c --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "join.go", + "vfs.go", + ], + importpath = "github.com/cyphar/filepath-securejoin", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/pkg/errors:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/cyphar/filepath-securejoin/LICENSE similarity index 92% rename from vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD rename to vendor/github.com/cyphar/filepath-securejoin/LICENSE index 4c056c5ed27..bec842f294f 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved. +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md new file mode 100644 index 00000000000..49b2baa9f35 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -0,0 +1,65 @@ +## `filepath-securejoin` ## + +[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin) + +An implementation of `SecureJoin`, a [candidate for inclusion in the Go +standard library][go#20126]. The purpose of this function is to be a "secure" +alternative to `filepath.Join`, and in particular it provides certain +guarantees that are not provided by `filepath.Join`. + +This is the function prototype: + +```go +func SecureJoin(root, unsafePath string) (string, error) +``` + +This library **guarantees** the following: + +* If no error is set, the resulting string **must** be a child path of + `SecureJoin` and will not contain any symlink path components (they will all + be expanded). + +* When expanding symlinks, all symlink path components **must** be resolved + relative to the provided root. In particular, this can be considered a + userspace implementation of how `chroot(2)` operates on file paths. Note that + these symlinks will **not** be expanded lexically (`filepath.Clean` is not + called on the input before processing). + +* Non-existant path components are unaffected by `SecureJoin` (similar to + `filepath.EvalSymlinks`'s semantics). + +* The returned path will always be `filepath.Clean`ed and thus not contain any + `..` components. 
+ +A (trivial) implementation of this function on GNU/Linux systems could be done +with the following (note that this requires root privileges and is far more +opaque than the implementation in this library, and also requires that +`readlink` is inside the `root` path): + +```go +package securejoin + +import ( + "os/exec" + "path/filepath" +) + +func SecureJoin(root, unsafePath string) (string, error) { + unsafePath = string(filepath.Separator) + unsafePath + cmd := exec.Command("chroot", root, + "readlink", "--canonicalize-missing", "--no-newline", unsafePath) + output, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + expanded := string(output) + return filepath.Join(root, expanded), nil +} +``` + +[go#20126]: https://github.com/golang/go/issues/20126 + +### License ### + +The license of this project is the same as Go, which is a BSD 3-clause license +available in the `LICENSE` file. diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 00000000000..1f5f83047d3 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.2.1+dev diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 00000000000..f20985479d4 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,135 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. +// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/pkg/errors" +) + +// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been +// evaluated in attempting to securely join the two given paths. +var ErrSymlinkLoop = fmt.Errorf("SecureJoin: too many links") + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. +func IsNotExist(err error) bool { + // If it's a bone-fide ENOENT just bail. + if os.IsNotExist(errors.Cause(err)) { + return true + } + + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + var errno error + switch err := errors.Cause(err).(type) { + case *os.PathError: + errno = err.Err + case *os.LinkError: + errno = err.Err + case *os.SyscallError: + errno = err.Err + } + return errno == syscall.ENOTDIR || errno == syscall.ENOENT +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). 
+// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. + if vfs == nil { + vfs = osVFS{} + } + + var path bytes.Buffer + n := 0 + for unsafePath != "" { + if n > 255 { + return "", ErrSymlinkLoop + } + + // Next path component, p. + i := strings.IndexRune(unsafePath, filepath.Separator) + var p string + if i == -1 { + p, unsafePath = unsafePath, "" + } else { + p, unsafePath = unsafePath[:i], unsafePath[i+1:] + } + + // Create a cleaned path, using the lexical semantics of /../a, to + // create a "scoped" path component which can safely be joined to fullP + // for evaluation. At this point, path.String() doesn't contain any + // symlink components. + cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) + if cleanP == string(filepath.Separator) { + path.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + // Figure out whether the path is a symlink. + fi, err := vfs.Lstat(fullP) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + path.WriteString(p) + path.WriteRune(filepath.Separator) + continue + } + + // Only increment when we actually dereference a link. + n++ + + // It's a symlink, expand it by prepending it to the yet-unparsed path. + dest, err := vfs.Readlink(fullP) + if err != nil { + return "", err + } + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + path.Reset() + } + unsafePath = dest + string(filepath.Separator) + unsafePath + } + + // We have to clean path.String() here because it may contain '..' + // components that are entirely lexical, but would be misleading otherwise. + // And finally do a final clean to ensure that root is also lexically + // clean. + fullP := filepath.Clean(string(filepath.Separator) + path.String()) + return filepath.Clean(root + fullP), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf new file mode 100644 index 00000000000..66bb574b955 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf @@ -0,0 +1 @@ +github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 00000000000..a82a5eae11e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. 
+ +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but also can be used to otherwise use +// SecureJoin with VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. +type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. +func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/docker/docker/pkg/symlink/BUILD b/vendor/github.com/docker/docker/pkg/symlink/BUILD deleted file mode 100644 index f15ddab2d8d..00000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/BUILD +++ /dev/null @@ -1,68 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "fs.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "fs_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/symlink", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/docker/docker/pkg/longpath:go_default_library", - "//vendor/golang.org/x/sys/windows:go_default_library", - ], - "//conditions:default": [], - }), -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md deleted file mode 100644 index 8dba54fd089..00000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Package symlink implements EvalSymlinksInScope which is 
-as well as a Windows long-path aware version of filepath.EvalSymlinks
-from the [Go standard library](https://golang.org/pkg/path/filepath).
-
-The code from filepath.EvalSymlinks has been adapted in fs.go.
-Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go
deleted file mode 100644
index 52fb9a691b3..00000000000
--- a/vendor/github.com/docker/docker/pkg/symlink/fs.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.BSD file.
-
-// This code is a modified version of path/filepath/symlink.go from the Go standard library.
-
-package symlink
-
-import (
-	"bytes"
-	"errors"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/pkg/system"
-)
-
-// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
-// absolute path. This function handles paths in a platform-agnostic manner.
-func FollowSymlinkInScope(path, root string) (string, error) {
-	path, err := filepath.Abs(filepath.FromSlash(path))
-	if err != nil {
-		return "", err
-	}
-	root, err = filepath.Abs(filepath.FromSlash(root))
-	if err != nil {
-		return "", err
-	}
-	return evalSymlinksInScope(path, root)
-}
-
-// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
-// a result guaranteed to be contained within the scope `root`, at the time of the call.
-// Symlinks in `root` are not evaluated and left as-is.
-// Errors encountered while attempting to evaluate symlinks in path will be returned.
-// Non-existing paths are valid and do not constitute an error.
-// `path` has to contain `root` as a prefix, or else an error will be returned.
-// Trying to break out from `root` does not constitute an error.
-//
-// Example:
-// If /foo/bar -> /outside,
-// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
-//
-// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
-// are created and not to create subsequently, additional symlinks that could potentially make a
-// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
-// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
-// no longer be considered safely contained in "/foo".
-func evalSymlinksInScope(path, root string) (string, error) {
-	root = filepath.Clean(root)
-	if path == root {
-		return path, nil
-	}
-	if !strings.HasPrefix(path, root) {
-		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
-	}
-	const maxIter = 255
-	originalPath := path
-	// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
-	path = path[len(root):]
-	if root == string(filepath.Separator) {
-		path = string(filepath.Separator) + path
-	}
-	if !strings.HasPrefix(path, string(filepath.Separator)) {
-		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
-	}
-	path = filepath.Clean(path)
-	// consume path by taking each frontmost path element,
-	// expanding it if it's a symlink, and appending it to b
-	var b bytes.Buffer
-	// b here will always be considered to be the "current absolute path inside
-	// root" when we append paths to it, we also append a slash and use
-	// filepath.Clean after the loop to trim the trailing slash
-	for n := 0; path != ""; n++ {
-		if n > maxIter {
-			return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
-		}
-
-		// find next path component, p
-		i := strings.IndexRune(path, filepath.Separator)
-		var p string
-		if i == -1 {
-			p, path = path, ""
-		} else {
-			p, path = path[:i], path[i+1:]
-		}
-
-		if p == "" {
-			continue
-		}
-
-		// this takes a b.String() like "b/../" and a p like "c" and turns it
-		// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
-		// root gets prepended and we Clean again (to remove any trailing slash
-		// if the first Clean gave us just "/")
-		cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
-		if isDriveOrRoot(cleanP) {
-			// never Lstat "/" itself, or drive letters on Windows
-			b.Reset()
-			continue
-		}
-		fullP := filepath.Clean(root + cleanP)
-
-		fi, err := os.Lstat(fullP)
-		if os.IsNotExist(err) {
-			// if p does not exist, accept it
-			b.WriteString(p)
-			b.WriteRune(filepath.Separator)
-			continue
-		}
-		if err != nil {
-			return "", err
-		}
-		if fi.Mode()&os.ModeSymlink == 0 {
-			b.WriteString(p)
-			b.WriteRune(filepath.Separator)
-			continue
-		}
-
-		// it's a symlink, put it at the front of path
-		dest, err := os.Readlink(fullP)
-		if err != nil {
-			return "", err
-		}
-		if system.IsAbs(dest) {
-			b.Reset()
-		}
-		path = dest + string(filepath.Separator) + path
-	}
-
-	// see note above on "fullP := ..." for why this is double-cleaned and
-	// what's happening here
-	return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
-}
-
-// EvalSymlinks returns the path name after the evaluation of any symbolic
-// links.
-// If path is relative the result will be relative to the current directory,
-// unless one of the components is an absolute symbolic link.
-// This version has been updated to support long paths prepended with `\\?\`.
-func EvalSymlinks(path string) (string, error) {
-	return evalSymlinks(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
deleted file mode 100644
index 22708273d60..00000000000
--- a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !windows
-
-package symlink
-
-import (
-	"path/filepath"
-)
-
-func evalSymlinks(path string) (string, error) {
-	return filepath.EvalSymlinks(path)
-}
-
-func isDriveOrRoot(p string) bool {
-	return p == string(filepath.Separator)
-}
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
deleted file mode 100644
index 31523ade923..00000000000
--- a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package symlink
-
-import (
-	"bytes"
-	"errors"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/pkg/longpath"
-	"golang.org/x/sys/windows"
-)
-
-func toShort(path string) (string, error) {
-	p, err := windows.UTF16FromString(path)
-	if err != nil {
-		return "", err
-	}
-	b := p // GetShortPathName says we can reuse buffer
-	n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b)))
-	if err != nil {
-		return "", err
-	}
-	if n > uint32(len(b)) {
-		b = make([]uint16, n)
-		if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil {
-			return "", err
-		}
-	}
-	return windows.UTF16ToString(b), nil
-}
-
-func toLong(path string) (string, error) {
-	p, err := windows.UTF16FromString(path)
-	if err != nil {
-		return "", err
-	}
-	b := p // GetLongPathName says we can reuse buffer
-	n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
-	if err != nil {
-		return "", err
-	}
-	if n > uint32(len(b)) {
-		b = make([]uint16, n)
-		n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
-		if err != nil {
-			return "", err
-		}
-	}
-	b = b[:n]
-	return windows.UTF16ToString(b), nil
-}
-
-func evalSymlinks(path string) (string, error) {
-	path, err := walkSymlinks(path)
-	if err != nil {
-		return "", err
-	}
-
-	p, err := toShort(path)
-	if err != nil {
-		return "", err
-	}
-	p, err = toLong(p)
-	if err != nil {
-		return "", err
-	}
-	// windows.GetLongPathName does not change the case of the drive letter,
-	// but the result of EvalSymlinks must be unique, so we have
-	// EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`).
-	// Make drive letter upper case.
-	if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' {
-		p = string(p[0]+'A'-'a') + p[1:]
-	} else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' {
-		p = p[:3] + string(p[4]+'A'-'a') + p[5:]
-	}
-	return filepath.Clean(p), nil
-}
-
-const utf8RuneSelf = 0x80
-
-func walkSymlinks(path string) (string, error) {
-	const maxIter = 255
-	originalPath := path
-	// consume path by taking each frontmost path element,
-	// expanding it if it's a symlink, and appending it to b
-	var b bytes.Buffer
-	for n := 0; path != ""; n++ {
-		if n > maxIter {
-			return "", errors.New("EvalSymlinks: too many links in " + originalPath)
-		}
-
-		// A path beginning with `\\?\` represents the root, so automatically
-		// skip that part and begin processing the next segment.
-		if strings.HasPrefix(path, longpath.Prefix) {
-			b.WriteString(longpath.Prefix)
-			path = path[4:]
-			continue
-		}
-
-		// find next path component, p
-		var i = -1
-		for j, c := range path {
-			if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) {
-				i = j
-				break
-			}
-		}
-		var p string
-		if i == -1 {
-			p, path = path, ""
-		} else {
-			p, path = path[:i], path[i+1:]
-		}
-
-		if p == "" {
-			if b.Len() == 0 {
-				// must be absolute path
-				b.WriteRune(filepath.Separator)
-			}
-			continue
-		}
-
-		// If this is the first segment after the long path prefix, accept the
-		// current segment as a volume root or UNC share and move on to the next.
-		if b.String() == longpath.Prefix {
-			b.WriteString(p)
-			b.WriteRune(filepath.Separator)
-			continue
-		}
-
-		fi, err := os.Lstat(b.String() + p)
-		if err != nil {
-			return "", err
-		}
-		if fi.Mode()&os.ModeSymlink == 0 {
-			b.WriteString(p)
-			if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') {
-				b.WriteRune(filepath.Separator)
-			}
-			continue
-		}
-
-		// it's a symlink, put it at the front of path
-		dest, err := os.Readlink(b.String() + p)
-		if err != nil {
-			return "", err
-		}
-		if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) {
-			b.Reset()
-		}
-		path = dest + string(filepath.Separator) + path
-	}
-	return filepath.Clean(b.String()), nil
-}
-
-func isDriveOrRoot(p string) bool {
-	if p == string(filepath.Separator) {
-		return true
-	}
-
-	length := len(p)
-	if length >= 2 {
-		if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/google/cadvisor/container/docker/handler.go b/vendor/github.com/google/cadvisor/container/docker/handler.go
index 541df67c99d..c5c46ae4bf2 100644
--- a/vendor/github.com/google/cadvisor/container/docker/handler.go
+++ b/vendor/github.com/google/cadvisor/container/docker/handler.go
@@ -391,6 +391,7 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 	}
 	spec.Envs = self.envs
 	spec.Image = self.image
+	spec.CreationTime = self.creationTime
 
 	return spec, err
 }
diff --git a/vendor/github.com/google/cadvisor/fs/BUILD b/vendor/github.com/google/cadvisor/fs/BUILD
index a5d4bdae7b9..077db1a4866 100644
--- a/vendor/github.com/google/cadvisor/fs/BUILD
+++ b/vendor/github.com/google/cadvisor/fs/BUILD
@@ -17,6 +17,7 @@ go_library(
         "//vendor/github.com/docker/docker/pkg/mount:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/devicemapper:go_default_library",
+        "//vendor/github.com/google/cadvisor/utils:go_default_library",
         "//vendor/github.com/google/cadvisor/utils/docker:go_default_library",
         "//vendor/github.com/mistifyio/go-zfs:go_default_library",
     ],
diff --git a/vendor/github.com/google/cadvisor/fs/fs.go b/vendor/github.com/google/cadvisor/fs/fs.go
index 271b01e3562..ae11b576802 100644
--- a/vendor/github.com/google/cadvisor/fs/fs.go
+++ b/vendor/github.com/google/cadvisor/fs/fs.go
@@ -35,6 +35,7 @@ import (
 	"github.com/docker/docker/pkg/mount"
 	"github.com/golang/glog"
 	"github.com/google/cadvisor/devicemapper"
+	"github.com/google/cadvisor/utils"
 	dockerutil "github.com/google/cadvisor/utils/docker"
 	zfs "github.com/mistifyio/go-zfs"
 )
@@ -409,10 +410,14 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
 		fs.Type = ZFS
 	default:
 		var inodes, inodesFree uint64
-		fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint)
-		fs.Inodes = &inodes
-		fs.InodesFree = &inodesFree
-		fs.Type = VFS
+		if utils.FileExists(partition.mountpoint) {
+			fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint)
+			fs.Inodes = &inodes
+			fs.InodesFree = &inodesFree
+			fs.Type = VFS
+		} else {
+			glog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint)
+		}
 	}
 	if err != nil {
 		glog.Errorf("Stat fs failed. Error: %v", err)
diff --git a/vendor/github.com/google/cadvisor/pages/static/assets.go b/vendor/github.com/google/cadvisor/pages/static/assets.go
index 156737f2bd4..ec854945f3a 100644
--- a/vendor/github.com/google/cadvisor/pages/static/assets.go
+++ b/vendor/github.com/google/cadvisor/pages/static/assets.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
+// Copyright 2018 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -251,7 +251,7 @@ func pagesAssetsStylesBootstrapTheme311MinCss() (*asset, error) {
 	return a, nil
 }
 
-var _pagesAssetsStylesContainersCss = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...")
[remainder of the regenerated, gzip-compressed containers.css byte literal in this generated-asset hunk elided]
1e\xe2\x17\xad\x33\x1f\xbc\x8c\xde\x81\x2b\x3e\x37\x96\xa7\xe5\xde\x96\x94\xf4\xb6\xc0\xfa\x4c\x0a\xf4\x74\xed\x47\xe5\x2e\x8a\xe8\x0c\x29\xa7\xee\x78\x65\x25\x4c\x74\x0a\x82\x3a\x8f\x60\x65\x70\x5a\xf6\xbd\x45\x8c\x8b\x23\x5a\x48\xe6\x30\x23\x77\x05\x40\xe2\x5d\x1e\x5e\xb8\x4b\x84\x4e\x76\x0d\x39\x3d\x6b\x6f\x52\xc1\x0a\x18\x96\x6d\x1d\x2e\xd1\xc5\xd3\x2a\x33\x15\x79\xfe\x84\x42\xb1\xc2\x75\x05\x3d\x85\x88\x97\x69\x62\x13\xc4\xc7\x2e\x39\x71\xe8\x33\x1a\x3b\x69\x45\x51\xe4\x2e\x2c\xe4\x43\x57\x36\x8f\xda\xa6\x41\x7d\x78\xf5\xf2\xe7\x1b\x1d\xe3\xa3\xe7\xbc\x30\x51\x30\x82\xe8\xe0\x65\x79\xe6\x8c\x59\xdd\xe4\x47\x9e\x69\x9a\x06\xef\xa0\xe8\xdc\xea\x79\x39\xd2\xee\x92\x82\x80\x5b\x5d\x2a\x49\xf0\x4f\x01\xa6\x36\xa3\x73\xee\x27\x15\x27\x67\xe2\x70\xed\x75\x80\xf3\x61\xf1\x01\x3a\x5e\x5b\x7b\xc7\x06\x2f\x54\xe5\x18\xcd\x56\xbc\xb6\x2a\xfe\x86\x22\x04\xf4\x2e\x0f\x57\xd6\x8a\xb1\x89\xeb\x36\x10\x9e\x4f\xdd\x39\x24\xd1\x08\x64\x9c\xae\x26\xb1\xcb\xb9\xd5\x6d\x64\x2f\xef\xd8\x7e\x15\x2b\x88\x18\x8e\xf0\x0c\xae\x86\x01\x0e\xf2\x83\x6a\x2e\xca\xd3\xee\xdc\xb1\x59\x37\x00\xf5\x23\x77\x0d\x14\x81\x94\xdb\x44\x83\x4f\x2b\x8b\x5b\xf6\x72\x51\xc0\xcf\x18\xe2\xaa\x3e\x17\x79\x12\x3f\x6d\xe3\x72\x24\xcb\x81\x6d\x9d\xda\x70\x63\x78\x77\x15\xbb\x1d\xee\xe4\x01\x49\x46\x85\x39\x3b\x93\x45\xd1\x6d\x18\x2a\xd1\x00\x05\x29\x13\x4d\x68\x32\x4b\xe9\x35\xb3\x2c\xcd\xb2\x8d\x71\xd9\x7b\xe4\x61\x8b\x3e\x40\x11\x01\x83\xbd\x68\xc8\xc5\x67\xfc\xa4\x9b\x40\x6e\x0a\x01\xd0\xaf\x59\xb2\xb1\xe1\x3b\x9a\xf5\xaf\xfa\xba\x46\x4e\xcf\xd9\x48\xd4\xe2\xfd\xcd\x30\x14\xde\x8b\xdb\xbb\xb0\x38\x52\x7c\x8f\x40\x6a\xe7\xf4\xd7\x24\x08\x49\x3d\xd7\x7b\x92\x94\x1e\x67\xf9\x1a\xc1\x19\xdb\xb5\x58\x4f\x19\x97\x8c\xc6\xad\x40\xdd\x19\xc4\x91\x5a\xf9\xc8\x50\x72\x99\xb4\x8c\x63\xfa\x47\xf8\x31\x93\xea\x16\x42\x2d\xd6\xb0\xbb\xb4\xef\x27\x02\x85\x84\x50\xa2\x9d\x4c\x50\xda\xe5\xa6\x82\x05\x90\x03\xb4\x37\xc2\x3e\x84\xad\x7d\xc4\x4b\xd1\x4b\xae\xa7\xd9\x3d\xa9\xc9\x2c\xb1\x78\x0a\x88\x57\x5e\xe7\x40\xf5\x6c\x38\xf4\x7e\x6a\x2d\x61\x23\xf0\xe2\x30\x39\x47\x09\xeb\x4d\x78\xaf\x17\x0c\xce\x9b\xbb\x9e\x74\xe0\xf3\x22\x34\x31\x5c\x3f\x43\x03\x5b\xfc\x33\x3c\x05\x4c\x98\xd0\xe9\xa8\x11\x84\xa0\x2b\x07\x4f\xe5\x10\xbe\x6e\x05\x0a\x00\xb8\xe2\x3c\x90\x46\x28\x35\x79\x5c\x8a\xa2\xc8\x10\x5a\xa9\x67\xe9\x04\x6e\x71\xa2\x9f\x50\xce\xd3\x3a\x2b\x7d\xd6\x23\xce\x84\x53\x22\x5b\x91\xa8\x8d\x2a\x73\x96\xb8\x1b\x0d\x46\xf8\xe2\xc3\xee\x98\x55\x94\xb8\x8c\x9e\xda\x8c\x7b\xb2\x62\x21\x10\x48\x08\x49\x4e\xcf\xbb\x76\xe6\xa2\x51\x9a\xd0\xe6\x91\x20\x25\x19\x49\x88\x28\xae\xc1\x2f\x40\x26\x3a\x10\xa4\x5f\xee\x27\xe3\x73\xfc\x77\x3f\x06\xa6\x18\x0a\xae\xfb\x98\x08\xdc\xb3\xd4\xee\x09\xf2\xcc\x51\x94\x40\x4d\x0a\xe3\x2f\x61\xa2\x1d\xcf\x6d\x3c\xa0\x8a\xc7\x53\x24\x4c\x60\x18\xce\xdd\x86\xd6\x45\x89\x65\x9c\x0b\x68\x3c\xcd\x27\x38\xd2\xf9\x45\x81\x3b\x1e\x42\x7d\xed\x1e\x22\xc6\x8d\x0d\x25\xf9\xdb\x8e\xd3\xe4\x62\x5b\xe6\x3a\xb2\xe2\x73\xda\xd8\x0e\xab\x22\x23\xd1\x72\xdd\x5d\xa0\x04\x76\xf0\xe4\x65\xcb\x37\xfd\x2e\xed\x90\xbf\x09\xbb\x32\x4f\xea\x34\x81\xab\xbe\xf5\xa6\x26\x28\xf6\x03\x00\x68\x1c\xc1\x04\xee\xd5\x13\x3a\x0b\x4a\x22\x97\xfa\x72\xf9\xbe\x9b\xbd\xdd\x1b\x00\x5b\x1d\x89\x50\x6a\x8e\xf5\x0f\x9a\xa2\xc8\xcb\x7e\x31\x5b\x69\xa8\xfa\x25\xeb\x45\x72\xda\x82\x58\x3a\x1e\x82\x05\xf8\x8f\x41\xa6\x76\x00\x41\xa1\x30\x2d\x5d\xd8\x0a\xd7\xab\x48\xf3\x66\xef\xa7\xba\xc0\xd9\xd0\x32\x29\xbb\xce\xf6\x97\xcb\xa7\xce\x4b\x0a\x6e\x73\xa0\x30\x43\xd3\x55\x7d\x75\x69\xb3\x40\xdc\x99\x3a\xac\x81\x9b\x73\xd9\xb5\x1d\x3d\x4f\x78\xd4\xcc\x42\x01\xf1\x56\xe2\x6e\x37\x9e\xa6\
x41\xd1\x75\x65\x96\x48\x06\x38\x06\x0a\x4d\x81\x95\x67\x29\x20\x24\x8a\x87\x8f\x3a\x6c\x98\x94\xfb\x36\xce\xf1\x12\x96\xf2\x6c\xb6\x5b\x94\x7a\xcd\x80\x60\x25\xaf\x7c\xbe\x46\x95\xcf\x79\x2d\xbe\x9c\x10\x06\xa1\x50\x5f\xa0\x2b\xba\x25\x5e\xa7\xa5\xa2\x08\x49\x76\x3d\x7a\xc4\x4a\x2c\x19\xe5\x27\xa7\xdb\x7b\x37\x5e\x3a\xde\x16\xa2\xfd\xaf\xf0\x62\x68\x7a\x97\xec\x0b\xba\x5c\x63\xef\x7a\xc7\xfa\x03\xed\x0c\x54\x9b\xfc\x58\xc4\xae\x3c\x92\x0f\x5e\x53\x18\xdd\x49\xdc\x8c\x86\x80\x93\x95\x88\xfb\x59\x66\x00\x96\xbf\xf2\xcf\x80\x07\x8b\x22\xeb\x35\x76\x6c\xb8\xee\xde\x41\xd7\x6b\x07\x51\xcd\x45\x62\xee\xcf\x6f\xfb\x11\x46\xa5\xc9\x1c\xa0\x81\x3b\x95\xdf\x61\xbd\x4a\x48\xf2\x74\x62\xde\x3c\x38\x48\xdc\xa6\xfb\xb2\xe2\xc0\x75\x0a\x7d\xa1\x22\xfc\x66\x20\x10\x00\xeb\xee\xd8\x65\x8b\x6c\xb7\x61\x67\x54\xd9\xa2\xe9\xe6\xc4\x09\x59\x70\x56\x4f\x3d\xb9\xb2\xe4\x3f\xe6\x69\x9e\xd9\xc8\x81\xcc\x87\x31\x6e\x71\x37\x2e\x72\x5f\xc1\x43\x5f\x60\xd3\x9d\x53\xa4\xfa\x76\xd7\x76\x02\xde\xa1\x0a\xed\x9d\x67\xaa\x41\x74\x86\xd0\x5e\x2a\xcc\x3b\xae\xda\xe3\x7c\x97\x36\x15\xce\x07\xc8\x05\x52\xe3\xea\x9f\x48\x54\x0a\x2c\xdf\x2f\xea\x7b\xfd\x64\x00\xcd\x67\x4b\x80\xea\xb5\xc0\xb2\xdd\x55\xd5\xb4\x67\xb0\xcc\x99\xa5\x0f\xe4\xdb\x4e\x10\x1d\xc8\x7c\x5b\xd0\xec\x99\x8a\x6c\x1d\xce\x72\x3d\x67\x75\x3b\x0c\xcf\x75\x40\x8d\xab\x12\xb4\xb4\x3e\x42\x48\xa0\x00\x46\xcd\x73\xe1\xdb\xce\xea\xba\x73\xc5\x45\x26\xac\x6e\x88\xf3\x6e\x40\xb2\x29\x91\x23\x66\x52\xba\xca\xf4\x05\xc1\x5e\x03\x51\xac\x71\xa5\x35\xc1\x20\x49\xb6\x6b\xe3\x14\xe2\x4e\xb9\x02\x11\xef\x80\x36\x24\x08\xb8\xc5\x3a\xd8\x93\x00\x95\x88\x6c\x99\x5e\xf7\x15\xf6\x95\xa7\xe3\x8d\x9b\x43\x44\xfb\xae\x2f\xd0\xda\x40\xf1\x1d\xca\x1b\x08\xcb\xc5\xea\x16\xc7\x61\xcb\x56\x6f\x7b\x32\xd9\x7e\x8d\x1d\x10\x6c\x38\x10\xad\x9e\x14\xf8\xe2\x1e\x10\xc7\xda\x1d\x5b\x04\x7d\x8d\x65\x28\x88\x8f\x40\x37\xc4\xae\xb7\x9a\x6e\xae\x6f\xd3\x82\x43\x14\x42\x9d\xf6\xb5\xfc\xcc\x81\xc6\xd1\xcf\xc4\x36\x61\xb3\xa7\xd6\xe5\xb1\x78\xe3\x8a\x24\x09\xac\x8e\x4f\x24\xef\xd0\x67\x63\x27\x93\x59\xe8\x42\x5a\x14\x08\x9e\x02\x59\xfc\x32\x61\x8e\x0e\x6d\xe1\x0f\x5a\xd8\xca\x75\xf1\x10\x58\xdb\x6f\xe4\xdc\xf5\x84\xd7\xcb\xe8\x6a\x3d\x20\xd1\x19\x86\x6a\x46\x97\x29\xca\x45\xf6\x91\xf1\xa9\xb0\x2d\x84\xca\x95\xa3\xa7\x0a\x65\xe1\x08\x45\x00\xd1\x9a\x9d\xe9\xc1\xb6\xa2\x6a\x77\xad\xfa\x58\xbf\x97\x44\x70\x9c\xc1\xd4\x40\x83\xe2\x0c\x18\x58\xc0\x38\x9e\xbb\x5e\x64\x05\xcd\x9e\x06\xa2\x89\xb2\x4d\x1d\xb6\xf2\xba\xf9\xc1\x69\x1f\x00\x38\xce\xf7\x7e\x20\xd6\x11\x26\xf0\xe8\x4c\x9a\x2b\x77\x2d\xc7\x89\xce\x54\xe1\x63\x76\x2d\x36\x85\x44\x41\xd0\x18\x92\x10\xb1\xc2\x28\x7f\xcd\xac\xe9\xf5\x99\x37\x87\x35\xe4\xd7\xba\xdf\x36\x32\xf5\x11\x80\x72\x3d\xcd\xc1\x40\xdd\x41\x55\x2f\x4e\xbc\x06\xcf\x73\x7f\x3c\x26\x6f\x82\x81\xe4\x81\x97\x11\xbb\xd3\x3d\xa9\x77\x24\xb9\x9e\xc0\x05\xd9\xb2\x38\x57\xef\xab\x0d\x83\x94\xd9\xf1\x54\x34\xf3\x0f\xfc\x6a\x75\x16\x55\x54\x26\x56\xa0\x64\x36\x93\x3a\xcf\xf7\x14\x9d\x1b\xb5\x54\x85\x73\xe7\x0c\xc7\x3c\x29\x2f\x9f\x37\xee\x35\x82\x80\xe0\x4e\xf8\x59\xec\xd7\xd4\x78\xeb\x9c\xe6\xa8\x7c\x34\xd1\xba\x5b\xbb\x43\x9c\xf0\xed\xbb\x84\x1b\x5b\xe4\xdd\x40\x16\xa0\x5f\x80\x44\x20\x41\xd7\xe8\x44\xd9\x14\x04\xcf\x36\x73\xaa\xec\xa1\x26\x5e\x3c\x15\x1b\x4a\x76\xdb\x34\x25\x79\xdc\xe2\x13\x45\x15\x23\x9e\x07\x8f\x44\x6b\xcf\x70\xbb\x7a\xaa\xdf\x0d\xb1\xc9\x67\xe3\xbe\x2f\x9b\xb7\x14\x6e\xb5\xd8\x28\x72\xc0\x89\x36\xc2\xe9\x6c\xab\xd8\x34\xae\xc3\xec\x77\xe8\xd4\xc0\x64\xb6\x02\x79\x27\x9f\x10\x10\xae\x13\x9c\x0f\x42\xfd\xff\x51\x75\x1d\x6b\xae\x22\xcd\xf2\x81\x58\x14\x1e\x6a\x29\x09\x09\x6f
\x84\x87\x1d\xde\x7b\xcf\xd3\xdf\xaf\xcf\x3f\xa7\x67\xee\xba\xf5\xa9\x1b\xaa\x32\x33\x22\x33\x32\xfa\x0c\xd7\x5d\xef\xf9\x0e\xa7\x69\xd9\x96\xbf\x3d\x22\xcd\xab\x3b\x02\xb8\x6b\xb9\x3e\x62\x3a\x12\x24\x3a\x77\x00\x70\xae\x19\xf2\x6e\x2e\x50\x90\xd2\x77\x20\xfa\xeb\xb2\xf2\x7f\xf4\xc5\xdc\x43\x80\x4c\xb4\x79\x2b\x19\xbe\x2d\x73\xc8\x10\xc0\x40\xfa\x1d\x70\xd7\x07\xf7\x1e\xc7\x47\x4a\x1c\x82\xe8\xfd\xfe\xbb\xf8\x4a\x79\x24\x82\x5e\xef\x0c\xc8\x57\x1a\xdb\xf9\x13\x5f\xe7\x08\x83\x34\x9b\xf8\x34\x52\x4f\x9c\x68\x89\xbd\x84\x66\xad\x53\xb8\xd5\x40\xc8\xd7\xc2\x02\x0f\xb3\x1a\xeb\x73\x0d\x95\x78\x19\xb0\x5e\xd7\xed\x05\x39\x69\x9b\x95\xf7\xa9\xda\xb2\x1b\x6d\x38\x11\xa3\xab\x5c\x4f\xd3\x18\xb9\xd8\xb9\x09\x07\x19\xac\xcc\x9e\x32\x08\xd8\xa9\x9a\x61\x18\x06\x1d\x96\x1d\xee\x31\x2e\x55\x4e\x2f\x5d\xdf\xe6\xa9\x57\xe9\x37\x02\xfe\x7a\x4d\x67\x99\xde\x35\xeb\x15\x28\x5f\x38\xf9\xfb\x98\xff\x8c\x3d\xfe\x77\x26\x8f\xfd\x07\x03\xa5\x08\x13\x0d\x6c\xfd\xfd\x98\x7e\x3b\x61\xa9\xfb\xec\x53\x06\x85\x8a\x1d\x33\x3d\x86\xed\x37\x5b\xf4\x46\x7f\xaf\xed\xa2\x8d\x08\xc8\x3b\xb8\x33\xa4\x00\x11\x22\xdf\x89\x09\xf3\xa6\x63\x90\x5f\x85\xb2\x2d\x7c\xb4\xba\xdd\x26\xab\xb6\x83\x6a\x43\xfc\x82\xd4\x28\x8f\xfe\x4c\x0e\x19\x61\x7e\x7d\x94\x5f\x5f\x24\xbb\xcb\x72\x45\xee\x71\x1c\x53\x48\xd2\x2b\xf7\x97\xd0\xef\x1b\x23\x32\x86\xb0\x50\x5f\x99\x6f\x04\xef\x56\x2c\x44\x76\x35\xec\x35\x3e\x6c\xe2\xc7\x6a\x15\xd0\xc8\xb0\x97\xc5\xee\x0d\x25\xd9\xec\xf7\x60\x75\x2e\xc2\x92\xc5\xbd\x6a\x34\xe7\xf8\x12\x7d\xfe\xce\x93\x58\x71\x85\xc8\x01\x50\x98\xfb\x8a\x81\x53\xe4\xa6\xd3\x25\xf3\x05\x03\xcc\xc7\x6f\x24\x48\x5b\xbc\x91\x69\x0a\xef\x0c\x83\xb9\x4f\x8d\x90\x23\xaa\xa4\x97\x50\x36\x31\x80\xd0\x75\x38\x18\x52\x65\xaa\x86\x49\x79\x5c\x33\x62\x35\xa3\x12\xe1\xeb\xb6\xba\xdd\xfe\x55\x48\xa3\xfd\xdf\x3e\xc6\x31\x7c\x9e\x11\x4b\x74\xcf\x60\x21\xe2\xf5\x18\x1c\xc5\xa6\x50\x56\xb7\x51\xc8\x04\x6c\xf9\xc3\x25\x88\x95\xbe\x29\x68\x49\xcd\xd5\x8d\x53\xd6\x8f\xf4\x57\x70\x04\xc9\x95\xdb\xf0\x72\x3c\x61\xef\x66\xe3\xe4\x36\x88\x6d\x03\xe4\x03\x3e\x78\xe4\x72\xf1\x2c\xff\xed\xd7\x20\x07\x8e\xe0\xfb\x3e\x53\x5c\x53\xd9\x54\xc9\x30\x25\xb2\x6d\x72\x35\x8c\xaf\x11\x7a\x34\x6b\x70\x18\x0f\xa3\xd8\x96\x23\x5b\x42\x19\x3a\x0a\xca\x11\x01\x96\x44\x5e\x29\xce\xa4\x43\x28\x9c\x48\xde\x53\x6c\xfe\x55\x31\xc9\x76\xba\x71\x9a\x48\x89\x29\xdc\x42\xd5\x2b\xc2\x20\x9e\x80\x7d\xe2\x58\x7e\xb8\x23\x9d\x75\xb1\xd6\x21\xdb\x1f\xe5\x5f\x39\x5f\xe8\xf9\xe4\xbe\xa8\xc6\xa4\x1b\xbe\xcd\xf4\x96\x6b\x04\x44\xfc\xcf\x99\x0c\x82\xcd\x87\x9c\xac\xd5\x22\x43\xcf\x17\xe6\xab\xdc\x40\x6f\xbd\xd2\x56\xd3\x83\x25\xbf\xe9\xf3\x57\x5b\xa0\x3f\x99\x93\xa4\xf3\x4d\xa8\xcf\x66\xbf\xeb\x1f\xe0\xfa\xbf\x1a\xec\x46\x82\x37\x68\xca\xd5\x8c\x57\xb2\x1b\x3a\x71\x0f\x8b\xc1\x22\x0d\xf3\xd4\xf0\x94\xa4\x90\x37\xe3\xd2\xa9\xf1\x91\x2c\xb7\x5b\xee\xa6\xc0\x4b\xf1\x07\xaa\x9d\x6a\x2a\xe4\xc6\xc9\xbd\x1d\x02\xd4\xe7\x80\x96\x99\x61\xde\x21\x6f\x37\xb6\x9d\xe7\x84\x36\x5b\xf8\x4a\xa4\x4c\x0c\x4d\xa4\x1f\x99\x94\xb9\x71\x64\x2e\xdb\x5c\x17\x5e\x65\x20\x5c\x2f\xc7\xe4\xb3\x7e\x74\xd3\xd8\x67\x7b\x14\xd3\xf5\xf3\x1c\xa3\xa8\x93\x0a\xbf\xa5\x75\x54\x4c\xfe\xe2\xb3\x43\x4d\x1a\xcb\x9f\xc7\xd6\x4d\xfb\xb0\x8b\x1b\x01\xcc\x38\x8e\x53\x92\xed\x34\xd2\x1d\xc7\x3d\xb5\xc5\x1d\x85\x24\x39\x2e\x80\xc7\x4c\xab\x2a\x57\xd0\xc8\xe2\xe9\xc2\x85\x78\xd3\x24\x17\x62\xa5\x6b\x9e\x5a\x2b\xd6\x44\x4f\xb3\x61\x92\xa3\xfc\x4a\x20\xd1\xdd\x59\xb6\x66\xa9\x98\x57\x4f\xe7\x53\x06\xdd\x27\x9a\x89\xf8\x79\x7e\x11\x83\x2a\x57\x26\xf3\x77\x44\x23\x77\x97\xe6\xcf\x25\x82\xc8\x9d\x35\x94\xe7\x76\xa1\xe7\x4a\x3e\x15\x73\x8f\x8
8\xc3\x11\x62\xa4\xf5\xcb\xc9\xf1\x3a\x6e\x79\xc2\x1c\x06\x8f\x7d\x5c\x4a\xf1\x3b\x67\x12\xae\x65\xca\xdb\x57\x1a\xa0\xaf\x12\x1b\x1a\xe9\xca\xd3\xad\x73\x15\xd9\x40\x5e\x39\x4a\xe9\x75\xdd\x67\xb9\x7e\xa7\x01\x41\xa6\x66\x28\x37\x0b\x9e\xeb\x42\x8d\x5d\xa1\xce\x57\x66\x26\xe6\x86\xf9\xce\x84\x8d\x42\xf5\x7a\x63\xb6\x41\xba\xce\x0e\x84\x4b\xd5\x60\x9a\xed\x36\x1f\xe6\x8c\x66\x97\x22\x80\xef\xf5\x36\x45\x46\xbc\xd9\x9f\x34\xfa\xc0\x3b\x9c\xd8\x71\x62\x15\xca\x26\xf3\x14\x97\xdd\xc0\xac\x4d\x28\x26\xd9\x13\x3d\xce\x04\x4f\x13\xb7\xae\x45\xb3\x16\xc9\xec\xac\x88\xd7\xbb\xce\x9e\xcf\x4f\x91\x9e\xd2\x3f\x7a\x06\xee\x98\xa3\x79\x8a\x1a\x9f\x27\x7c\x8e\xe3\xc2\x13\xc9\x60\xee\xcd\x71\x9e\x64\xd3\xa2\x3e\x30\x9c\xcd\x0c\xfe\xb4\x89\xfb\x1a\xcd\x34\xff\x28\x6c\x1f\x61\xd1\x8e\x6e\x3b\x57\xe0\xc8\xf6\x93\xf8\xae\x66\x54\xbe\xc4\xfd\x34\xd1\x90\xcf\x77\x56\x00\xa7\xe8\x7e\x2c\x9f\xf3\xb8\x35\xcb\x01\x6a\x73\x37\x64\x97\xd8\x75\x59\x89\x0c\x38\x31\xc0\xa6\x6b\x6f\x05\x98\xcf\xf0\x1b\xc3\x24\x67\x45\xeb\xaf\x6e\xe1\xf1\xcc\x97\xb6\x58\xda\x8b\xc6\x2e\x1b\x6e\x5e\x3b\x4f\xe0\x07\x16\x0a\x30\xf0\xb4\x82\xe8\xf7\xb2\x1b\xab\x43\xf8\xfe\x69\xad\xdf\x83\x41\x30\x24\x47\x00\x02\x92\x53\x32\xfa\x06\x26\x56\x62\xe5\xb6\xae\xf7\x5c\xe1\x7b\x66\x40\x89\xac\x0f\xd6\xcb\x1c\xce\x59\x0d\x0b\xec\x01\x43\x50\x61\x3f\xab\xb4\x7e\x37\x2c\x00\x53\x47\x40\xe4\x30\xf7\x57\x32\xbb\xf4\x25\xd6\xe2\xf5\xcd\xe1\x71\x87\xc8\xc2\x0a\x05\x9c\xdf\x03\xff\xc1\xaf\x7f\xee\xa1\x94\xeb\xbd\x8c\xdb\x00\x7e\x8e\xc0\x3b\xb2\xfc\x44\xb8\x23\xe7\xde\x2b\x45\x7f\xe3\xb8\x84\x19\x48\x11\x64\x16\x84\xf2\x88\x04\x53\x67\xa4\x69\x99\xdc\xe7\xa0\x96\x1d\x1f\x50\xba\xdd\x8d\xa3\x52\x05\x98\xcf\x84\x48\x81\xc6\xee\x84\x65\x52\x02\x03\x6e\xc8\xeb\xcb\x07\x24\xb3\x40\x84\x6d\x75\x93\xa7\x15\x9f\x86\xd9\xe2\x57\x04\xd8\xb8\x28\x9e\xf3\xf6\xf5\x3a\x16\xab\x08\xdc\x7f\x7a\x9a\x5c\x61\xe1\xf9\x4c\x6f\x4e\xae\x71\x0d\x35\xc5\xaf\xa3\xd0\x23\x10\x22\xdd\x15\x9a\x0b\x1e\x8f\xe6\xba\x69\xb1\x87\x35\x2c\x84\x6d\xa9\x54\x4c\x45\x75\xea\xa7\xf0\x14\x6a\x89\xec\xa7\xc6\xc6\x03\x16\x81\xf2\x41\x1b\x38\x02\x07\x6c\xbf\x84\xe6\x42\x04\xc3\x30\xf6\x85\xb0\x89\x53\xa1\x34\x24\xf9\xf9\xec\xe4\xd2\xbb\x8b\x3c\x56\x92\x3f\xd2\xe9\xe4\x9f\xa5\xff\x37\x7f\x3c\x6e\x22\x45\xe1\x40\xeb\x35\x3b\x20\x4c\xb8\x3a\xd3\xb2\x88\x46\x66\x9c\x76\xcd\x2e\x14\xf1\x67\x7c\x9f\x03\x7a\xd6\xfa\x68\x61\x45\x9c\x7a\xb9\x32\x15\xce\x9a\x34\x17\xdd\xe7\xb2\xe5\x28\x6a\x4a\x95\xa8\xf3\x2b\x80\x86\x4f\xd8\xed\x82\x5d\x75\x5f\x93\xa8\xfc\xde\xf3\x9c\x57\xb4\x71\xd5\x6e\x31\xfe\xa9\x03\xad\x45\xe5\xa2\x81\x53\x4a\x84\xec\x28\xe4\xbe\xca\xbb\xe0\x7f\x9b\x92\x24\xa7\xf5\xf4\x69\x07\xba\x59\xa4\x82\x8b\x79\x6e\x33\xdf\xc6\xf5\x20\x14\x1c\xa4\x69\xb6\x37\x82\xf6\xe9\x16\x82\x58\x65\x8c\xa5\xd2\x5c\x6e\xba\x07\x2e\xd5\x03\xb9\x29\x17\xab\xd4\x4b\xb7\xfa\x4a\x73\x45\x8a\x84\x08\xc4\xdc\x34\x4e\xda\x08\x33\x8f\xcc\x3b\xad\xf4\x60\x67\x4b\xcd\x99\xe8\x7a\x0c\x87\x54\x10\x85\x1c\xa3\x0c\x89\xba\xa9\x73\x0d\xea\x6f\x51\xfc\xe6\x7f\x0d\x1c\xd9\x8c\x6a\xb5\x43\x5c\x35\xb6\x51\x49\xe1\x86\xa4\x93\xd1\x4c\x1e\x30\xe1\x93\x31\x49\xff\xc1\xb2\x01\xb6\xbb\x23\xbe\xbd\x67\xb5\x9c\xb0\xe4\x73\x60\x2a\x0d\xfd\x19\xa2\x95\xa7\x78\x29\x99\xb7\x63\xe7\xae\x5e\x75\x12\x12\x36\xc6\x1d\x83\xb0\x2d\x04\x56\x72\xa6\x14\xf7\x3e\xd5\xc8\x6b\x5f\xfd\x89\x67\x12\xc5\x0a\x0f\xfe\xf3\xfb\xdc\xcf\x2d\xed\x5d\xcc\x76\x30\xf5\xe2\xe8\xd9\x8e\x92\x46\x36\x58\x46\xbe\x46\xef\x5c\xe2\xf7\xc5\x21\x23\xaa\xcf\x00\xbb\x3d\xe0\x61\x30\x51\x70\x89\x6b\x46\xd9\x66\x81\x72\x3e\x7a\x18\x7f\xaa\x33\x98\x92\x89\x42\xbf\x5a\xb1\x30\xe7\x7b\x6e\x
6f\x87\xd0\x08\x80\x5d\x18\x3c\xf5\xe7\xf1\xd2\x3a\xdb\xbd\x4a\xff\xe2\xe0\x6c\xe2\x56\xbf\x2c\xb6\xf1\x28\xa9\xbf\x7d\xd9\x27\x4b\x12\x17\xf7\xe8\x17\xde\x22\x27\x45\xbc\xc4\x2a\x32\x28\x01\xed\x26\x4c\xbe\x16\x17\xe7\xde\xd0\xb0\x09\x78\x47\x10\xee\xb1\x7e\x3b\xec\x2b\x7b\xaa\x7c\x2f\xfb\x47\x91\x3e\xbc\xd9\xa5\xa1\x19\x6a\xe7\x0d\x73\x3f\x47\xd2\x75\xb7\x28\xb1\xf1\x1a\xc3\x42\xd7\x3f\x0c\xa2\x11\x5a\xcb\x1a\xd5\xa8\x0b\xdb\xa0\x7e\x14\xfe\x2f\xff\xd2\xa8\x29\x52\xd8\x02\xc8\x9f\xbe\xf4\x91\xda\x40\x27\x4f\xae\xb9\xe5\x54\x73\x1e\x9b\x89\x13\xcb\x85\x33\x59\xc5\x60\xe9\x38\x90\x47\xe2\xa1\x12\x8f\x5e\xbb\x96\x53\xb5\x9a\xc1\x9a\xad\x89\x21\x2c\x4a\x94\xb2\x90\x4c\x70\x3a\x53\x67\xb6\x4a\x8c\x3b\x58\x4c\x80\x20\x04\x22\x66\x4f\x2d\x18\x96\xa7\xad\x97\x9b\xab\xf3\x8b\xd2\x3d\xff\xee\xce\xbe\x1e\xdc\x71\xa8\xdc\x67\x6c\x73\xb4\x4c\x83\x46\x7a\xa5\x7d\x84\x47\x32\x4a\xba\x04\xd4\x7b\x5e\x92\x5b\x61\xbd\x22\x17\xee\x0c\x64\x59\x36\xb0\x05\xf2\xfc\x4c\x69\xfd\x8d\xee\x44\x96\xab\x85\x83\x66\xbb\x54\x44\x91\xbe\x0a\x52\xe7\x9e\x83\xf6\xec\x85\xc1\xd1\x7d\x81\x59\xda\x34\xcf\x32\xdf\x75\x31\xd2\x8c\x03\x71\xba\x96\x15\xd5\x47\x42\x2d\x98\x90\x48\x95\x32\x51\x7e\x20\xab\xc9\xfd\xf6\x10\x8c\xfe\x55\x8e\x8e\x8f\x7c\xbf\xbe\xfa\x6a\xd8\xc4\xc0\xf9\xd3\x53\xad\x51\xba\xd4\xfa\xcb\xf2\x42\x0b\x63\x1f\xcb\x41\x73\xde\xac\x17\x07\xe4\x0b\x12\x89\x0e\x85\x11\x7a\x5e\x27\x39\x2f\x87\xea\x13\xba\xcf\xf7\xe1\x5e\xdd\xa9\xc8\x5f\x4d\x2f\xa1\xf9\x13\xf5\xc1\x35\x9c\x2c\xd1\x73\xec\x8e\xcf\xe2\xd8\xfa\x44\xb8\x69\xc4\x56\x51\x79\x94\xf1\xe9\xfb\xfb\x1f\xce\x5e\xbc\x14\x2a\x53\xda\xab\xa3\x55\x80\xee\xdc\x17\x35\x32\x04\x81\x84\x7a\x3f\xce\xd3\x5a\xa5\x69\xb5\xba\xab\xbf\x19\x36\xaf\x10\x80\x68\xf2\x13\xe4\xdb\xa7\x9b\xa2\x77\x29\x5a\xe8\xaa\x99\x0f\xf0\x22\x67\xbf\x7e\xe6\x60\x20\x1e\x2e\x88\x6b\x01\x10\x22\xe3\x07\x8a\xeb\xca\xfc\xb0\x22\x0f\x65\xcc\x67\x9c\x00\x0c\x93\xf4\x92\x15\x7d\x68\xa7\xfd\x64\xd2\x8e\x14\x8a\x33\xb1\xf2\xf7\x75\xff\x6a\xf8\xa4\x6a\xee\xc2\x2e\x24\x56\x12\xe9\xbe\xb9\x76\x25\xb4\x5e\x0f\xa4\x37\x15\x20\xcf\x80\xc4\xb8\xd3\x82\xe9\x55\xc8\x4f\xf2\x54\x0d\xa7\xb7\x51\x18\x00\xd4\x37\xe5\x0a\x54\x7b\x41\xc7\x42\xfd\xb4\xf4\x27\xd7\x3a\x96\x81\xcd\xf4\x5c\xa1\x27\x1a\x7e\x4e\xd4\xba\x49\x0d\xee\x91\xc0\x1d\x4f\x08\xc0\xde\x3e\xe0\x95\x10\x52\x93\x83\x00\x38\xfb\x22\xa0\xd9\xe3\x57\x2e\xa7\x9e\xae\x89\x67\x3d\x50\x53\xb5\xa2\x02\xd7\xd8\x77\x55\xbd\x7d\x17\xf3\x3e\x65\x60\xbe\xcd\xb7\x23\xba\xe1\x70\x3a\x88\xc0\x3b\x0d\xa5\xdb\x00\xd4\x4d\x83\x2a\xd8\x28\x7f\x1b\xd6\x50\xce\xea\x51\x72\x7b\x23\x19\xfd\x7e\x8a\x24\x62\xd4\x04\x60\x01\x38\x0f\xef\xd3\x45\xaa\xf6\x98\xdd\xe9\x5d\x3c\xfb\x19\xd2\xac\xca\x3d\x89\x78\xc5\xab\x54\xb5\x57\x77\x4c\x9d\xf6\x58\x58\xfa\x59\x93\x88\xf6\x12\xec\xe0\x93\xeb\x07\xf1\x7a\xfd\xd3\x7a\x55\x4f\xd3\xc3\x36\xe6\x7e\xe9\x93\x2b\x4f\x16\x59\xf8\x55\x2a\xbf\x88\x15\x64\x00\xec\x86\xcf\x40\xa7\xc3\xf6\xaf\xe8\xbc\xa1\x66\x27\xb8\x8e\x11\x99\x88\x52\x32\x89\x6b\x95\x0a\x0d\xfb\x81\x54\xe4\xec\xcf\xcf\xc7\x11\x19\x8d\x81\xdc\x82\x01\xee\xb2\x87\x92\x41\xa6\x11\xf3\x7e\xf8\x10\xaa\x09\xe0\x4d\x36\xff\xd0\xc9\x3a\x3a\x23\x2b\x55\x36\xf0\x0e\xf5\x78\xfd\xab\xad\xa5\xa2\x6e\xdf\x1b\xa1\x7f\x33\xfa\xb5\x4c\x72\x31\x24\xf6\x7c\x14\x08\x00\x15\x6b\x94\x9b\x71\x03\x1c\xc1\xf0\xc1\x0b\x85\x46\x18\x11\x36\x35\xed\x9f\xda\x41\xce\x56\xbb\x2a\x5f\x72\x51\x18\x85\xd2\x00\xc8\x0e\x5f\xa8\x4f\x22\x5e\x89\x3c\xef\xcf\x28\xb0\xe3\xc7\x27\x66\x74\xc9\x11\x63\x37\xda\x09\x33\x07\xe7\xb7\x57\xe2\x86\xe2\xbf\x71\xed\x2b\x3b\xe4\xc1\x4c\x3f\x0a\xae\xfc\x9d\xdb\xbe\x82\x37\x9d\xdb\xba\x5d\
x90\xb2\xa1\x17\x2c\x39\xc8\x67\xd6\x3f\x8a\x9c\x60\x26\x7a\x9b\x88\x7d\xcb\x87\x1a\x8f\x62\x74\x52\xb3\x2a\xe8\x25\x02\x9c\xd8\xc7\xa8\x56\xd9\x1b\x16\xee\xf5\xd8\x7f\x62\xb0\xc3\xb6\x78\x20\x8b\x33\xae\x53\xa6\x04\x80\x7d\xe7\x20\x27\x3f\x5a\x15\xfb\x85\x74\x65\xfd\x53\x6e\x1b\xc0\x50\x70\x8d\x3d\xac\x28\xad\xda\x99\x1a\xf9\x75\xac\x8d\x30\x5b\xf3\x44\x6d\x84\x25\x56\xc6\xdf\x3c\xad\xbe\x92\x1f\x7a\x67\x49\x6a\xc3\x46\xf2\xfb\x20\xb3\x58\xfd\x41\x2b\x39\x08\xfb\xc5\x83\x46\x4a\xb2\xe4\x82\x18\x67\x66\xcc\x0c\x72\x38\x63\xb8\x80\xcd\xf8\xa2\x1a\x17\x88\x4e\x43\x55\x01\xff\x5c\xe7\xd7\x68\xef\xcb\x7a\xff\xd1\x04\xc9\x68\x71\xde\x37\x20\xfc\x37\x34\x6a\xaa\x50\x8c\xea\x58\x66\x39\x15\x86\xf7\x8d\x4b\x37\xa0\x97\x34\x9f\x3f\x91\x15\x88\x6b\x7f\xb3\x31\x7f\xaa\xe1\x12\xe0\xa0\x7b\xca\xd7\x6f\x1c\xab\x56\x9c\x2f\xb4\x5b\xaa\xf6\xfb\xbc\x9c\x51\x2c\x5a\x00\x5e\xec\xc5\xaa\xdc\xe3\x62\x01\x8a\xd0\x7a\xbd\x7f\xba\x10\x47\x88\x96\xfe\xe1\xe9\xa2\xea\xd2\x3b\xa4\xbb\x1c\x7c\xac\x84\x2a\xbf\xde\xe7\x61\xcb\x29\xd5\xb3\x57\x16\x0b\x5d\x58\x05\x88\x41\xec\xf9\x0e\xfa\x94\x7c\x6e\x89\x25\x15\x68\x4c\x3e\x3d\x6c\x8d\xd2\x0d\x75\x28\x87\x4f\x9f\x3f\x0c\xa7\x04\x3a\xb1\xfa\xda\xa7\x97\xc5\xfa\xaf\x3e\x93\x7b\xf4\x86\x8d\x43\x75\x60\x07\xbb\x0d\x87\x15\x6e\x79\x9e\xcf\x23\x07\xce\xf2\xcb\x6a\x54\xe2\x85\x42\x6f\x10\x51\x0b\x7d\x17\x0b\x51\x3a\x73\xe5\x6f\x50\x28\x43\xf5\x74\xcf\x61\x3f\xfb\x9e\x60\x22\x6d\x77\xfb\x27\x55\x90\x82\x48\x29\xed\x45\x4b\xab\x1e\xc4\x65\x38\x31\x04\x60\x88\x9c\x58\x58\xa1\x7c\x4f\x55\x50\x89\x97\x5a\x7d\x11\x86\x47\xd9\x19\x3e\x1f\x5a\x26\xff\x7b\x3e\x39\xcc\x19\x8a\xf1\x8a\x98\x42\xfc\xee\xe5\x1b\x8b\x72\xdf\x70\x5f\xdd\x01\xb0\x62\x86\x69\xb1\x5f\xc5\x72\xb6\x4e\x11\xe9\xc5\x89\xc0\x62\x04\x40\xbc\xf1\x29\x27\xbd\x76\x63\xfc\xe4\x22\x1a\xac\x12\x9f\x3f\xc3\x59\x0b\xcb\xf7\xb4\xb8\xdb\xaa\x28\x0c\x83\xbe\x93\x6d\xb1\x46\xb3\x0d\x67\x37\xfa\x79\x17\xa5\x34\x21\xfb\xca\x64\xbd\x87\x40\xc9\x29\xce\x9f\x77\xf2\xcd\x59\xfe\xf1\x1f\x4f\x81\x43\x3f\x35\x1b\x85\xb1\xf2\x8c\xc2\x27\x4b\xe8\x57\xa9\x94\x78\x97\x76\x61\x4b\xb1\x60\x70\x0c\xc0\xf4\x9f\xa0\x6b\x97\x0f\x2f\x0a\x13\x9b\xd7\xba\xdf\x57\x23\x5f\xce\xe3\xc4\x97\xdf\xf0\x25\x7c\xb6\x19\x43\x36\xbb\x09\x0d\x1e\x5c\x87\x19\xb2\xfc\xa7\x0c\x22\x84\x85\x8a\x92\xc6\x3b\x38\xfa\x93\x05\x79\x73\x7d\xff\xd4\x8e\x4b\x28\x38\x8e\xb9\x21\xa7\xbf\x5c\x49\x76\x5b\xab\xbf\x2b\x83\x80\x94\xae\x7e\x85\xbf\x7d\x06\x9e\xc3\x29\x8d\x8d\xf9\xe7\x12\xf3\x27\x9a\x2a\x89\x8c\x31\x70\xa2\x58\x44\x45\x78\x58\xdd\x16\x59\xd8\x25\xbe\x4d\x13\x9a\xf2\x66\x61\x26\xda\x02\x58\xd6\x6b\x9f\x72\xf9\xee\xb3\x99\x7e\x29\x1c\x2f\xa7\xdc\x94\x2a\x0c\x40\xd1\x26\x47\x26\x7a\xb6\xdd\xc4\xe0\x8e\x4b\x74\xe4\xa4\x22\x88\x22\x45\x53\x3f\xc8\x0c\x5f\x7a\x97\xef\xe1\x52\x6f\xa9\x31\x06\x89\x4d\x7a\xd9\x32\x09\xf3\x95\xfc\x6a\xb7\xb8\x90\x3f\x27\x4b\x3f\x6d\xa5\xed\x09\x40\xb2\x39\x75\xa9\x44\xdd\x22\x18\xcc\x68\xc0\x91\xee\x8b\x3d\xea\x14\xdf\x5f\x42\xb7\xd4\xed\x09\x84\xca\xe0\xdf\x60\xbd\x4e\xa9\xd3\x83\x36\xa7\x14\x9a\x80\x24\xfd\x93\x68\x9b\x1d\x48\xf4\x1e\xef\x19\x3e\xb5\x64\xf9\x55\xb9\x07\x9d\x1a\xfd\x19\xde\xef\x16\x90\x88\xb6\x7d\x7a\xc3\xa4\xf3\x60\x5e\xf6\x5b\x3d\x25\x35\xec\x25\x96\x13\x7e\xe5\xb7\xea\x95\x01\x6d\xb3\x9f\x69\xab\x78\x58\xea\x63\x50\x30\x59\x5f\x00\x69\x27\x55\xc1\x92\xce\xf3\xb5\xc6\x81\x03\x09\xc8\xbe\xb9\x27\x6b\x26\x5a\x4b\xef\x2e\xc5\x2d\xfe\x7e\x43\xf2\x88\x38\xd1\x6a\x4e\x27\x30\xe5\x74\xbb\x83\x24\x70\x01\xc2\x2a\xf7\xcd\x02\xf5\x7e\x95\x31\x69\x6b\x00\xc8\x2a\x69\x02\x08\xbe\x3c\xa6\xd9\x0e\x34\xea\xa3\x72\x2f\x03\x9e\x17\x5b\x8b\xb2\x8e
\xb8\xef\x67\xff\x0f\x16\x7a\x16\x70\xfb\x1a\x46\x2d\xde\x68\xe3\x91\x2c\x97\x66\x04\x21\xb4\x0a\x51\x77\x74\x94\xb9\x84\xa3\x20\x1b\x04\x88\xbd\x56\x8b\xfa\x3a\x96\x58\xcb\xf5\xde\xa4\x0d\x13\xbc\xaf\xa0\x93\xaa\x27\xd7\x6d\x14\xa2\x35\xa1\x6c\x2b\xf9\x8b\xe3\x4d\x7c\xdc\xa3\x7b\xf5\x7d\x02\xf9\x28\x67\x91\x0b\x04\x58\x31\x37\x71\x67\xe1\x49\x46\xef\x3a\xf0\xa9\x42\x7c\x1d\xfb\x9d\xcd\xd8\x5e\xb5\x9f\x2c\x6c\x22\x9e\xf8\x18\x1c\xe8\x79\xb1\xfe\xcb\x07\xb8\x27\x23\xd2\xfa\x45\x6d\x6a\x54\xdb\xc3\x28\x3b\xfd\x39\x11\x01\xdd\x07\x79\x96\x65\xb9\xec\xdf\x2a\x6b\x18\x37\x43\x00\x4c\x9f\xbe\xb3\xd1\xdf\x1b\x11\x21\xa9\xde\xb8\xb9\x4e\xeb\xe5\x80\x65\xcd\xf4\xb6\x46\xc9\x42\x51\x73\x73\xf9\x51\x76\x64\x07\x82\x38\x8e\x01\xf8\xcc\xa0\xd7\x72\x80\x75\xb1\x60\xaa\xd3\x77\x04\xec\xa6\xe7\xba\x84\xe9\x4d\x94\x5e\x0c\xe6\xa3\x8e\x48\xba\x52\xbe\xbd\x5a\xd0\xae\x8f\xb9\xf4\xfe\xd5\x9d\x77\x96\xc4\xb7\x55\xd0\x28\xfa\x3b\x54\xad\xe6\xfe\x46\x75\x26\xfa\xf9\x7e\x07\x49\xbc\xa7\xa0\xcc\x00\x23\x35\xc0\x58\x89\x0c\xab\x42\x90\x59\xb4\x4f\x43\x80\xc0\x14\xbc\xfd\x9b\x15\xe0\xc6\xda\x43\x70\x3b\xd8\xc7\x32\x37\xea\x3b\x99\xfd\x05\x72\x08\xf3\xbe\xba\x63\x4c\xb3\x8c\x1c\x7c\x6b\x33\x19\x98\x24\xbf\xc8\x45\x4e\xa4\xdd\x4b\x8b\x40\xe7\xd8\xcd\xd7\xe5\x96\xe2\xbf\x8d\xcc\xb9\xd1\xe6\x24\xa8\x2d\x6d\x64\x86\x5a\x62\xa5\xfe\xe6\x60\x74\xb1\xdf\xe5\xcb\x94\x92\x81\x0e\x5a\xe9\x68\x8e\xc1\xcd\x43\x53\xe9\xd3\x79\x07\x4c\xbd\x02\x09\x20\xe5\xad\x40\x92\xde\xb2\x55\xb1\x88\x6a\xa6\x00\x57\x1e\xb4\x92\xa8\xa6\x9c\xb4\xb2\x75\x7d\x50\x6b\x88\x0f\xe5\x73\x31\x28\xdc\xcd\xec\x76\xf3\x8d\x60\xf2\xfd\xb6\xf3\xc6\x68\xdc\x26\x13\x6a\x52\xa0\x00\xeb\x37\x5c\xa2\x1e\xac\xf8\xd6\xe5\x19\xcb\xe6\x77\xf9\xf8\x52\x26\x63\x06\x73\x7c\xa6\xdf\xee\x5f\xdd\xb2\x17\x8a\x00\x9a\x5c\xf9\x0d\x3c\x4b\x92\x50\x3a\xcb\xc5\x69\x67\xea\x15\x9d\x5c\x80\x64\xf9\x7e\x11\x8d\x8e\x18\x14\x44\x30\x8a\x04\x7b\xf7\xf0\x38\xc4\x6e\x28\xb5\x8a\xe5\x54\xb3\xdf\x83\xb3\x36\xee\xb4\xda\x03\x62\xb4\xcc\xa9\x36\x64\x62\x10\x0c\xe6\x83\x9b\xcb\x01\x62\x50\x53\xd0\xe1\xde\x78\xeb\x23\x06\x00\x42\xec\xde\xeb\x7a\x2e\x8e\x41\x75\xf5\x3c\x9e\xe7\x36\xee\xfc\x9e\xd1\xbe\xa9\xdb\x28\xb8\xeb\xa2\x3d\x19\xc6\xfa\x07\xd4\xfc\xa4\x44\xc3\x7b\x85\xad\xd0\x0a\x7d\x4c\xa2\xe9\xa4\x6b\xf0\xb0\xa3\x5c\xde\x15\xac\xbf\x59\x08\xe0\xf8\x65\xd7\x10\x45\xf2\x7c\x64\x08\x02\xbd\x0a\x29\xfa\xbe\x84\x4f\xb9\x6b\xcd\x72\x14\xea\x61\x50\x22\xa5\x34\xf4\xfb\x41\xba\xba\x50\xc3\x13\xba\x2c\xab\xde\xc6\x8d\x59\x10\x8c\xf1\x36\x5b\x64\xfc\x32\x9e\x8c\x91\x02\x80\x08\xc8\x3c\xd1\x85\xd4\x1e\x7e\x8a\xa7\x46\x4c\x40\xab\xcd\xcc\x41\x59\x34\x4c\x9e\x19\x9e\xf5\xfe\xeb\x59\x73\x6b\xb2\x73\x5b\xf1\x5d\xe9\x21\xb9\x20\x92\xef\xfb\x3e\x0d\x15\x1f\x69\xbd\x2c\xcf\xb2\xc0\x37\x00\x42\xed\x44\x66\xf6\xf7\x48\xb1\xed\x9e\x23\x72\x1c\x33\x5f\xba\x90\x7c\x33\x2a\xde\x29\x1a\xcd\x6e\xd4\x5c\x2d\x7f\x1b\xb4\xb8\xe8\xaf\x7e\xa4\x93\x98\x2f\xa7\x8d\x8e\x60\xa0\x48\xc1\x9e\x6f\xbc\x22\x98\x68\x9a\x57\x84\x4a\xf9\x12\x64\x98\x3d\x07\xc1\x6a\x5c\xa5\xab\xb9\xa1\x7f\x45\x37\x93\x29\x46\x46\x2f\x4b\x8f\x84\xc5\x27\x31\xfe\xce\xae\xd5\xc7\x62\x3b\x98\x76\xd9\x57\xa5\x5b\x94\xce\x46\xdb\xd6\x43\xb7\xc3\xa2\x1c\x50\x24\x00\xe0\x0d\xc0\x89\xb8\x17\x12\x75\x93\xf1\x83\xb2\x10\x6d\xf6\x89\x84\x14\x44\x1e\x2b\x3f\x63\xec\x6d\x5f\xd0\x03\xb4\xc6\x46\x3f\x54\xf8\x10\x98\x9f\xfa\xe0\xb2\xdc\xb8\x11\x89\x6b\x69\xe1\x79\x24\x10\x41\x6a\x03\x83\xb9\x8f\x6c\xa0\x65\x59\x7c\xcd\x09\x26\x25\x7c\xac\x2a\xa6\x08\x99\x5d\x82\x4a\xaf\x9c\xa6\xae\xd3\xa6\xf4\x87\xc9\xff\xed\xf3\x76\x2f\x2f\x26\x2c\xa6\x4f\xde\x61\x3d\xbe\xe9\xde\x0
1\x91\xb5\xbb\xcc\x8e\xe4\x46\x1f\x82\x94\x48\x19\x06\xc3\xba\x24\x03\x98\xb9\xc7\x34\xdc\x2d\x6a\x0a\xbb\xbc\xf7\x05\x70\x17\x19\x84\x50\x9d\x9f\x78\xdd\x82\x2e\x9d\xb7\x3d\xf5\x90\xcb\x60\xad\xc8\xe6\x15\x5c\xba\x03\xdc\xdd\xdf\x51\x38\x12\xbd\x37\x11\xde\x0d\x80\x1b\x52\x80\x22\xa1\x8a\x00\x16\x97\xa8\x29\xea\xec\xfb\x66\x2f\xb9\xb5\xc7\x43\x39\xd7\xb6\xd9\x0f\xbe\x39\xd8\x0a\x92\x70\x80\x5c\xf8\x5b\x63\x9e\x61\x37\xc7\x0a\xb2\x14\x98\x43\xde\x97\xe8\x85\xbe\x0b\xd3\xa4\x18\x99\x8e\xda\x22\x61\xbd\x01\xc9\x70\x86\x91\x71\x47\xd6\x43\xd3\x4e\xe3\x79\x6a\x82\x24\xcb\x77\xc2\xc5\x0e\x89\xf8\xd8\x0e\xa1\x55\x6d\x56\xa0\xbd\x93\x7f\xae\x77\x81\x21\x51\x87\x27\xe0\x6b\xd2\x06\x85\xa4\x86\x91\xfb\x7e\x0f\xe0\xcb\x24\x36\xc6\x26\xc7\x53\x7f\x5e\x00\xd9\x9c\xdd\x00\x04\x39\x82\x91\x05\x76\x67\xc7\xc2\x48\x67\xe5\xd7\xa8\x2e\xb5\x16\x19\x37\xf2\x12\x04\x5b\xd9\x9d\x09\xac\xf1\xb0\xff\xd5\xb8\xde\x9a\xae\x3b\xd0\xb3\xd4\x4e\x2a\x87\xd3\x02\x54\xd0\xed\xb3\xf0\x81\x65\xd6\xcf\xf4\x14\x02\x84\xd1\x85\x95\x43\x98\x7c\xc5\x10\x6b\xfc\x74\xe0\x1e\xfc\x8e\xea\x20\x64\xc9\xe6\x4f\x6f\xc5\x0f\xf1\xf4\x11\xe3\xb2\xd2\x52\x8b\x1b\x1f\xfc\x3b\x8c\xbd\xf3\xfc\x7a\x22\xfb\x8a\xf6\xbb\x16\x5c\xba\x1a\x27\xbf\x1d\x10\xd2\x00\x60\x02\x00\x5b\x41\x6e\x20\x8b\x9f\xd8\x53\x3a\x33\x00\xc7\x7b\x82\x20\x8c\x4d\x20\x00\x5b\xbe\x05\xe8\x92\x35\x96\xcd\x20\xaf\xce\x60\x97\xb1\xe9\x69\x4d\x9e\x5c\x85\x2e\x16\xd9\x53\x66\xa7\xe8\x21\x5a\xe8\xa2\xfc\xa3\x0f\xb9\x49\xea\x32\xb5\x5c\x16\xab\x61\xb4\x80\x24\xee\x7e\x7c\x37\x0d\x4c\xc7\x18\x07\x00\x41\x20\x4b\xc1\x0c\x5c\xf3\xbd\xd6\xeb\x42\xc4\xe9\xe4\x74\x92\x89\xb6\x3e\x30\x49\xff\x91\x18\xbd\x39\x50\xd8\xc2\x84\x80\x0f\x1c\xee\x93\xc2\xcf\x47\xa1\x18\x0b\xad\x3f\x9f\x4a\xab\x72\x39\xfa\x58\xf8\x43\xd9\x35\xad\x7e\x1c\xaa\x91\xfb\xac\x5f\xb0\x33\xc3\x5c\xd7\xc0\xf8\x3e\x44\x8e\x24\xd3\x3d\x8f\x09\x16\x3f\x0e\xd5\x81\xe9\x23\x90\xec\xa9\xef\x62\x4c\xba\x41\x2a\x60\x33\xbd\x09\x79\x93\xfc\xd4\x77\x7b\x51\xd9\x2c\x57\x83\xa3\x04\xfc\x00\xdf\xa4\x56\x93\x1b\x4e\x3c\x2a\x83\x0f\x1e\x08\x57\xff\x37\x67\x52\x6c\xfd\x98\x7a\xd3\xe3\xcb\xc0\xc5\x52\x22\x67\xe0\xea\x22\x48\xae\xe5\x40\x83\x39\x10\x14\xa4\x28\x5c\xa5\xbd\xc8\x55\xbe\x80\x71\x27\x98\x64\x3b\x3b\xf3\x15\xbe\xa4\x5e\xcf\x11\x9e\x0a\x26\xa1\x99\x89\xef\x62\xd8\xb8\x87\xea\xc1\xe1\x6f\x96\x1d\xe9\x5c\x9b\x2d\xcf\xa2\x57\x59\xd6\x83\xdb\xfd\xb0\xb7\x9f\x13\x3f\x68\x00\x0a\x2f\xe3\x64\x0c\x82\x89\x73\x62\xdf\x31\xdc\xff\xdc\x09\xda\xf9\x4a\x5b\x9d\xa9\xd7\x9a\x4e\x2b\xe1\xe9\x27\xef\xfb\x16\xe9\xff\xc7\x27\xd6\x39\xc2\x17\x3a\xd1\x5d\x0d\x6e\xcd\x7c\x12\x69\xf4\x22\xd0\xb0\x93\xd0\x39\xd3\x6c\xf6\x3e\x00\x75\x56\xe2\xfa\x57\x23\x79\x58\x38\x99\x0b\x91\x04\x5b\x4b\xac\xc4\xd2\x02\x94\x41\x23\xc6\x88\x80\x71\xcd\xe7\xd8\x1b\x97\x98\x1f\x17\xa0\xa9\x06\xdd\x8e\xab\xfb\xb1\x1c\x35\x99\x06\xb7\xdb\x97\x74\x5c\xd7\xab\x0c\x9b\x22\x2f\x04\xfb\x0d\x0d\xef\x3a\xbc\xd0\x84\x6f\x39\x51\x2c\xa4\x7e\x69\xe6\x44\xf8\x34\xe5\x66\xac\x41\x20\x99\xd6\x9f\x6c\xfe\x81\x19\xc8\x37\x9c\xd9\x09\x76\x67\xd1\xce\x39\xf9\x01\xfb\xec\xaa\x52\xa6\xca\xf8\x73\x7e\xef\xf3\x9d\x3e\xfa\xe9\x55\xbb\x6e\x28\xf8\xbb\x9a\xd8\xf7\x53\x97\xbf\xff\xe1\x78\x92\xd3\x67\xb9\x76\x16\xea\xab\x44\x2e\x5f\xa5\xbe\x46\x33\x6d\x29\xb6\x55\xac\x5b\x2d\x6e\xf9\x0a\x2f\x01\x83\xc0\x99\x7b\x91\x55\x16\xc5\x7c\xfb\xf9\x13\xaa\xc3\x59\xad\xee\xb4\xba\xdd\x22\x2a\x7e\x19\x6e\xd5\xec\xd0\xbb\xa2\x92\x6e\xa3\x14\x0a\x38\xf3\xf7\xff\x72\xd5\xf1\xba\x25\x41\x68\x02\x4e\x98\x3f\xca\xdd\x35\x41\xb7\x87\xb5\x88\x18\xb2\xbb\x3d\xc8\x75\x43\xcb\x08\x9b\x72\xfe\x4a\xa0\xd9\x39\x7e\xe8\x50\x
02\x95\xb0\x3f\xb8\xf9\x77\x66\xca\xbf\x3c\x49\xe8\xef\x24\xc4\xf4\x11\xb9\x31\xbb\x39\x55\x4b\xbf\x5e\xf0\x61\x7c\xe0\x41\xcd\xab\x3b\xb9\x56\xe5\x46\xde\x69\xe6\xec\x4b\x52\xb9\xef\x82\xe8\x9f\xc8\xa7\x74\x5f\x7f\xf9\xbc\xf9\x77\x2f\x32\xdc\x97\x7a\x74\x67\x4f\xb8\x26\xf7\x66\x73\xe9\xb5\xa3\xf5\x53\x37\x5e\xdf\x57\x3b\x06\x9d\xfc\x82\x59\x7b\xdb\x53\x6a\xb3\x41\x22\xd3\x82\x75\x09\x73\xb7\xb8\xe3\xe7\xf1\xdd\x38\xd0\xc9\xef\xcf\x5f\xec\x8c\x7a\x05\x9a\x14\x74\xdf\xfa\x19\xc9\x51\xeb\xa9\x8f\x2d\x9a\x13\x9c\xae\xbf\xb5\x55\xfd\xa6\xb2\x65\xab\x37\xb7\x46\x7d\x0e\x63\xd6\x7e\x57\xab\x32\x7b\xfc\xf3\xa9\x54\x41\xef\xfc\xcf\xa7\x8b\x0f\x25\x11\x01\x48\x12\xba\x11\x19\xc2\xd2\xcf\xec\x1a\x6c\x1f\x54\x4e\x61\xb0\xba\x13\xc9\x62\xee\x1b\x17\x94\x39\x99\xdb\xb0\x07\x54\xc8\x5e\x74\xfb\x61\xae\x65\x42\xb7\x1a\xab\xb9\xf1\xfd\x8c\x7f\x71\xeb\x5a\x43\xeb\x9a\x0f\xce\xc9\x1a\x7b\x58\xb1\x6d\x59\xe4\x22\x8e\xae\x07\x45\x22\xca\x5b\xeb\x6e\x6a\xd0\xb1\xc5\x16\xf7\xec\x41\x30\xe7\x25\x5c\x39\x7b\x7f\xff\xce\x71\x19\xb5\x46\x4a\x2e\xdb\x97\x35\x9b\x31\x6f\x0a\xe1\x0f\x39\xfe\xe4\x23\x96\x3b\xe6\x0b\xa3\xac\xbd\xfa\x3e\x6b\x0a\x2b\xef\x6f\x5e\x15\x65\x7e\xe4\x02\x28\x8c\xec\xdf\xfc\xae\x96\x35\x9a\x14\x4c\xdf\x5a\x66\x3b\x63\xa9\x72\xea\x6a\xae\x4f\xc4\x55\x9b\x4e\x59\x1b\x54\x61\xb9\x94\xc4\x9a\xf3\xeb\xc0\xfa\x1b\xb4\xfd\x6d\xad\x50\x0d\x3f\xcc\xf1\x1c\xf4\xa7\x15\x64\xc7\xaf\x7e\x59\xd1\x1a\x54\x5d\xa2\x2c\x2e\xa0\x30\x79\xbb\xcb\x22\x0d\x42\x1a\xd9\x20\x9c\xb5\xe1\x4c\xfb\x5c\x20\xb6\xf1\x52\x1a\x92\x3e\x29\xe3\x76\x70\x43\xfe\x7a\xbb\x44\xc7\xd2\xf1\xff\xea\x61\x08\x49\x84\x69\x7d\xca\x09\x04\x53\xe5\x24\x63\xe3\xee\xf7\x4b\xd3\xe2\x35\x1d\x7a\xef\x33\xe6\xca\x99\x7e\xdf\x52\x88\xe9\x9f\xf3\x6d\xbe\x5d\x79\xae\x00\x51\xff\x97\x03\xb4\x9d\xa5\x67\x4f\xcb\x78\xe1\x5a\x15\xac\xe6\x94\x0f\x0c\x6e\x84\x86\x34\xf2\xc7\x53\x18\xa0\x70\x18\x9a\x6b\xaa\xb5\x60\xc7\xef\x0e\xeb\xf2\x43\xf1\x6b\xca\xa5\xf2\x3f\xeb\x91\x2e\xcb\x31\x7a\xf1\xcf\xf7\x4c\x72\x35\x6c\xdf\xe4\xae\x85\xda\x31\x42\xda\xb5\x26\xf7\xa5\x7e\x3c\x56\x6c\xee\x8e\x72\xd6\x43\xff\x1c\x2f\xad\xf0\x4e\xf6\x52\xd9\x56\xb5\x3f\x6c\x4e\x73\xea\xb4\xdd\x4b\xc5\x6c\x93\x5c\x0f\x54\x49\xbc\xfe\x9b\x47\x6e\xc1\x44\x7d\x51\x97\x50\x59\xec\x42\x39\xba\xe6\x9b\xd9\xa7\x65\x94\x93\xc1\x13\xf9\x02\x61\xc6\xd2\xdb\xc5\xd0\xf9\xcc\x07\xd8\xc6\xc9\x93\x17\x6e\x6a\x7a\x5f\x33\xd1\xf4\xa3\x22\xec\xf7\xb3\xff\x1b\x47\xc3\xe6\x6b\x27\x82\xe1\xe4\x22\x16\x21\xf7\x08\xee\xf7\xbc\x0b\x14\x92\xf5\x65\x74\xa8\x27\x6d\x50\xc2\x97\xf6\x72\xee\x29\xdd\x0e\x66\xe7\xef\x88\x31\xe4\x2d\xcb\x23\xe3\x8b\xff\x07\xdb\x06\x93\xde\xd8\x4e\xbe\xc2\x76\xa9\x5a\x78\x85\x5d\xc0\xb3\x08\x15\xf3\x66\x2f\xaf\x84\x29\x17\xb2\xd3\x86\xdd\x3b\xec\xc2\x66\x71\x6c\x88\xba\x79\x83\xc5\xd4\x66\xc5\x78\x3b\x6a\xcf\xa9\x46\x93\x8a\xfd\x62\xf5\x7f\xbf\x4f\xa9\x5d\x6f\x74\x15\x51\xfb\xf3\x8c\x2e\xfc\x1e\xca\x21\xf8\xe8\xd6\x8f\x85\xad\x9d\x38\x66\x3b\xec\xe0\xbd\x9f\x8e\xdb\x86\x56\x72\xbf\x66\x8c\x0d\x67\xe3\x83\x73\x1a\x8e\x98\xb1\x57\x45\xb6\x95\x50\xc4\x6b\x7b\x46\xbf\xba\x17\x3e\x94\x26\xe8\xcb\x59\x62\xcb\xa7\x6a\xbd\xc9\x31\x4d\xbe\x1c\xb3\x45\xa0\xf9\x20\xc1\xec\x8b\xaa\x62\x21\x5b\xfd\x92\xaa\x34\x64\xe9\x93\xea\x81\x46\xef\xa1\x7a\x29\x15\x7e\x77\x51\xc6\x7e\xbb\x7f\xff\x3e\x3c\x54\x6f\x44\x79\x46\xed\x97\x3d\x17\x42\xbb\xc4\x4a\xac\x3a\x21\xc9\xbb\x1a\x82\x22\x13\xb1\x97\xb4\x19\x15\xc3\x75\xd4\x85\x04\xf7\x35\x63\x43\x6c\xa1\xab\x54\x05\x3c\xef\xce\x9d\x44\x67\xe9\xdc\x27\x4b\x81\xfa\x33\xf8\x72\xcf\xe1\x31\xff\xee\xc8\xab\x57\xd6\xbf\xa1\xb7\x6a\xed\x12\xcf\x69\x8c\x67\xab\x3b\x41\xa7\
x1d\x23\x5f\xdd\x68\x38\x68\xd2\x6c\xa9\x9b\xcf\x44\xf1\x27\x3a\xb3\xcb\x38\x3d\xc9\xdc\xc5\xf8\x62\x7b\xd1\x9b\x2a\xc0\x78\xee\xdb\x8c\x8a\x3d\xb3\x39\x52\x5f\x7e\x35\xde\xcf\x4e\xb9\x9a\xd3\x49\x98\xf2\x5c\x62\xe1\x13\xc3\x86\x6c\xc4\x5c\xae\x8a\xe1\xcd\xbf\x01\x58\xeb\xa0\x93\x6a\xf8\xf6\x84\x67\x4b\x79\xee\x94\xfa\x6f\x93\xa1\x99\xc2\xfd\x0e\xf9\xa1\x86\x3c\xe5\x62\x87\x95\x74\x52\xcd\xed\xef\x67\x43\xfc\x9d\x15\x30\x2f\xa7\x97\xd8\x86\xea\xd5\xeb\xbc\x9b\x34\x5a\xfb\x23\x64\x11\x72\xb6\x0e\x77\x18\x97\xbc\x5e\x82\x89\xce\xb1\xeb\xfd\x34\xe1\xa8\xbd\xa8\x60\x11\x5f\x18\x3d\xab\xf1\xc5\x29\x2d\x06\xee\x60\x93\xef\x59\x1e\x5b\x61\x7f\x4b\xcb\x2f\x9f\xd4\x3a\x5b\xcf\x8b\x80\x37\x31\x13\x48\x8a\x72\x2d\x9b\xe6\xb8\x1f\x75\x77\xa0\xe3\xb6\xc1\xe4\x2f\xc6\x80\x4b\x45\x93\x25\xfe\x72\xc3\x9a\xec\x5e\xa6\x74\x35\xc0\x9e\x5a\x6f\x71\x43\x20\xfe\xd9\xfd\x53\x4f\x67\x17\xef\x32\xc8\xdb\x6d\x7c\xbe\xcd\xb7\x15\xb5\x37\xb6\x08\xf3\xf0\x6b\xe0\x76\x64\x3f\xf1\x13\xf2\x66\x13\x8b\x9a\x76\x17\xe8\x6c\x14\x6d\x74\x5b\xd1\xc7\xda\x3c\x15\xb8\x77\x78\x7f\xa3\xcf\x45\x45\xa3\x50\x84\xc7\x21\x2e\xd3\x00\x6a\xd6\xa9\xea\x81\xd6\xe5\x96\xf9\x4c\xd0\x9b\xbc\x09\x65\x45\xb6\xe9\x39\x54\x79\xb2\x11\xc6\x74\xc4\xfd\x7e\x85\xc3\xe7\xf7\x77\x48\x74\x33\xca\xb1\xac\x69\xf6\x97\x94\x54\xb1\x25\x98\x27\xf2\x00\x2a\x92\xe2\xbb\x7f\x9a\x44\xae\x16\x8f\x87\xf8\x78\x4a\xe6\xfb\xe3\xcc\xa9\x36\x68\xe2\xe4\xab\x76\x81\x4a\x82\xec\x97\x85\x25\x32\x67\xe2\x28\x68\xa9\xd9\x9e\xff\xb9\x57\x82\xe6\xb2\xc5\x9d\x51\xde\x92\xb8\x9e\xa8\xd9\x9d\x7a\x16\x82\xca\xfd\xc6\xf1\x53\x57\x6b\xc5\x0a\xcd\x75\x8e\x84\x5a\x24\xc1\x7b\xc8\x97\x76\xc6\x26\xf2\xe4\x1f\xa7\x0a\xcc\x73\xc5\x95\xd2\x2c\x39\x85\x7e\x1e\xe1\x9b\x19\xc5\x82\x8a\x88\xfc\xe2\xd3\x57\x42\x79\xdf\xe6\xc5\xbd\x14\xf4\x52\xeb\x01\xa7\xfc\x17\x13\x96\xcb\xae\x5e\x3c\x37\x89\x76\xfd\xcf\x8c\x86\x7b\xf4\xbc\xd9\x5d\x65\x0a\x36\x0d\xcf\x1a\x53\x68\xeb\xeb\x41\xf0\x8f\x73\xc6\x68\xac\xa6\xb7\xfc\xf5\x79\x6e\xcc\xd2\xcb\xed\x4b\xf2\x51\x08\x32\x85\x78\x95\xcf\x6b\xde\x47\x6a\x5c\xc3\x55\xd0\x50\x12\x45\x89\x32\x7b\xc9\xcd\xae\x6a\x8f\xb1\x48\x7f\x7b\x72\xd2\x2d\xee\x1c\x7e\x6c\x74\x2a\x94\xf2\x86\x90\x2c\xc9\x74\xc6\x44\xe1\xca\xd9\x2e\x58\x34\xbb\xf1\xa5\x5e\x49\x2b\x4d\x84\xfb\xa2\xde\x31\x06\x8d\x90\xde\x93\x01\x18\xb3\x35\x4c\x7c\x8d\x00\xa3\x5b\x70\xa6\x23\xfd\xe0\x8a\xcd\x06\x1a\x33\x7e\xd6\x7c\x40\xf0\x9b\x52\x90\x63\x49\xbf\xfe\x13\x5b\xd2\x4f\x9d\xee\x58\xfc\xe2\xd2\xdc\xc5\x2c\xe2\xe5\x5a\x30\xdc\xea\xef\xad\x52\x46\xd8\x9f\xb8\x6b\xf5\xbe\xd0\xfb\x99\xec\x88\x4d\x71\x21\x8d\xcc\x18\xf1\xf8\xfe\xbe\x33\xd4\x6c\xbc\x4f\x1f\x0b\xbe\x9f\x5d\x29\x9f\xad\xc3\x24\x56\xd2\x95\x0a\xe5\x7b\xa0\x37\x66\x6c\xf8\x6b\x53\xee\x4c\x15\x9f\xff\xee\x42\x52\xd7\x42\x68\x8d\x87\x2e\xc9\x57\xd1\x36\x04\x81\x25\xdd\xe5\xbd\xe7\x2b\x2d\xe9\x16\x6e\x5d\x09\x9a\x05\xdf\x4d\xb8\xe0\x31\x49\x4b\x95\x27\x60\xf7\x42\x42\x08\xd1\xf9\x5b\xb6\xfa\x48\x68\x8f\xb3\x34\xd6\x84\xd0\xee\xce\x9f\xc4\x5f\x4d\x09\xf7\xf4\x11\x5b\xe6\x45\x59\xab\x03\x4c\xb3\x9d\x4b\x4a\x60\x86\x11\xc4\xf3\x9d\x0a\x09\xb1\xdc\xac\x82\x3f\x4f\xd6\x19\x9f\xca\x09\x49\x44\xf8\xc1\xcc\x55\xcb\x37\x87\xf7\x22\x23\xb8\x7a\xee\xc6\x2e\xa3\xec\xb5\x44\xfc\x44\xf6\x6f\xe5\x42\xc3\x46\xad\x51\x62\x4f\xe0\x37\x4d\xc0\xd6\x6f\xe9\xaf\x47\x02\xd7\xc9\x15\x34\xb0\xf2\xd5\x58\x54\x45\x26\xda\x02\xd3\xdb\x60\x30\x7a\x3a\x26\xbe\xfe\x58\x15\x9f\xdd\x7a\x8b\xea\xfb\xda\x12\xef\x85\x41\x0e\x3f\x97\x93\xa1\x83\xc4\xbe\xef\x0e\xb2\x63\x8c\xce\xcf\xd0\xe1\x38\x9c\xf2\x27\xd2\x13\x49\x65\xa2\x29\x77\x55\x7e\xf0\xbf\x6c\xf5\xd2\xcd\x31\x6f\xf3\xfe\x77
\xd6\xf4\x53\x1e\x4f\xd5\x7c\xe3\x41\xc4\x60\x5a\xd0\x5d\xa2\xbb\xe7\x80\x7b\x4e\x72\x31\x7c\x96\x07\x86\xe4\x66\x6f\x54\x9a\x28\x65\x68\x2a\x84\x62\x53\xa5\xc8\x89\xc6\xfc\x99\x04\xeb\xa7\x7c\xcf\x2a\x9b\x45\x2b\xce\x44\x67\x39\x41\xc9\xe9\xe5\x4b\x94\xf3\xb5\x24\xbd\x8f\x1f\x1f\x03\xc1\xe2\xf6\xcf\xd9\x7c\xfe\x9e\xcd\xe3\x91\x77\x9f\xd4\x91\xb5\x35\x50\x22\xd4\x7f\xc6\xfd\x44\xeb\xf5\xf0\x64\xc2\x3d\x04\xd1\x4e\x0c\xca\x9f\x49\x0d\x56\x8c\x96\x2d\x2f\xc7\x0b\x69\xf6\xad\x2f\xcd\xf6\x23\xe6\x48\xf6\xed\x7f\x72\xbd\xa7\xb6\x3e\xc3\x08\x82\xb0\x1a\x63\xfa\x92\x1c\xb2\x7f\x90\x5b\xbf\x37\x08\xe2\xc5\x62\xd9\xe7\x1f\xf1\x45\xfd\xce\xb2\x8c\xfa\xb1\x0b\xed\x67\x19\xb0\x76\x71\x2b\xde\x44\x83\xf7\x96\x73\x2b\x77\xa3\x03\x05\x19\xea\x6b\x64\xad\x42\x40\xa9\xe7\xa5\x48\x39\x42\xd5\xa8\x18\x3d\x44\xe5\x73\x89\xdf\x86\x01\x44\x51\x06\xcd\x44\x65\x71\x47\xf4\x33\x13\x07\xa2\x93\xd0\xdd\x6a\xc7\x31\x77\x42\x77\x82\xf6\x7d\x25\xdf\xaf\xf8\x37\x96\x8a\x07\x6c\x42\xdc\x97\xb6\x68\x75\x7a\x09\x07\xcf\xc1\x28\x13\x0c\x90\xc6\x63\x2f\xa2\xe9\xce\x17\x5e\x28\x87\xc6\x7b\x7d\x38\x95\xd6\xea\x00\xd7\x4a\x72\x98\xbd\x75\x8e\x40\x4a\x1a\xa2\xe8\xb0\x08\xc7\x71\xb4\xd7\x8c\xca\x2b\x9b\xd9\xd0\xdf\xf7\xd0\xad\x9d\xa0\xb6\xb8\xcf\x45\xd4\xb1\x8d\x42\x59\x35\x2b\x55\xf9\x49\x38\xbf\x73\x72\x71\xc9\x15\xe8\x4f\xc7\x20\xb1\x78\x10\xce\x91\x87\xba\x14\x1c\x88\x18\xb6\x96\xdb\xfe\x5c\x84\xea\xaa\xc4\x4a\xdc\xcf\x70\x64\x89\x52\xb1\xf0\xe5\x91\x26\x60\x4c\x85\x10\x4d\x7f\xde\xb1\x50\x92\x88\x94\x21\x6c\xe9\x79\xf0\x83\xa0\xcc\x3c\xcf\xed\x04\x03\xd1\x29\x4e\x7f\x82\xfe\xbd\xc2\x57\xc7\x41\x20\x14\xcb\xe3\x91\xab\xb9\xfa\x57\xd4\xab\x3f\x6c\xc9\xf2\x3f\xcb\x40\xb8\xc4\x4a\x95\x2b\xdc\x3f\x5b\x1e\xee\x39\x92\xc7\x45\x79\x9c\x8a\xfc\xc3\xff\xcf\xbe\x4c\x47\x94\xd0\xb8\x2f\xff\x29\xb4\x8f\x3c\x17\x3c\x8c\xf3\x45\xa7\xe9\x34\xba\x27\xf9\x7e\xf2\xa6\x4f\xd4\x95\x87\xc8\x0e\x0a\x89\x14\xe4\x0d\x25\x9b\xcb\x71\x3a\xbe\x82\x3f\x34\xf1\xf3\xfa\xf5\x50\xc9\x79\x2d\x73\xdc\x0f\x99\x29\x05\x1b\xa0\x9d\x85\x69\x5c\x20\xa9\x34\x46\x21\x76\x97\xef\x8a\x69\x62\x0d\x27\xd0\x19\xba\x1a\x76\x86\x20\xac\x3d\x52\xb4\x54\x05\xbd\x3e\x4f\xcc\x83\xee\x3e\x5c\x40\xa9\x8d\x95\x29\x0e\x4b\xa2\x7d\x3f\x54\x72\xd5\x49\x6b\x4e\xdd\x46\x8e\x08\xf7\xb7\xc6\x43\x52\xbb\xa9\xe7\xda\xf3\x56\xd3\x4a\x76\x73\x8a\xc1\x03\x2d\x9c\xdf\x78\x7c\xb2\x5f\x7a\x77\xaf\xbd\x61\x60\x96\xc4\xfc\x60\xd1\xe0\xcd\x22\xb0\x2b\xc7\xc8\xea\x15\x52\x55\x2b\xf1\xd2\x01\x42\x82\xdc\xa1\x5b\x87\x1c\x4d\xd4\x58\x1f\x78\x4d\x66\x9f\x32\xed\x57\x38\xc7\x78\xda\x0b\x93\xe5\x9c\x2a\xd0\x23\xa2\xef\x9e\x54\x4e\xdb\xb2\x1b\xba\xf1\x8a\xa3\xf6\x78\xd2\x3b\x06\xf7\xaf\xac\x21\x91\x82\x68\x7a\x25\x3e\x5f\xbf\xfa\x41\xa9\xbb\xd0\x52\x09\xcf\x6e\xdc\x19\x86\x0b\xe3\x87\x02\x0b\x19\xf3\x72\x04\xc9\xc1\xd0\x0a\x68\x4f\x04\x18\xc3\x20\x30\xb8\x54\x4e\xfe\xc8\x7a\x8f\x20\xa2\xf9\x60\x64\x77\x0e\x35\x88\xb1\x2c\x1d\x6b\x1d\xd6\x97\x02\xfe\xc0\x3b\x00\x36\x01\x50\xcf\x49\x12\x69\xbd\x5e\x7a\x89\x4e\x1b\x9e\xa6\x9c\xb3\x4e\x5e\xc7\x23\xdd\x09\xe6\xf7\x6c\x67\x33\xf0\x3e\x3f\xf9\x82\x1d\xee\x16\xfa\xf3\x7c\x53\xa8\x4f\xef\x0a\x10\x88\x9d\x10\x4c\xe0\xfb\x59\x0e\xf2\x85\x4e\xd4\x19\x1b\x30\xde\x9b\x2f\x4a\x49\xbe\xd4\xfd\x96\xad\x6f\x69\x37\xec\xf0\x08\xaf\xf7\xb2\x1f\xdb\x22\x64\xfe\x32\xfa\xe9\x45\xc5\x02\x53\x93\xdb\x8e\x60\x4b\x91\xc2\xac\x9d\xa7\xef\xfb\x2d\xbd\xf4\xf1\xf1\x46\xc7\x83\xc4\x2c\x6f\x5b\x2c\x8c\xf0\x18\xc5\xdd\x6e\x0b\xf6\x60\x13\x8e\x67\x2d\x24\xb6\x15\xa1\x61\xbf\x77\x7e\x03\x21\xf8\x14\xb1\x65\x68\x48\x36\xdf\x86\xaf\x23\xb4\x80\x23\xec\xdd\x68\xd6\x39\x23\xd4\x06\x10\x77\xaa\x3
4\xb4\x71\x09\x1c\xc1\x09\x02\xd0\x4f\x78\x2c\xd8\x28\x77\xe3\x94\xd3\xcf\x87\xad\x7a\x2c\xf2\x2a\xbf\x48\x8b\x7a\x53\x51\x82\xc4\xcd\x81\xe7\xce\xd1\xb5\xf7\x88\xd9\x5d\x8a\x01\xa0\x01\x28\x4e\x7c\x3e\xbe\x51\x6b\x33\x70\x43\x09\xe6\x53\x69\x16\x95\x7a\xaf\x3d\xae\x1f\x67\xfa\xcb\x6d\xe4\x67\x19\xb6\x6b\xb6\x68\x1b\x6d\xc3\x78\xc5\x19\x33\x5a\x1f\x6b\xba\xe2\xc6\x4b\x22\x40\xd3\x84\xac\x9c\xcc\x13\xbd\xed\xe0\x9e\xf7\x53\x5a\x5d\x65\xc5\x99\xfb\x33\x1a\x9e\x7e\x22\x6f\xde\xc4\x37\x2a\x56\x9e\x91\x70\x00\x7e\x07\x20\xdc\x95\x72\x32\x01\x9c\xfb\x1d\x54\x9f\x34\x4c\x13\x71\xb5\xbe\xcd\xd2\x35\x4d\x9c\x58\x0a\xc3\x90\x46\x78\xf0\xea\x33\xff\xf5\xb0\x10\x23\xf4\x56\x9f\x47\x28\x98\x4b\xfc\xe6\xd9\x9a\x7d\xe6\x80\x7c\x20\xdb\xae\x0f\xd3\x3d\xd8\x13\xbd\x8d\x3f\x77\x11\xa8\xfa\x65\xce\xb9\xee\xdc\x5e\x48\x30\xb0\x9d\x89\x69\xde\x70\xc5\x55\xd6\xd7\x1b\xd3\xec\x1c\x02\x1b\xb0\xbb\x75\x2b\xbd\xa1\x13\x84\x30\xcf\x0c\x71\x4f\x0e\x2d\x6b\xd1\x8e\x3c\xfb\xee\x7b\x45\xc9\x75\xba\x1f\x07\x63\xcb\x60\x74\x9e\xdc\x2f\xfe\xfe\x4e\xd3\x3e\xd3\xc8\x3c\xdd\xfe\x92\x70\xfb\x07\x47\xa5\x6d\x75\x67\x40\xbb\x00\xd9\xe2\x8d\xd9\xa8\xd3\x78\xd0\xb9\x8d\x98\x30\x07\xd8\xa8\xc5\x01\x36\x3c\xee\xf5\xa2\xf8\x82\x14\xb8\x70\x0f\xf2\x9d\xb6\x1b\x4c\xb7\x87\x01\x9f\xaa\x6d\xdf\xb6\x51\xc9\x01\xd3\x87\x3e\x33\x6e\x44\x9d\x9e\x3e\xb8\x53\x82\x9d\x49\xf5\x89\xf3\x7d\x69\x84\x89\x7f\xc5\xe3\xae\x9d\xf5\x20\x26\xd7\xaf\x34\xca\xc1\x65\x87\x5f\xcb\x3c\xc8\x91\xe7\x43\x1c\xdf\xdf\xf9\x2b\xe1\x1b\x40\xc1\x0e\x18\xec\x5c\x62\x9e\xad\x8f\xa7\xf6\x26\x98\x96\x06\x48\x84\xa7\xfd\x48\x3e\x6b\x9c\x69\x99\x15\xf6\x8a\x8c\x3c\xa0\x18\xec\x1f\x76\xe5\x35\x01\x10\x6a\x78\xc3\x78\x2d\x33\x78\x4f\x8c\x3e\x03\x86\xd9\xaf\x4d\x16\xd7\xf9\x61\x2e\x39\xff\x2c\x02\x9e\xb3\xf7\x44\x0a\xb3\x25\x38\xbc\x37\xfd\xaf\x97\x91\xcc\x70\xa8\x3f\xdb\xee\xf2\xc2\xdf\x57\x97\x4e\xd0\x64\x4d\x1c\x1d\x04\xf9\xe0\x32\x04\x26\x38\xbd\xcd\xe3\xd5\x11\xd8\xc7\xc9\x99\xb2\x07\xcc\xb9\xec\xfe\xfc\x34\xef\xf8\xd4\xed\x06\x8d\x0c\xe1\xfe\x30\xa5\x1f\xef\xc4\xf8\x11\xc7\xc7\x37\xc7\x11\x0a\x00\x42\x43\x73\x80\x15\xa1\xbf\x7f\x22\xaf\x9d\x11\xfa\xdc\xf3\xe4\xb1\x87\xa5\x40\xcc\x58\x87\xf3\x42\xb4\xe1\xeb\x14\x61\x63\xe0\xb8\x6f\x39\xe4\x7e\xef\x25\xf7\xd4\x50\x48\xdd\xa8\x3b\x73\xdc\x9c\xb6\x34\x13\x2e\x5a\xb5\x16\x82\x37\xce\x11\x03\x28\xc6\x63\x09\x8d\xb3\x9c\x1b\x5a\xf3\xb4\xcf\x04\x00\x85\x81\x61\x27\x33\x87\x35\x87\x33\x52\x1b\x1d\x8d\xa3\x9b\x91\x52\xf9\xad\x71\xf1\xf2\x09\xf4\xce\xdf\x7b\xdb\x27\xfc\xcc\xc7\x32\x6f\x5b\x23\x62\xdd\x19\x00\x95\x18\xe4\x0a\xc2\x05\xc7\x26\x1c\x1e\xd8\x71\xde\x8f\x48\x18\xa6\xe9\xb9\x8b\x82\xfa\xeb\xa9\xfa\x70\x88\x7d\xc6\x5a\xb4\xb3\x42\xf5\x3a\xc3\x2e\xec\xb2\x83\xcc\x3e\x8a\xaa\x73\xce\x12\xed\xbb\x0e\x99\x3b\xc0\x34\x2a\xe7\x46\x6d\x5f\xe1\x04\x10\x84\x81\xe9\x8a\xf3\x5c\xa7\xea\x8a\x39\x69\x0c\x9c\x29\x97\x9f\xfc\x6c\x69\xfc\x6f\x86\x8a\x5e\x85\xb8\x4a\x0e\xa8\xbe\x07\xcc\x12\xb2\x73\xf3\x5d\xf3\x94\xc8\x89\x95\x21\xe8\x7e\x99\x25\xeb\xd6\x3f\x6b\x4e\x68\x82\x3e\xa2\x27\xa3\x63\x3b\xc5\x63\xe6\x20\xfe\xab\x2f\x2a\xdd\x34\x75\x53\xdf\x24\x3a\x26\x20\x53\x8a\xfc\xc2\x13\x1e\xa9\x83\x69\x29\x60\xc0\x8e\x2c\xfe\xac\x77\x2b\x9f\x27\x14\xc8\x01\x46\x40\x40\x92\xc3\x3c\x9f\x26\x5e\x6a\x7b\x4e\x48\x1f\x7d\x44\xb7\x1a\x6f\x0d\x1b\x25\x13\x5d\x38\x43\x3c\x93\x00\x03\xf4\x0c\x40\x3e\x05\xe1\xec\x79\x99\x5e\xa1\xf9\x4e\xd0\x31\xc1\xf4\xf8\x4d\xe7\x36\x7b\x97\xa3\x2b\x8f\x8c\x07\x36\x55\x3f\x8f\x96\xbe\x18\xef\xe3\xba\x58\x4d\x07\xbf\x5a\x77\xfe\x69\xfa\x02\x81\xb9\x41\x90\x46\xc4\x73\xa0\x75\x5b\xf2\xbb\x8f\x42\xe6\x06\x05\xf6\x35\x26\xe0\x
79\x80\x20\xb4\xfa\x6c\xe7\xc7\x29\xde\x09\xa0\x73\xda\x57\xc9\x1f\x68\xd1\x35\x70\xc3\x77\xac\x95\x35\x1f\xac\xab\x9d\xa2\xbe\x99\xe2\x99\x90\x42\x46\xa7\x00\x88\xb3\x1d\xdf\x08\xdc\xa9\x89\x9e\x00\xe0\xc9\x81\xbb\x07\xa0\xa3\x51\xa4\x71\xd2\xe1\xcb\xa4\x08\xae\x9f\x9e\xc2\x0b\xef\xcd\xa3\xac\x6d\xdf\xa4\xd3\xe5\xae\xf0\xf3\xf7\x2e\x79\xcf\x1f\x32\x83\x29\xed\x75\x9c\x8c\xf0\xd8\xf7\xd9\x0c\x29\x7e\x1c\xba\x3d\x34\x00\x10\xf7\x1e\xec\x2f\x0a\x77\x43\x65\xe6\xa5\x7d\x97\xa7\x09\x61\x28\x32\xfe\xf9\x3c\xe9\x66\x21\x17\xaf\x1d\x62\x98\x93\xde\xba\x46\xfc\x1c\xa2\x77\x6d\x92\x88\x6c\xa8\xf2\x3c\x33\x7d\x87\x80\x3c\x13\x41\x25\x2d\xdf\x97\x3c\xec\x07\xf0\x01\x73\x11\xe0\x5e\x09\x72\x66\xfd\x86\x6e\x9d\x09\x41\xe0\x3c\x96\x25\xb6\x31\xe1\xec\xfb\x34\xee\xbb\x05\xda\xfd\xf6\x0d\x3f\xc3\x66\x7a\x6c\x69\x40\x90\xe4\x3a\x11\x26\xac\x28\xb1\x48\xad\xe7\x48\x3a\x02\x60\xc2\x72\x3b\xfd\xde\x6a\x79\x87\x00\x20\xa1\x00\x73\xd3\x18\xbc\x37\x88\x56\xf5\xad\x66\x35\x03\x90\x73\x8c\xad\x8c\x40\x20\xa2\x49\x5d\x13\x67\x5f\xef\xba\x3c\xb8\xc7\x78\x46\x40\x94\xb8\x7d\xdf\x6f\xd5\x4b\x8b\x90\x75\x02\x84\x97\x32\xcc\x8d\xda\x0b\xf1\xaa\x19\xfd\xe7\x1c\x5f\x62\xce\x9d\xe5\xa9\x34\x93\x35\x13\xc4\xe6\x3e\xed\xc7\xfa\xcb\xa5\x78\x29\x5c\xfc\x93\x4a\x7d\x0f\x0b\x82\x3c\x86\x9a\xa4\x14\x4b\xf6\xf9\x0c\x7b\xbb\x2c\xcb\xb6\x01\xf0\x7c\xd2\x34\x7b\xed\xe6\x89\x8e\x72\x6b\x31\xcc\xbd\x67\x10\xa1\xfa\x04\xc8\x80\xfa\xae\xf8\x9d\x7e\x86\xfd\x63\x21\x00\x6a\xb6\x78\x65\x3b\x57\x88\xc9\x5b\xa0\xfd\x88\xd9\xc1\xbc\x03\xc0\xb4\x3b\xb8\x94\xf7\x56\x61\xcb\x26\xd0\x34\x02\x01\x29\x0a\x6e\x84\x05\x8a\xb1\x50\x6f\x71\x17\x22\x1a\x67\x83\x4c\x22\x76\x6d\x17\x69\xee\xfc\xfe\x6a\xcb\x03\xe5\x23\x36\xb3\x5d\xe6\x69\x12\xa5\x73\xf5\x67\xc5\xb4\x24\x11\xd1\xa7\x22\xb9\x1a\x66\x00\x8c\xd9\x07\x7b\xbc\x83\x85\x6e\x84\x6f\xb7\xb8\xdb\xce\xeb\x77\x9e\x01\xf6\xc1\xc9\x58\xb6\xbe\x7b\x04\x45\xbd\x12\xbd\x60\xb8\x8e\xf9\xe0\x65\x84\x70\x53\xdd\x81\xcb\x80\x24\x0d\xa0\x98\x35\xa4\x08\x00\x50\xdf\xf7\x2d\xd3\xca\xf6\x05\x82\x1d\x10\x00\xc0\x1d\x80\xd1\x07\x4c\x48\x02\x5e\x34\xe0\xb4\xe2\xe4\x3d\x4b\x95\x29\xa1\x43\x3c\x13\x4b\x5a\x3d\x31\xf5\x77\xe6\xa8\xc8\x8a\x3b\x61\xb9\x8b\x69\xdf\x20\x9d\xeb\xde\xef\x8b\xb2\x61\xbb\x60\xa8\x65\x22\x47\x38\x68\x10\x3d\x49\x9a\xeb\xb6\x6b\x9f\xe4\x0d\xe7\x8c\xd8\xe9\x94\x21\x88\xfd\x66\x00\x1b\x06\x39\x17\x04\xc3\x73\x67\x6c\x8a\x72\xbe\x95\x13\xcd\xfb\x0c\x84\xb3\x0a\x71\x26\x1d\xd7\x6a\x7a\x13\x9a\x9d\x02\x84\xe8\x73\xc0\x4e\x00\x5c\xed\x45\x94\x96\x29\x6d\x3b\x01\x19\x02\x90\xa8\x93\x6e\x5b\xee\x78\xce\x71\x90\x9f\xc7\x10\x6e\x7e\xcb\x52\xc1\xdc\xae\xb1\x87\xcd\xc2\xf7\xd0\xf2\x66\xf6\x41\x23\xbb\xff\x40\x24\xe5\x49\xfb\x28\xd1\x15\x14\xfd\x38\x4f\x72\xfe\x04\x7d\xc4\xcc\x51\x77\xa2\xa6\xe5\x26\x76\x91\xf2\x34\x93\x27\x7a\xd7\xb6\x27\xc9\x64\xdd\x79\x25\xad\x72\xa0\xf9\x0c\x41\x0d\xe8\xe6\x06\x08\xcd\x00\xaa\x63\x26\x5a\x9f\xf5\x24\xc3\xb5\x3a\xb8\x21\xc3\x36\x7d\xd9\x21\x4c\x13\x7a\xcf\x82\xd5\x24\x05\xab\x3a\x39\x18\x66\x33\xad\x60\x0e\xa6\x37\xc3\x0c\xb5\x10\x34\x99\x2e\x3c\x79\xc0\x43\x45\x6e\xad\x1d\x20\xd1\x0e\xb6\x5d\xd0\x6b\x3b\xbe\x35\x31\xa7\x4e\xdb\x30\xb8\x82\xd4\xf9\x5e\x5d\x26\xe8\xd2\x18\x85\xc6\x84\x47\xfa\xd7\x79\xaa\xbf\x5a\x06\xe5\x45\x46\xdd\x38\x71\x25\x8a\x04\x6b\xcd\x91\xfb\x20\x3c\xd7\x34\x05\x4f\x4a\x43\xd9\xd3\xea\x99\x89\xde\xea\x05\x61\x37\xde\x6b\xd7\x08\x21\x56\x3a\xec\xcc\xe6\x69\x22\x25\xbe\xdd\x00\x76\x09\x60\x81\x0d\x20\x49\x4d\x58\x74\xb9\x92\xba\xf2\x14\x44\xea\xda\x46\x4f\xcd\x0e\x73\x1c\xb9\x36\xd7\xea\x9c\xab\xf3\x9c\x54\xf0\x82\x70\x51\xb1\x6e\x03\xf2\x34\xd1\x34\x60\
x83\xe8\xa7\x14\x20\x7b\x4d\x9e\x0e\x50\x3c\x6c\x0d\x76\x80\xb8\x39\x50\x3b\x7f\xef\xec\xe6\xc4\xc0\xa5\xdd\x05\x0a\x0d\xdb\xde\x6f\x7b\x49\xf8\xdd\x9b\x15\x66\x41\x9b\x35\x20\xf1\x27\xde\xb4\x4c\x94\x62\xef\x7f\x7c\x5a\x44\x32\xc9\x94\x66\xcf\x73\x78\x29\x1c\xe7\xc7\xca\x46\x16\x10\xb2\x0f\xbd\x9b\x28\x79\xa3\xaf\x9f\x04\x69\x3f\x80\xcd\x22\xb9\x51\x1d\xe2\x5b\x95\x9f\x0f\x31\x66\x98\xfb\xbe\xe7\x7d\x4f\xe6\x3d\x1d\xa7\xbd\x27\x2d\x4d\x10\xfa\xfa\x18\x7c\x89\xcc\xb1\x6e\xc5\x6a\x40\x17\x17\x74\x5c\xd7\x0b\xe3\xed\xdb\xdb\x14\xc3\xa2\x5f\xb0\x67\x84\x60\x8f\xfd\x97\x56\x92\xea\xbb\xcd\x1b\x63\x80\x6e\x24\x98\x1e\x29\xcb\x5d\x4d\xad\x75\x60\x0a\xe5\x02\xfe\xbe\x19\x3e\x4e\x13\x54\xcd\xc9\xc1\x7c\x8d\x04\x60\x4b\x82\x06\xfd\xe1\xe8\x61\xf3\xeb\xbd\x72\x1c\x3d\x0a\x90\x9d\x3f\x49\x36\xd1\x74\x78\xef\x00\x22\x79\x6c\x9b\x52\x4b\x79\x04\xce\xb8\x37\x85\xe8\x86\x91\xc7\xd1\x44\xb3\xae\x6a\x9b\xc9\xe6\x2b\x6d\x79\x53\xec\x79\xe1\x04\x31\x4d\xd3\x54\xb6\x73\xf7\x3e\xba\x1d\x08\x5a\xcc\x12\xea\x2d\xc5\x12\x42\x6f\xd8\x98\x39\x77\x0f\xa8\x7c\x1c\x56\x94\x22\x2f\xad\xae\x09\x9c\x96\x75\x90\x19\xf3\x1f\xb7\xeb\x60\xc0\x9e\x4e\x68\xcd\x33\xd3\xd2\x24\x85\x11\xfe\x7c\x6e\xbd\xdb\xed\x3c\xf3\x53\x5a\xee\xde\x3e\x47\x7c\x33\xd2\x2c\xcd\x9c\x55\x83\xe0\x8d\x0e\x81\xf5\xab\x4b\xe5\xb8\xd7\xa5\xd6\xc3\xca\x30\x37\xe1\x1d\x78\xc4\xde\x65\x34\x4d\xf3\x0f\x6e\xbd\xd9\xee\x8a\x35\x5a\xbe\x93\x5b\x35\x8c\x5d\x55\xa9\x2d\xcb\x11\xa1\x38\x02\x9d\x07\xb9\x94\xb9\x58\x4a\x80\x2f\xfa\x83\x33\x99\x38\x23\x56\x26\x0a\x10\x82\xa2\x28\x38\x37\xab\xc7\x64\x2e\xe6\xee\x4b\x5d\x40\x18\x68\xc4\xe7\x43\xad\x04\x90\x5a\x95\x5a\x5d\xdf\xef\x7b\xbc\xf3\x32\x3c\x86\xeb\x46\xbd\x34\x57\x67\x16\x3a\x66\xee\x71\x5c\xd7\x6d\xbb\xb1\x2f\x6f\x3f\x77\x98\x28\x25\x2b\x40\x48\x52\xfb\x99\x69\x57\x70\xdc\x4d\x52\xd7\x00\xa0\x23\x08\xee\x54\xf7\x84\xb8\x78\xfc\x72\xf4\xf7\x07\x81\x9b\x69\xeb\x44\xbe\xa9\xdb\x79\x60\x0c\xb1\xe3\x05\xe1\xef\x11\xfb\xa6\xeb\x80\x35\x8c\x5b\xbe\x2e\xb8\x13\xb1\xd4\x32\x14\x0d\x2f\xcf\xba\x58\x8d\x7f\x48\x82\x8b\xa6\x9f\x93\xc2\x42\xd7\xdf\xf3\x7c\xc2\xbc\xd5\x6b\x51\xee\xf3\x2c\x02\x61\x07\x09\x30\xae\x8a\x38\x55\x6d\x5b\x01\x90\x04\x80\x3f\x79\x7b\x2f\xfb\x97\x1a\xd5\x31\x73\x22\xdb\xba\xef\x06\xc3\xaf\x1d\x11\x87\x65\xd6\x8b\x0c\x52\xc9\xfc\xee\xcf\x33\xc1\x5e\xd5\x9e\xa5\x10\xc3\x2c\x2a\xd9\x94\x40\x08\x73\xa4\x32\x4d\x62\x53\xb3\xe2\xba\x4e\x4f\xa6\x40\x1c\xc6\x33\x53\xb2\x46\xb0\x44\x56\x61\xfc\x6a\xc5\x45\xed\x33\x40\x7d\x07\x30\xc0\x02\x85\x8f\x55\xd2\x33\x43\xb0\x89\xbe\xd1\xa6\xb6\xef\xa2\xd7\x33\xb8\x05\x12\xec\x38\x93\xee\x00\x9b\x19\x88\xfc\xa9\x95\x03\xda\xe3\x81\xc3\x0d\x59\xbd\xc7\x18\xa4\xa2\xd1\x8d\x60\xb4\x79\xfe\x6e\x9a\xa1\xe7\xb6\x96\x6b\x8f\xce\xd1\xb1\x08\xb1\xe7\x1d\x44\x53\xf9\x50\x09\x5c\x9c\xdd\xeb\x06\xb8\x86\x11\x6d\x72\x19\x03\xb2\xd3\x4d\xa2\x0b\x10\x61\xd2\x8d\x00\x39\x18\xd4\x3f\x5e\x6b\x03\xe8\x1a\xef\x45\x13\x1b\xbd\xcd\x09\x4b\x44\x6b\xb4\xfa\x33\xc3\x0c\xc3\xb0\x25\x4a\x9b\x4b\x1f\x7d\xfe\x92\x89\x6a\x80\xcd\x2c\x8f\x83\xf2\x3d\x6c\x67\x4a\x0c\xb0\xfb\xbb\x8b\x91\xef\x73\xff\xed\x47\x71\x9c\x0e\xeb\x94\x30\x0c\x1f\x55\x8e\x83\xf1\xdf\x1a\x04\x19\x00\xc4\x86\x44\xdb\xb2\xcb\xcf\xe4\x56\x6f\xe9\x60\x13\x4f\x3a\x12\xe1\xd9\x64\xbc\xb9\xc4\xdc\x37\xe0\x54\x46\xbd\x45\x9b\x6b\xdc\xef\xd5\x39\x45\x07\xbd\xd3\x73\x5d\x57\x43\x91\x43\x2f\x59\xd0\xb3\x60\x87\xae\xeb\xba\x7f\x2c\xde\xbe\x04\x76\x5b\xe8\x6a\x51\xc1\x22\xa7\xee\xcd\x88\xae\xa6\x30\x0c\x8d\x6f\x6b\x8b\x9c\x11\x93\x20\x9a\x80\x4d\x67\xe6\x87\x22\xf0\xea\xaf\xa5\xfb\x42\x15\x7e\x60\xce\xb0\xf5\x71\x69\xdc\x17\x55\xed\xf7\xa9
[hex-escaped binary data block omitted: raw byte sequence from the patch payload, not human-readable]
17\x82\xfc\xd1\x4d\x8d\x6a\x6e\x5c\x98\xa8\xef\x00\x71\x79\x94\x5c\xb7\xe2\xc8\x58\x0e\xd5\xe8\xff\xf3\xd9\xaa\x50\x79\x7d\x90\x75\xe1\x3a\x04\xc2\xf1\x3e\x76\x37\xcc\x5e\x89\x24\x67\x48\xa1\xc8\x94\xd9\x0f\x60\x3f\x79\xf3\xc4\x24\xb2\x2a\xe4\xfe\xd7\x0f\x73\xbd\xcb\x13\x5a\xd5\x0a\x86\xd5\x8c\x9d\x0a\xdb\xe1\xfc\x2d\x2c\x5b\x0b\x5e\xaa\x60\xae\xb3\x2c\xec\x9d\x67\xff\x71\x43\xd1\x3c\xfd\x1a\xd3\x14\xc2\x89\xfb\xd6\x3d\xc2\xd5\x83\x9a\x1d\xee\x4d\x5d\xd9\x11\xa0\xb2\xd4\xad\x24\x8f\x95\x28\x5b\x60\x53\xa5\x44\x50\x78\xba\x48\x3f\x88\x63\x1e\x76\x34\x9b\xbe\xc6\x4b\x8c\x81\xea\xeb\xbf\x38\xd7\x0a\xe3\x12\x5a\x1d\x4c\x68\x48\xec\x5f\x2e\x21\x87\x6a\x66\x78\xac\x0a\x8d\xbf\x03\xa1\xc5\x40\x50\xdc\xda\x30\xbb\xc9\xdf\x42\x6d\x29\xa4\x59\xf8\xc9\x8d\xb1\x1c\x06\x98\xd3\x4c\xad\xfa\x03\x5e\x26\x35\xa7\xb2\xc8\x7f\x31\x9b\x61\x7a\x4f\x14\xfb\x3e\x99\xdb\x4d\x6d\xb3\xd8\xda\x0b\x7e\x2d\x9c\x55\xc0\x70\xca\xdc\xcd\xc6\x29\xda\x3a\x54\xfb\x23\x05\xd2\x97\x65\xde\x49\xfd\x72\x9b\x93\x4d\xfb\xce\x5d\xe6\xff\x1c\x89\x65\x62\xa1\x75\x17\x56\xfa\x90\xdc\x55\x8e\x3c\x45\xa3\xae\x99\x8e\x28\xa2\xfd\xb5\x70\x12\x79\x00\x3a\xdb\x21\xa6\x50\x5b\x1c\xf6\xf3\xea\x5e\x6b\x0c\x76\x73\x43\x37\xf6\xf8\xff\xe1\xdd\x0a\xaf\x7d\x48\xe2\x12\x72\x34\xb2\x69\x38\x7e\x9c\x17\x95\x18\xa1\x7a\xab\xef\x64\x5f\xc0\x83\x1e\xae\xf4\xc3\x59\xc9\x6f\x42\x86\x34\xd2\xb7\x26\x55\x5a\x8a\xc0\x10\xce\xf4\x5f\x9d\xf5\x28\x85\xe0\x85\xeb\x8d\x64\xcf\xbc\x34\x33\x08\x50\x6c\x63\xaa\xa3\xa1\x1b\x7b\xed\xda\xa2\xa3\x2d\x21\x21\x87\x73\xb6\x28\x12\x75\x89\xb7\xc0\x14\x8a\xfb\x3f\xba\xa6\xb6\xe7\xbc\xe3\x21\xc9\xb1\xf4\x3f\xfb\x67\xae\x42\x52\x61\x7f\xf8\x76\x0d\x93\x15\x88\x12\xd9\xd2\xc2\xa3\x01\x28\xbe\x8d\x5f\xa6\x37\x88\x4a\x12\xf4\xd9\xfd\x31\x78\xb1\x7e\x50\x43\x7a\x42\x7d\xef\xff\xe1\x80\xfa\x5b\xc9\x5f\xc7\x26\x66\x7b\x14\xaa\x30\x65\x7b\x9e\x56\x36\x9b\x6d\x4b\x4c\x71\x30\xa2\x3c\x0a\xb1\x6f\xdc\x9c\x36\xb9\x96\xb2\xc4\x5e\x50\x27\x06\x4b\xea\x6d\x2a\xfe\xd7\x18\xff\xc1\x27\xbb\x99\xdb\x5d\xcd\x9c\xb6\xff\xa4\xfa\x79\x47\xe7\x79\x04\xe6\xe0\x21\x14\x5a\xcf\xfc\xe8\x8d\xd5\x60\x2f\x67\x11\x93\x4f\x49\xd3\xb4\x7d\x84\x39\xdd\x9a\x96\xa5\x15\xff\xdf\x77\x94\xfe\x4e\x47\x16\xa1\x36\x72\x1e\x5e\xb2\xe0\xa8\x61\xe4\x3c\x9d\xcc\xe5\x6c\x36\x75\x7d\x22\x0e\x59\xc9\x16\xab\x7b\x2f\x7d\xde\xfe\x7d\xb5\xf9\x52\xdf\xfc\x61\x64\xcf\xcc\xd5\xee\x3f\x44\x9a\x65\x94\xd3\x1f\xf0\x95\xc3\x1a\xde\x4f\x7f\x7e\x62\x74\x65\xa3\xf4\xe7\x4d\x13\x04\x1b\x68\x83\xfc\x8b\x63\x77\x3d\xc2\x75\x72\x60\x6f\x78\xa5\x0c\xde\xb9\x57\xb7\x04\x2e\xbb\xf7\xbc\xf9\xbf\x4f\x51\x19\x5f\xdf\xa7\x6c\xa7\x71\xa7\x3c\xc2\x55\x54\x25\x8b\x93\x6c\x97\x38\xc3\xc7\x12\x14\xbf\x48\xfb\x00\x12\x6a\x69\x2a\x56\x5f\x1d\x53\xc0\x56\x04\x9e\xb8\xbb\x7f\xdf\xdd\xfb\xe3\x4b\x6b\xc0\x78\xae\xa5\x2a\x41\xff\x3e\x18\x81\x4c\xcc\xd0\xef\xf0\x08\x47\x2b\xec\xad\x81\x33\x0b\xb5\x34\xd4\x9e\xa7\xcc\x6d\x23\xdb\xeb\xb2\x38\xfe\x66\x27\x89\xf3\x5f\x48\xdc\xfd\xe6\xd5\xc2\x45\x9a\x24\x87\x9f\xaf\xb4\xe3\x32\x99\xcb\x42\x77\x45\xcd\x2f\x67\x92\xb6\x0e\x1e\x69\x86\xb4\xc6\x61\xdf\xfe\x46\x88\xef\x1d\xd8\x9c\x2d\x94\xf4\xbf\xf6\xc1\x39\x1a\xf3\xf1\x38\xc7\xc6\x1a\x3b\xbc\x67\xfe\xec\x66\x2b\x58\x6d\x92\x7e\xce\x3c\x46\x23\x6e\xf1\xe8\x5a\x79\x58\xec\xb7\x56\x2b\x2a\xbf\xff\xe1\x19\x0e\xa3\x84\x38\xbf\x3b\xac\x32\x29\x4e\x98\xb8\xc6\xac\x72\x8a\x9f\xa7\x5d\xd2\xf2\xa2\xc9\x40\x53\x49\x33\xb9\x1b\xfe\xff\x9e\xd1\x07\xbc\x8c\x5e\x5e\x31\xb0\x45\xfd\xdc\x44\xe4\x1a\x8d\x4f\x7b\x6e\xbd\xc2\x7b\xd5\xc6\xe6\xf6\x7b\xfc\x4c\x6e\xcb\x50\xf9\xb1\x37\x81\xff\x38\x34\xab\x29\xcb\x39\x50\x3a\x27\xda\x66\x9a\x0e\
x3f\x3f\xd9\xb0\x9a\x49\xeb\xd7\x57\x2f\xf3\xce\x1f\x4e\x1e\xda\x7a\xcd\x39\x46\xeb\x2d\xe0\x7f\x72\x07\xba\xa0\x71\xb8\xc8\x78\x3f\x39\x97\xf9\x43\x54\x1c\x83\x91\x82\x68\x08\x9a\xde\x64\x77\xa7\x69\x4a\xf7\x2d\x3d\xd2\x0a\x7a\x9e\xb8\x89\xfc\x3f\x7e\xab\xaa\x3f\x33\x1b\xa2\x25\x54\x1a\x7e\x6d\x3a\xa8\xdc\xc2\xac\xf3\x1b\x86\x6d\xf0\xdf\x4f\x4c\x83\x28\x18\x1b\x66\xcc\x81\xa8\x59\x18\xb6\x67\xff\x77\x9d\xa7\x60\x56\xf5\x72\x77\xba\x31\x65\x24\x1f\x4b\x4c\x27\xb8\x6e\x28\x61\x16\xdc\x02\x13\xb9\x73\x56\x86\x19\xb5\x81\x61\x3d\xc7\xfc\x87\xda\xf4\xd1\x20\x36\x4c\xa2\x39\xc2\xd6\xe3\x67\xd6\xee\x4c\x33\x18\x5b\x98\xa4\x5d\xf0\x13\x94\x6a\x80\xac\x15\x52\xfb\xa4\x34\x6a\x83\x56\xfe\xef\x21\x8f\x6f\x8f\x70\xb4\xe0\x88\xe5\x64\x95\x37\xb2\x2e\x9a\x61\xed\x38\x98\x66\xb3\x7f\x1b\xaf\xa5\xbc\x2c\x2a\xfb\xa2\xb7\x4a\xed\xb0\x46\x6d\xff\x17\x23\x54\xc6\xf0\xb4\xa4\x1f\x83\x7b\xdc\xe9\x73\x6f\x19\xb3\xb7\x55\xa5\xa7\x40\xf5\x0d\x9c\x9a\x1c\xa3\x56\xbb\x87\x90\xed\x79\xe9\x7f\x74\x36\xdb\xbf\x72\x4e\xa4\x56\xdb\x0f\xef\xdd\x7b\xfe\xea\x18\x89\xb7\xcf\xa1\x33\xeb\x52\xf2\x61\x0b\x03\x95\x09\x46\x6d\xe6\x28\x77\x8a\xfc\x3f\x8e\xea\xf0\x7e\x28\x88\x84\xa9\xa5\xba\x98\xe9\x6e\x26\x3a\x0a\x43\x38\xb6\x77\x47\x3b\xac\x2d\x2e\xfb\x7a\x33\xe1\x61\x82\x20\x31\xff\xef\x73\xae\xea\x05\xbd\x5b\x2f\x8c\xa0\x2d\xb1\xf6\xe3\xb3\xa1\xe2\x73\xac\xeb\x2f\xdc\x9e\x7d\x3e\x09\x04\xc7\x98\x4e\x4c\x28\x8b\xb2\xdb\x9e\x7c\xec\xb3\x51\x03\x04\x28\xdb\xe2\x22\xed\x8d\x5c\x91\xec\x7a\xfd\xcf\xfe\xb2\x6a\x72\xf0\x5b\xe0\x18\x0d\x14\xb5\x59\x71\xfa\x83\x37\x8a\x76\xa9\x62\x36\x7d\x79\x7d\xd1\x00\xc4\xb9\xed\xdf\xb5\xd7\x06\x76\xbc\x7a\x1a\x8e\x24\x9a\x10\xbc\xa3\x69\xe1\xd0\x24\x1d\xa7\xb2\xea\xff\xea\x41\x9c\x1b\xf8\xa4\x8d\x59\x6a\x52\x3f\x49\xc9\xdc\x7b\x21\x15\x3c\xd6\x2e\xf8\xd9\xad\x06\xb6\x60\xe6\x85\x8e\x85\x0e\xd6\x30\xcf\x6c\xcb\x52\x72\x3f\x03\x7d\xa9\x72\x09\x99\xc9\xbd\x86\xf8\x2d\x58\xab\x04\x21\x26\x19\x77\xfb\x7f\x0b\xe4\x1c\xe3\xb7\x12\xe7\x45\xd2\x60\x5d\xaa\xd5\x92\xe1\x28\x89\xbd\xf6\x47\x55\x50\x82\x9b\x57\x67\x56\xf9\x8f\x79\x05\xa6\xae\x45\x65\xa8\x8d\xa6\x87\xa5\x19\x86\xe8\xbd\xac\x4d\xb2\xce\xa4\x05\xc5\xbb\xd3\x01\x8f\xa2\xda\x16\xaf\xd5\x96\x73\xd8\x25\xb9\x12\xac\x70\xb1\xba\x03\xaa\x38\xc6\xe3\x1d\xf7\x76\x84\x55\x66\xc6\x04\xb4\x26\x95\x6d\xfe\xf3\x7b\x14\xde\x80\x8a\xc9\xa5\xaa\x12\x04\x3d\xd4\xfd\x3e\x95\xf8\xd8\xae\x7f\x10\x53\xea\x23\x57\xcd\x4b\xb9\x51\x60\xaa\x11\x35\xd5\xd7\x1a\x07\x59\xe1\xd0\x3b\x5f\xcf\xdf\xbd\x4f\x3f\x36\x2f\x3f\x86\xf9\x78\xe3\xa7\xac\x5e\xf6\x3e\xb7\xe8\x58\xf4\x7a\xf6\xb0\xa4\xcc\x13\x5c\xea\x57\xa7\xd3\x01\x3a\xf0\x03\x48\x83\x69\x77\xda\x75\xdd\x13\xfe\xd8\xe2\x89\xfc\xb8\xe0\x3f\x66\xcc\xbc\xa1\x9b\x05\xfd\x33\x94\xfd\x34\xe3\x9a\xb9\x20\xa7\xac\xf4\x08\x77\xfc\xb2\x4b\x44\x2d\x7c\x21\x81\x6e\xf7\xcd\xf9\xcb\xbb\xef\x5a\xa7\x22\xd5\xa5\xe4\x1d\x89\x24\xc5\x9a\xc7\xd4\x97\x6a\x40\x4c\x91\x09\xa6\x67\xec\xa9\x61\x36\x39\xc6\xea\xa5\xff\xb5\x5b\xae\x0f\xda\x35\xec\x06\x27\xa8\xf7\x66\x2f\x7f\x23\xae\x1b\x87\x96\x1b\xc5\x3c\x76\xd9\xef\x53\x10\xd3\x0f\xc6\xad\x00\xa4\xb5\xa0\x05\x63\xa6\xab\x65\x1f\x67\x4e\x31\x70\x9a\xb0\x43\xa2\xbe\xa5\x34\x56\x25\x53\xe3\x68\x51\xe0\xf8\x4f\xe1\xff\x6b\xa2\xaf\x9f\xbe\x2e\x3b\x72\x0a\xa7\xe9\x48\xf6\xb9\x2b\x17\x91\xe5\x9b\xf9\x7d\xf6\x5b\xca\xa5\xd4\xf1\xc7\x2c\x49\x42\x5b\xde\x12\xfd\x00\x22\x8c\xd2\x3b\x7d\xfc\xb9\xcf\x4f\x08\x69\x02\x8b\xc7\x14\x36\x0b\xbd\x25\xb2\x03\xfc\x74\xda\xf4\xbf\x7c\xb7\x96\x9c\x4f\x4a\xa0\xa8\x7d\x86\x47\x50\xc2\x6c\x0b\xf0\x17\x2e\xa7\x46\xe9\x27\x2f\xdb\x64\x88\x56\xdc\x9c\x77\x20\xb5\xf2\x34\xfb\xa1
\xaf\x93\x8a\x6a\xe8\x2f\x49\x4d\x82\x8f\x89\xe5\x53\xc6\x59\xca\x85\x89\x88\x96\x1a\xb3\x6e\xc4\x61\xf1\x5c\x2d\x81\x84\xf1\x7f\x39\x45\x30\xf2\x7d\x2b\x2b\x82\x12\x63\xfb\xfb\x6d\x6f\xbe\x46\xe0\x84\xae\xb4\xbf\x8a\x4e\x21\x17\xa8\xf9\x95\xa8\x6a\xd5\x0b\x28\xe3\x60\x4f\x1f\x66\x3f\xba\x88\xde\xe0\x76\xb2\xdc\x7c\xe6\xba\xd3\x30\x06\x45\x19\x4f\x0a\x31\xcf\xde\x9b\x6a\x9e\xb2\x3b\xac\xd1\xe8\xff\x1b\x63\xb8\x3f\x7e\xd9\x05\x19\x6f\x19\x3e\xdc\xe3\x44\x3b\xd9\xa3\x55\x88\x7d\x29\x41\xe3\x74\x6c\x19\x5c\xc4\x9e\x82\x42\x96\x87\x91\x0d\x11\x62\x46\xcd\xc9\x2b\xed\xb7\x44\x9c\x82\xd6\xee\xc9\x6f\x1e\x97\x7d\x98\x92\x61\xa6\x56\x82\xae\x9b\x2a\xe7\xc5\x82\x7b\x7f\x9c\xc2\x20\x4e\xff\x49\xf7\x1f\xc1\x06\x41\x34\x59\xc6\x03\x5c\xc4\x21\x1c\xa2\xc5\x2f\x23\x2e\x1b\x07\x65\xbc\x52\x58\x56\x01\x42\xec\x38\x2f\x42\xca\x4b\x5b\x4e\x62\x87\x61\x24\x36\x71\x67\x68\x46\xc9\xe3\xb2\x01\xc5\xd9\xe9\x37\x9f\x78\x5f\x63\xb3\x2b\xd4\x66\x58\x42\x8b\x38\x44\x8b\x2e\x50\xd5\x4a\x77\xcd\xe7\x73\x5c\x7e\x8d\x1d\x28\x7f\x60\x21\x48\x7f\x61\x5f\xbe\x09\x64\xed\xd8\x7b\x06\x2f\x6b\xb1\x96\x80\x44\x8e\xc6\x2a\x4a\x21\xbb\x10\x96\x19\x9e\x34\xb7\x0d\x84\xcf\x8e\xd1\xfc\xe5\xb6\x2c\x53\xae\xed\xb1\x86\x13\x00\x55\xda\x11\x91\x55\x04\x1f\xae\x92\xdb\xc5\xf8\xa9\xaf\xfa\x4f\xf2\xe1\x89\x83\x50\x66\x67\xcf\x94\x41\x1b\x63\x9c\x46\xe0\x5b\x7a\xdc\x21\x64\xf2\x33\x9c\x3e\xc2\xf1\x40\x8f\x97\xca\xf9\xa3\x0a\x4f\x6c\xd5\xb9\x3d\xcc\x80\x82\xec\x7f\xd7\x8f\x04\x1d\x97\x1d\x85\x47\xd2\x65\xc9\xec\x26\x83\xc6\xae\x17\xbe\xcc\x37\x0f\x04\xc0\xb6\x31\xae\xf3\x83\x4f\x26\x76\x85\x5b\x42\x4d\xb5\x04\x6e\x74\x26\x28\xfc\x73\x2c\x6b\x74\x9c\xbf\x89\x6c\xa9\xbc\xb8\x2e\x9b\x90\x0a\x9a\xc6\x5d\x7c\x53\x73\x87\x61\x6a\xb6\x01\xfb\x50\x0f\x5e\x59\x0d\x94\x04\x10\xc3\xf5\x30\x1f\x5c\x71\xf3\x6d\xc8\x37\x3a\x77\x59\x56\xd0\xda\x22\xe5\xf7\x03\xed\xa8\xb8\xa6\x58\x41\xb8\x79\xc9\x40\x46\x9f\x41\x05\xab\xfc\x94\xbe\x7f\x6d\x1a\x64\xb1\xcb\x55\x79\xa6\x58\xfb\xe6\x86\x85\xba\x58\x5f\xab\x51\xbb\xbf\x8c\x89\x55\x90\x8c\x28\x2c\x04\x20\x81\x34\x85\x33\x3a\x57\xeb\x99\x9d\xff\xec\x15\xde\xf6\x8b\x9e\xfe\xf4\x54\xe9\xf8\x9d\xab\x83\x76\xf3\xe7\x37\x1e\xa2\x70\x3e\xc5\xb9\x6d\x39\x45\x5f\xc9\xaa\x24\x47\xbc\x6b\x51\xfc\xb2\x49\xb4\x24\xc7\x8d\x2c\xf6\x6e\xbc\xd3\x3f\xdf\x9d\x34\xf5\x68\xd3\x00\xeb\xf9\x6a\xde\x4d\xb7\x5c\xf6\x21\x93\xee\x26\xfb\x87\x07\x38\xfa\x0c\xa0\xbf\x0d\xc6\x9c\xbc\x72\xd6\x41\x60\x18\x5e\x49\x98\xc7\x1c\x08\x6b\x27\x63\xee\xd6\x2d\xdb\x28\x70\xa6\x1f\x33\xac\xfb\x3a\x66\xec\x3f\xf9\x25\xc9\x0c\x29\x46\x2a\x47\x51\x03\xeb\x26\x0b\x97\x5a\x75\xe3\x2c\xf7\x1d\x73\xb1\x6e\x65\x10\xb5\x14\x5c\x2f\x93\x8e\xc3\x62\x9e\xe2\x5e\xa3\x97\xcf\x22\x08\xbc\x3a\xf8\x13\x5f\x88\xea\x42\x3c\x9f\xf0\x54\xa3\x34\xa4\xb9\xc7\x1c\x29\xe5\xff\xe6\x81\x04\xae\x5d\xd7\x75\xad\x2e\x34\xc1\x08\xd1\xe1\x34\xe1\x6f\xfc\x0d\x07\x3c\xd2\x46\x33\x7d\x24\x51\xb9\x00\x61\x79\x97\xb1\x9a\x00\x6c\x38\x76\x68\x0c\x00\x5a\xcf\xa4\xe5\x7d\xe8\x74\x89\x59\x4e\x28\x97\x72\x33\x71\xf8\xd8\xe1\x45\x44\xfc\xb2\xc4\x31\x83\xb6\xb3\xd1\xf4\xe1\x02\x3d\xde\x5a\x8d\xb9\x49\xb6\x92\x08\x8a\x7a\x48\xba\x94\xb5\x7f\xf3\xcb\x6c\x0b\x7f\x65\xf2\xae\xff\xe0\xec\x2f\xde\x38\x8a\x22\x38\x71\xb7\xb6\x5e\xce\x13\x7f\x49\x08\x9b\xd6\x52\x9c\x52\x97\xf2\x3e\x96\x94\x7b\xaa\x6e\x14\x80\xaf\xe2\x95\x0a\x55\xaf\x10\xb8\x61\xbf\xe3\x7b\x0b\xde\x80\x78\x5f\xd9\xc9\xe2\xe0\xd4\xfc\x56\xef\x79\x0b\xf4\x87\x84\xd1\x10\xcc\x43\x08\x42\xc2\x39\xa0\x27\x8d\x4e\x0c\x36\x69\x5b\x7d\xe8\x5e\x1e\xae\x47\x20\xb9\xe0\x04\xf3\xda\x1f\x5e\xda\x09\x49\x19\xcc\xee\xf3\x3d\x33\x56\xfc\xe1\x52\xcf\xbf\xf
c\xce\x51\xb6\x68\x0b\x61\x92\x34\xeb\xe7\xf9\xb3\x1c\x5d\x17\x5a\x56\x1c\x8c\xc1\x69\x6c\xbc\xcc\x65\xfd\xc8\xb6\x9c\x30\x60\x44\x81\x24\x79\xdb\xbe\xb9\xe0\xe6\x78\x8b\x29\x79\x51\x8e\xad\x44\x4b\x12\x2d\xd1\xe5\x40\x46\xbc\xd3\x6b\x07\x26\x6a\xe2\x6c\x0c\x2f\x6c\xa8\x78\xa5\x45\x21\x76\x4d\x15\x32\xf3\x00\x3b\x4e\x1a\x19\xe1\x4c\x1b\xe9\xae\xc7\xa8\x49\x31\xcb\x0e\x6d\x19\xb6\xb7\xff\xcd\x71\x2f\xd1\xb6\xf6\x4c\xff\xf7\x4c\xf6\xdb\xd7\x0d\x8f\xbb\x1b\x19\xf7\xc3\x6c\x1f\x6f\x57\x0d\x0e\xed\x98\x56\x0a\x1c\x08\xe4\x03\xca\x9e\x02\xa7\xe2\x2b\x51\xda\x24\x03\x02\x75\xb4\x26\xd0\x05\x39\xb5\xd5\x48\x4d\x87\x08\x5e\x2c\xf5\xeb\x6b\xb0\xe7\xeb\x42\x3a\x7e\x3f\x5a\xbe\x3b\x85\xfe\xde\x6e\x7a\xa9\xc1\x69\xf5\x22\xb6\x3d\xb1\xa6\xf8\xa3\xe7\xff\xf9\x6e\x5e\x25\xca\x6f\xb7\xe5\xee\x56\xac\x3f\xb8\xc8\xa6\xcb\xef\xa5\x42\x05\x1b\x70\xc6\xb8\xff\x60\xc1\xfe\x4d\x7c\xbc\xb8\xd2\x8c\x4d\x96\xbf\x65\xc5\x19\xba\xb8\xc2\x49\x56\xfe\x05\x10\xad\x67\x97\x2a\xfc\xc1\x86\x48\xb9\x46\x3a\x6a\xc9\xa9\x80\x68\xd2\x45\xd2\x11\xca\x64\x86\x22\x89\x05\x8d\xbc\x67\x84\xc2\x43\xf6\xba\xc9\x5a\x83\xc8\x5b\x78\xbb\xea\x1d\x41\x6c\x1f\xa5\x47\x71\x69\x82\x09\xf9\xb5\x24\x47\xfc\x37\x1f\xd2\xe3\x74\xfb\x24\xef\xb8\x97\x75\xff\x27\x96\x7b\xbc\x61\x19\x52\x17\x78\x79\xd9\xd8\x0b\x6d\xa7\xe4\x2f\x33\x2a\x69\xa0\x89\x29\x80\xa6\xf5\x94\x17\x8e\x51\xe8\x2a\xae\x68\x65\xee\x39\x43\xed\x4d\x5d\x51\x33\x0b\xd7\x20\xd2\x4f\x53\x09\xdf\x7f\xcc\x2c\xd5\x54\xaf\x36\xd9\x55\xba\xbe\x69\xd4\x87\xe6\xf6\x33\xfc\xbd\x83\x10\x84\x25\x6f\xa4\xb4\x50\x01\xd2\xeb\x5a\x11\xb8\x4b\x86\xc3\xee\x63\xcc\x29\xf7\x1f\x12\xcf\x7a\x18\x82\xbf\xca\x5d\x33\x8c\xca\x89\xef\x7e\x68\x81\x45\xd4\x3c\x06\x4b\x18\xd5\xe9\x5f\xe1\xc6\x0b\xcc\xcc\x85\x28\xa4\x7f\x38\x21\x31\x20\xa7\x13\x2f\x8a\xd9\x25\xb0\xeb\x15\x48\x96\x6e\x27\xe9\x53\x8b\xb5\xd8\xaa\x67\x9a\xd0\x95\x5f\x22\x75\x0f\xd5\x51\xf0\x01\x6e\xff\x70\xf0\x97\x09\x61\x55\x46\xf9\xf2\xf8\xdd\x28\x25\x77\xad\x4c\xf7\xc7\x60\x48\xef\x6c\x7a\xbb\x08\x55\xfc\x53\xe4\xbd\xb5\x85\x69\x26\x8d\xfa\x0f\xe4\xb0\xaa\x2b\x88\xc1\x1f\x3d\xe3\x09\xd8\x0c\x2b\x7f\xfe\xcb\x32\xe6\x85\x33\x4b\xd6\x7d\x81\x85\x21\x6d\xbf\x0e\xef\x54\xf5\x69\x95\x6e\x87\xd8\x1f\x90\x1b\x8a\x4e\x2b\x9d\xd7\x81\xc3\x70\x5f\x22\xc7\x2f\x24\x2f\x17\x75\x69\xb8\x33\xd2\x79\xd1\x42\x71\xe5\x55\x61\xdc\x9e\x42\xf7\x05\x54\x81\x4d\xce\x81\x98\x86\xb0\x0f\x1a\xed\xa0\xa8\x0b\x88\xde\x2b\x65\x55\xdd\x27\x2d\x62\xd2\xa2\xe7\x16\xd8\x16\x8e\xb5\x5b\xfe\xac\x77\x8f\x4c\x49\x71\x08\xc7\x0d\x29\x50\x60\xfe\xd8\x0e\xb7\x04\xde\xfa\x14\x0c\xc6\x30\xba\x1a\x04\x91\xc4\xda\x9b\x45\x04\xc6\xef\xa9\x02\x7e\xf9\xe4\x52\x89\x67\x38\xcb\x86\x36\x32\xdd\xc9\x26\x84\x6e\xa1\xc0\xf3\x87\xca\xc8\xb5\x54\x36\x5e\x19\xd0\xa1\x17\x32\xfa\x4a\xde\x0e\x45\x8d\x42\xe8\xe4\xf2\x97\x14\x1c\x72\x81\x16\x68\x8e\xaa\x5a\xbd\x80\x3c\x09\x06\xf0\x69\x42\xa3\x67\xae\xfb\xc1\xc6\xe2\xd9\xf7\x5e\xbe\x04\xcc\x39\x3c\x01\x46\xe1\x42\xdb\xbd\xf5\xa5\x53\xc8\x1d\xaf\x37\x53\x43\xff\xfa\xb9\xac\xbf\xc6\x84\x0c\xd2\xdf\x5e\xc5\xbc\x34\xf9\x54\xe4\x84\xcc\x2a\x32\xb1\x05\x1a\x4f\x79\x98\x97\x9e\x74\x32\xfa\x06\x27\x72\xa0\x3b\x35\x8d\x34\xe1\x29\xe9\x5f\x5f\x37\x49\xa8\x55\x88\x27\xca\x1e\xd5\x92\x8f\x76\x0f\x67\xa5\x8e\x8e\x2b\xb3\xba\x4d\x20\xe9\x58\xcd\xc3\xaf\x2f\xca\x09\xb6\xb6\x21\x1f\x66\x05\x2b\x09\xc7\x4b\xdf\xc1\xd1\x58\xdf\x02\xa5\x07\x87\x6e\x98\xd3\x97\xd2\x96\xbb\x9e\x63\x11\x59\xd1\xf6\x7f\x34\xa9\x67\x10\x8a\xe0\x99\xbd\xd1\x4c\xed\x9f\x20\x80\x78\x3d\xfa\xe0\xc4\x42\x7a\xad\xec\x69\xa2\x63\xa9\xa8\x45\x1a\x93\x1f\xb8\xee\x60\x54\xf9\xf1\xa3\xd1\xbe\xb8\x2e\x9a\x89\x
34\x26\x39\x06\x6d\x09\xb5\x09\xf6\xd2\x06\xa5\xdd\x75\x08\xb2\x4e\xad\xdb\x76\x0f\xa6\x3e\x5e\x10\x4e\x31\x3d\x52\xea\x5a\x6b\x7e\x3b\x2d\xdb\xc4\xbe\x4c\xef\xc1\x59\x87\xc0\x52\xd1\x74\x6a\x95\xca\x42\x92\x60\xdd\x44\xfe\xd8\x25\x83\x85\x6a\x64\x3a\x4b\x5b\xe6\x07\xaf\x28\x75\x61\x4d\xad\x69\x5c\xd6\x0c\xe4\x89\xea\x03\xa8\xea\x92\x0f\x8c\x3e\xd8\xd9\xe4\x40\x9a\x8e\xa2\x7c\x77\x4a\xe7\x8d\x7c\x7f\xe9\x41\xb0\xb0\x8e\xa8\xf9\x73\xf6\xba\xee\x3f\xb6\x3c\x6d\xf0\x09\x6a\xf1\x37\x5a\x47\xb2\xcf\x21\x33\x73\xcb\x87\x9b\x2a\x5c\xc4\xd8\x68\xba\xbd\xf9\x2b\xd1\x7d\x16\xad\x95\x56\x71\xed\xd4\x3d\x6c\x2d\xf8\x12\x65\x19\x95\x16\x68\x4b\xcd\x90\x38\x9d\x28\x78\x25\x4b\x12\x69\x72\xbb\x39\xa7\x25\xce\x43\xa9\x89\xb8\x00\x1b\x8f\xe7\xbc\x68\xda\x16\x14\x5f\x4d\x85\x27\xb3\x06\x5d\xcb\xd3\x44\x19\x42\x7d\xf9\x0c\xe0\x5a\x09\xca\x6e\x28\x3b\x2d\xc0\xba\xa1\xe5\xa5\xc8\xa2\x5e\xb5\x67\xa3\x46\x5e\x06\xe4\x17\x86\x62\xc1\xdf\x15\xc3\x8b\xbb\x97\xa8\xff\xfb\x63\x4d\x49\x27\x39\x7a\x4a\x9f\x23\x1a\x3d\x3f\xc3\x61\x31\x56\x84\x5f\x9a\xfb\x8b\x95\xba\x16\xf9\x6b\xbe\x69\x5e\x0f\x04\x61\x54\xfc\x3c\x3a\x5f\x43\x26\x10\xf9\xf6\xcb\x75\x48\xf1\x52\x05\x59\xc9\xd0\x56\xa1\xd7\xc2\x20\xa2\x5c\x4f\x01\xdb\x76\x80\x3f\xc2\xd5\xf4\xd6\xc1\x40\x36\x52\xc5\x63\x8d\x8e\xcf\xea\x29\x05\xab\xe9\xb5\xb3\x99\x73\x7e\xe9\x64\xe0\x77\x25\xa1\x29\x29\xc9\x33\x42\xaf\xb2\x13\xde\x1c\x1a\x33\x97\xa2\xf8\x11\xfb\xd9\xf8\x72\xa5\x21\x43\xb9\xc7\x88\xbd\xdc\x7e\x0e\xdb\xaf\xd2\x33\x97\xe2\x44\xd3\x1f\x89\x3d\x60\x71\xc4\x3f\x33\x6a\x7d\x99\x85\xcb\x27\x65\xc6\x3f\xe8\xe9\x3c\xfa\xdf\x76\xcb\x65\xce\xc8\x7f\x6d\x28\x2a\x4f\x6a\xcf\xac\x6f\x2b\xd6\x24\x58\x27\xe3\x3b\x5d\xac\x11\x88\x29\xf4\x36\xf2\xa8\xfd\x7d\x22\x3d\x33\x95\x5b\xff\x92\xe8\xa8\x7d\x83\x57\x3f\xd0\xc2\x9e\x8a\x2f\xc7\x9f\x1f\x05\x4e\xf4\x6f\xaa\x3e\x18\xde\x11\x15\x9d\x84\xc5\x7d\xa6\xf5\x07\xa5\x3b\x01\x17\xcb\x6b\x5c\x30\x42\xea\x68\x2f\xc9\x3f\xe5\x47\x59\x02\xcf\x91\x45\x0b\x60\x60\xfa\xfb\xd1\x27\x9d\x3d\x05\x67\xb8\x1e\x59\x00\xf0\x7b\x86\xaf\xc2\xdc\x2c\x2b\x28\x20\x0d\x4c\x12\x7d\x27\xcc\x5f\x4e\x5a\x2c\xba\x9a\x4a\xb6\xdc\x19\x6b\x09\x13\x87\xb7\x3a\xeb\x7b\x5d\xdc\xf3\xc5\xdb\x79\x5e\x86\x54\x7f\x24\xae\x42\x58\xd2\x74\x52\xda\xa9\x22\x78\x90\x11\x5c\x7e\x3b\x91\x1c\x58\xf0\xa0\xd2\xb9\xa1\x47\xfd\x87\x00\x3b\xef\x0a\x90\x42\x8f\x7c\x8b\xd6\x86\x5c\xad\x4d\xe2\x18\x28\x6b\xbe\x64\xd7\x8b\x51\xa0\xec\x32\x74\xaf\x02\xb1\xc4\x3c\xf6\x02\x11\x74\x2a\xcb\x13\x81\xad\xc4\xae\xcf\x6e\xdb\xe6\x7c\x4b\x6d\x92\xb2\x60\x35\xcb\x08\x82\x6b\xf9\x3b\x79\xc5\xe0\x4b\xbb\xab\xc9\x6a\x7f\x25\xe9\x19\x0e\x69\x90\xf1\xf6\xb2\x97\x5a\x90\x75\xce\x4f\xcd\x13\xa5\x0e\xb9\x75\x08\xbb\x2b\x31\x71\xa0\x3f\x7e\xfa\xfe\x21\xf7\x4b\x87\x89\xfa\x43\xfe\xf2\x23\x56\x1b\xf4\xfd\x44\xcb\xa7\x58\xe7\x28\x13\x76\xfc\xcf\xda\x02\xa3\xd5\xb9\xfa\x4a\x48\xa9\x89\xac\x58\xed\xcb\xcc\x09\xf5\x80\x89\xfe\x68\x21\x45\x91\xd7\x6c\x3c\xab\xd2\xfe\x62\xd0\xa3\xc3\x9a\x92\x8e\x48\xc7\x72\xc7\xb1\x14\x4b\x06\xf3\xd3\xac\x00\xaa\x2a\xb9\xba\xd6\x04\xa6\x66\x98\xbd\x61\x55\x41\x35\x26\xbf\xe5\x55\x9e\x03\xe4\x95\xb6\x97\x3f\x9a\xa6\x93\x81\xd4\x41\xbc\x2f\xaa\x73\xb5\x29\x86\xdd\x3b\x42\x22\x9e\xd7\x56\x67\xd3\x0f\x37\xc1\x26\xd7\xbb\x12\xce\xf9\x0f\x94\x19\x14\x3a\xba\x9e\xbe\x2e\x70\x84\x9b\x78\x0d\x9d\x25\x66\x92\x06\xf1\x1b\xbd\x0b\xee\x51\x7b\xef\xfd\xb5\xdb\x81\x96\x56\xb8\xbc\xfc\x39\x70\x9b\xf3\x15\x37\xa5\xbb\x8d\x1e\xb2\x69\x92\x3e\x06\xc5\x45\x95\x44\x9c\x41\x79\x47\x65\x9e\xdd\x1d\x56\xd1\x34\xe8\xb0\x7d\xb3\xa0\x09\x80\xac\x7e\x3f\x3c\x56\xf0\xb9\x16\x18\xc6\x9c\xa4\x1f\x9f\x84\xe2\x13\
x8e\x14\xb3\x43\x52\x82\x58\x97\x78\x1b\x05\x07\xa4\x3e\xff\x73\x5c\xc7\xdc\xf3\xe1\x48\xe9\x31\x87\x24\xcb\xad\x70\x68\xe0\xe7\x90\xa3\x57\x44\x4c\x2a\x9a\x2c\xd1\x93\x84\xcb\xe9\xb8\x96\xed\xe7\xec\xb8\xdf\xe9\x7c\xb0\xbf\x4f\x12\x89\x6d\x1d\xb4\xf5\x03\x74\xa9\x31\x76\x9a\xb1\xa4\x4c\xdd\x94\x88\x8c\x30\xaf\xaf\xd5\x8e\x21\xdb\x07\x48\x5a\xa5\xe2\x48\xa8\xbf\xb0\x4a\x3b\x06\xeb\x74\x02\xc5\x48\x6b\x70\x76\x54\x16\xd7\xb0\x44\x61\x4c\xe9\x96\xa2\x14\xc6\x32\x0c\x4f\x48\x72\x05\x44\xf2\x28\xe9\x07\x01\x45\x2d\x8c\x9c\x37\xcc\xa3\x7c\xbe\xc7\x5f\xc5\x93\x2c\x70\x0e\x1a\xf0\x1e\x50\x30\x1c\x31\x04\xc4\x08\xf0\xfb\x62\xfb\x05\xaa\xef\xc4\x97\x5f\x39\xa2\x6b\xa1\x9f\x71\x45\x68\x4e\x9e\x12\x43\x93\xe9\xeb\x7d\x6b\x43\xb4\x7e\xfa\x0b\xfb\xd3\x9b\x4f\xe6\x97\x7f\xbc\xd6\x9b\xf1\x95\x27\x0e\xa3\x08\x62\xb4\xc1\xaf\xaa\x18\xfe\xde\xde\x66\x15\x8a\x61\x38\xa4\x59\xa4\x34\x0e\x73\xc5\x2e\x01\x26\x10\x8a\xc6\x7e\xe7\xfb\xb9\x5a\x1f\xbc\x03\xc9\xfc\x8a\x1b\x93\xbf\x40\x20\x59\xc5\x44\x85\x3e\xb9\xc1\x4a\x76\x92\x48\xc7\x46\x50\x73\xa8\x3d\x6c\xc2\x30\x9c\x63\xa9\x83\x61\x8e\x64\x5e\xc6\x17\x42\xee\x67\x2f\x42\x91\x97\x4a\x84\x15\x67\x60\x9c\xa0\x1f\x39\x23\x53\xf6\xe3\x33\xf8\x50\x5f\xeb\x1a\x8c\x66\x80\xf6\x26\x66\xff\xa9\x53\x01\xe2\x17\x78\x0d\x04\xa1\xa7\x4c\xc4\x8e\x9b\x14\x4f\x05\x0b\x29\x13\x2c\x51\x55\x72\x0f\x7a\x3d\x3c\xde\x22\x92\xa8\xd5\xa4\x02\xa8\x46\xb3\x0b\xbe\xaf\x88\xbc\x48\x49\xd3\x56\x15\xa8\xb4\xab\x38\x59\x57\x7c\xca\xea\xb9\xf6\x8f\xdf\x4b\x82\xa5\xba\x7b\xdc\xa9\x1f\x0a\x3e\x9c\xa5\x30\xb9\x99\x56\x30\x43\xfc\xed\xfb\xf2\xe8\x63\x81\x51\xa5\xf5\x4f\x1e\x9d\x5d\xe6\x5f\x3b\xef\x7a\x86\xe8\xd3\x86\xff\xc2\xb5\xcb\x8a\xd5\x1d\xf0\x72\xc9\x60\x51\x4c\xfc\x2c\xb5\x2d\xa4\x0c\x44\xd7\x57\x86\x43\x5b\x71\x51\xd8\xe9\x73\x57\x21\x30\xa4\x4d\x7c\xff\xc9\xba\x8e\x57\x2a\x2b\xce\x86\x5e\x0f\x6c\x45\x62\xcb\xb9\x4b\xce\x2b\x0d\x63\xe4\xa6\x9f\x4f\x05\x07\x16\x0f\xd8\xa9\x21\xa6\x3d\x21\xa4\x39\xb2\x05\x8c\x4e\x43\x17\x72\xf3\xc0\x36\xe5\x2b\x5b\xeb\xc6\x70\x9e\xf0\x01\xc5\x96\x8d\x0b\x27\x5b\xda\x83\x9f\x96\x1d\xfc\xa0\xe2\x9f\x94\x98\x36\xfa\x2a\xa9\x18\xea\x78\xdf\xdc\xe1\xbd\x4f\xa7\x52\xb9\x0c\x47\x91\x74\x84\xeb\x15\x66\x5b\x28\xd0\x3d\xd2\x04\x56\x03\x9d\x07\xf1\xab\x5b\x3e\x38\xe1\x97\xa7\xad\x65\xff\x49\xb3\xc6\x32\x1a\x6d\x22\x63\x46\x9e\xe8\xd7\xfa\xed\xa4\x10\x97\xd8\x2f\xe5\xaf\x7f\xfa\x21\x33\x9e\x31\x40\xfb\x54\x3f\x66\xc9\x91\xb1\x5c\xe1\xe5\x0c\x39\x66\xa7\xe1\x1b\xb8\x40\xcb\x0b\x88\x6b\x13\xfb\xf7\x1a\x88\x15\xa4\x19\x2e\xb7\xef\x99\x1f\x8b\x86\xcb\x26\x68\x57\xbd\xbd\xe7\xef\x48\x5f\xe2\xb4\x5d\xe2\x0a\x94\x53\xbd\x5b\x4e\x15\x46\xf2\x61\x13\x18\xa1\x88\x1d\x62\x21\x2d\xaf\x62\x78\xab\x45\x40\xd2\x53\xf2\x8a\xca\x00\x23\xd9\xe4\x53\xbd\x40\x6a\x45\xcd\xca\x71\x6f\x5a\x6e\xf1\x9b\xde\x6c\x39\x6c\x22\x2c\x98\xc0\x8c\x63\x4d\x55\xee\x91\xf9\x5b\x97\xc6\x71\xbc\xae\x52\x8c\x92\x9d\xc9\x25\xb6\xec\xbf\x69\x42\xa5\xcf\x0c\x8d\xb7\xd2\xce\x1a\x49\x10\x5d\xe9\x6b\xe5\x99\xb1\xc4\xf0\x23\xf2\x34\xd8\xde\x1c\x02\x75\xdf\x33\x7c\x3c\x8b\xca\xde\xcd\x18\xb0\x67\xf0\x21\xa0\x82\xcd\xdf\x54\x56\x0d\x38\x50\xd7\x99\x45\x06\x36\x73\x6f\xf4\x87\xdb\xb1\xcc\xc4\x5b\x8c\xb7\x06\xab\xde\x2e\x06\x81\xb5\xa6\xdc\xc1\xda\x42\x87\xad\xc6\x08\xca\x1f\x1d\x20\xf4\xde\x06\x79\x8c\x42\xf9\x07\x00\xca\xdd\x30\xc0\x54\xfb\x7f\x3d\xb2\xf3\x5c\x0a\xde\x72\x4c\xe2\x37\x55\xa1\x91\xf8\xd2\xe0\x31\x9b\xd5\x75\x57\x3a\xcc\xba\x20\xd1\x36\xef\x23\x77\xb8\x33\xaa\x2c\x6d\x4b\xf2\x92\x31\x50\x46\xd0\xfa\x15\x9d\x2b\x3a\x7f\xe8\xf2\xba\x43\x7b\x84\xf5\x64\x31\xde\x24\x47\xf9\x92\x0c\x2d\xb4\xea\x26\xd7\xe3\x08
\x0d\xce\xb4\x70\x97\x87\xa3\xf0\x9c\xca\xb0\xb6\xde\x31\xcc\xd0\x18\xcb\x11\x6a\x8b\x13\x88\x86\xe4\x3d\xc1\xfe\x7f\x03\xe3\x14\xc8\x1a\x21\x1c\xa3\x8a\x1e\x26\x49\xdf\xb4\x20\x08\x1f\x2b\xb8\xbd\xaa\x80\xea\x9a\x57\x35\x47\x34\x1e\xb5\xb9\xfd\xc6\x6b\x8c\x85\x91\x7e\xce\xeb\xa3\x19\x5e\xe4\x82\x58\x1b\x02\x6f\xca\xf0\x6d\xf4\x3b\x5d\x16\x02\xd7\x9f\x3e\x39\x4c\xf5\x71\x81\xb3\x59\x1a\x08\xe4\x70\x16\x4b\x84\x64\xca\x80\x49\xc9\x13\x18\x4f\xe5\xbf\x07\x47\x6c\xc5\x70\x2f\x0c\x4b\x4c\x39\xc4\xae\xff\xb3\x11\xc1\x33\xa5\x41\x8b\x5d\x67\x28\x45\x67\x10\xf3\x50\xeb\x24\x68\x19\x12\xcf\x1f\xc5\xf4\x73\xb2\x48\xcd\xcc\xab\x2d\x3d\xf1\xcc\xb1\x6d\x95\x1e\xcd\x65\x14\x7a\x1a\x29\x3f\xd7\xc3\x2b\x15\xd6\x2e\xa9\xc4\x64\xfa\x8d\x16\xad\xcf\xd0\xf2\xfb\x1e\x8c\x74\x89\x90\x4d\xd9\x92\xf9\xbd\x0a\xc5\xab\xc6\x3a\x0c\x30\x22\x29\x86\x77\x2d\x44\x31\x3d\xc9\x1c\xb6\xcc\x3f\xf7\x37\xb4\x3a\x23\x45\xac\x55\x97\xe6\x40\x7f\xdc\x6f\xde\x35\xee\x30\x9b\xb0\x65\x44\xb5\xef\xd2\x0d\xc5\x1d\xf4\x41\x85\xc9\xa7\x43\xa8\x90\x7d\xe8\x0c\x81\x0d\xce\xda\xc0\xab\x9d\x23\x40\x7a\x68\x3c\xb5\xd6\x99\x43\x4e\xa9\xe0\xe2\x08\xe7\x43\x81\xb2\x20\xc9\x6b\x66\x6e\x85\x3f\x9c\x6d\xce\x09\x80\x82\x46\xe7\x55\x1c\xf2\x89\xe0\xec\x11\xff\x6d\xf1\x53\x8b\xd8\x3a\x44\x1b\x57\x0a\x43\xb5\x09\x83\x1b\x8a\x78\xf9\x1b\x09\x4b\xce\x8f\xd7\xc0\x0f\x6b\x81\x83\x15\x20\x52\xc1\x4a\xc8\x2d\xa7\xe8\xd4\x00\x46\x27\xf1\xf4\xe1\x65\x50\x8f\x20\xc1\x2c\x7d\xcb\xc9\x4f\x11\xe3\x76\xf4\x19\x20\x46\x53\x60\x98\xbe\x15\xb4\x06\x30\xa6\xa2\x78\x0d\x8b\xca\xce\x6d\xa5\x4f\x38\x7b\xaa\xf6\xdf\x1c\x39\xdd\xfe\xf9\x71\xf8\x87\x43\xc7\x8d\x0e\xc1\x92\xc6\xa2\x37\xc3\x40\x7f\xfb\xe6\xc4\xa4\x5b\x54\x50\x9f\x20\xd5\x30\x36\xf6\xaf\x18\x49\x09\xd5\xbf\xc5\x9d\x97\x30\x5e\x6e\xa3\xa7\x9f\x01\x90\xda\xac\x54\x40\x35\x4f\x38\xe8\x1f\xec\x97\xce\xb3\xca\xb2\x33\xac\x2c\xd7\xfd\xda\x10\x13\xce\xe9\x9f\x5a\xff\x7b\x69\x47\x5f\x45\xb6\x20\x5b\xa7\xe3\x0f\x42\xbc\xbf\x6a\x8a\x17\xb1\x3b\x10\x89\xd9\x67\x57\x5b\x0a\x77\x35\x15\x83\xaa\x35\x6d\x3e\xac\xab\xe2\x78\xa1\xf3\xfe\x70\x8b\x62\xca\xb5\x97\x08\x13\xe1\x2a\xfb\x53\x69\x53\x12\xe3\x93\xdb\xbd\x87\xda\x80\x9d\x9a\x73\x02\x9d\x06\xa8\x7d\x4c\x77\x24\x69\xf8\x49\xce\xae\x11\xfe\x79\xd0\xae\xff\x9f\x1d\x5d\x6e\x2b\x8d\x3e\x4e\x6b\x7d\xa8\xe2\x61\x79\xc4\x1c\x01\x12\x86\xbe\xf4\x6f\x66\x2c\xc8\x94\x61\x79\xc3\xe7\x69\xbc\x27\x3f\x33\x65\x9f\x61\xdb\xe9\x31\x70\x17\xe8\xa7\xb8\x58\xe0\xba\x01\x46\x11\x70\xd4\xc2\xeb\x71\x9d\xaf\xd7\x42\xa5\x29\x73\x52\x51\x63\xac\xb5\xe0\x10\x96\xb8\x9f\xc4\x59\xfc\xfe\x83\x09\x81\xe1\x78\x17\xa5\xd7\xaa\x04\x64\x1c\x04\x27\x96\x06\xa5\x50\x63\xb1\xfc\x41\x11\xa8\x60\xa7\x7f\xf7\x9b\x77\x7d\xe1\xd7\x01\x91\x2d\x7b\x92\x68\x53\x93\xfa\x6d\xcb\xf3\x8d\x9e\x71\x1d\x60\x5e\xb6\x54\x98\x87\x56\x27\x6c\x18\xbb\x2b\xaa\xad\xf2\xcc\x8f\x8e\x2a\x40\x4f\xdd\xfe\x3b\x07\x75\xf8\x7c\x67\xd6\x3d\x34\x22\x7e\x9e\x3b\xbe\xae\xda\x75\x07\x48\x72\x41\xfc\xe1\x3e\x97\x7a\x18\xe4\xc7\xf4\x86\xd4\x01\x26\x33\x82\xc1\xda\xc3\x53\x1b\x1e\xf8\x79\xdd\xea\xa2\x55\x41\x97\x38\x09\x92\x1b\xd5\xb6\x7b\x1f\x61\xa9\x01\x74\x78\x76\xcc\xdf\x54\xe6\xff\xba\xbd\xa6\x99\xb2\x38\x96\x03\x7e\xfa\x77\x03\xd7\x51\xfc\xd3\x70\xa5\xff\xf4\x6b\x8d\x62\x8e\xbc\x28\x98\xca\xba\x2c\xdb\x0f\xb1\x73\x49\x94\x54\x6c\x31\xd3\xf1\xec\x8d\x8d\xa1\x3e\xc4\xe1\x91\x7a\x06\x82\xa4\x2b\x4c\x51\xe0\x8e\x5a\x74\xb2\x26\xe3\xd0\xda\xf9\xf0\x4e\xf4\xf4\xd5\xf9\x8b\x09\x5a\x70\x99\x88\x65\xfe\xfa\x9e\x30\x3c\x5e\x3e\xf9\x17\x7a\x8c\x70\xba\xda\x13\xd8\x99\x3d\x5c\x14\x55\x15\xcf\x46\x64\x07\xd0\x7a\x46\x9a\x14\x1c\xba\x87\x54\xbf\x03\xd
1\xfe\x6f\x76\xa6\x4c\x1b\xe7\xa8\x6a\xab\x98\xce\x78\x71\x5c\xb9\x76\xa7\x7a\x75\x85\x69\x50\xaf\x69\xbb\xa6\xcb\x4e\x4e\x97\x35\x49\x62\xac\x91\x31\xf1\x48\x79\x27\x95\xcf\xa8\x9c\x3a\x57\x17\xd9\x94\xa3\x0e\xad\xfa\xaa\x80\x54\xe6\xc5\x35\x61\xe7\x4b\x7a\x7b\xff\xc1\x9d\xfd\xf1\x19\x8e\xbf\x45\x35\xaa\x35\xdf\x23\xc0\xb8\x3a\xcb\x75\x25\xe1\x07\x0e\x4b\x77\x0c\xe1\x32\x46\x01\xca\x66\x19\x9d\x4d\x44\xc6\x69\x4c\xff\x44\x00\xcb\xb6\x87\xf4\x23\x16\x37\x89\xde\x7d\xd5\xae\xcd\xed\x00\x9a\xa0\xd5\x12\x14\x26\xc1\x1e\xde\xa7\x34\xae\xd3\xf3\x3e\x4e\xff\xf7\x7a\x2b\x7b\x49\x25\x8e\xbb\xe6\xc0\x88\xe4\x78\x7f\x5f\x1e\x70\x99\xdf\xb9\x34\x1a\x4f\xb2\x7b\xfe\x4c\x4b\x14\x82\x50\xec\xda\x5f\x04\x61\xa8\x49\x96\x3d\xc8\xef\xa0\xaa\x90\x69\x6a\x6b\x50\x09\x29\x39\x8c\x9e\xf6\x17\xa5\x62\xc4\x2b\x3f\x37\x14\x2c\x97\xc0\x1b\x4e\x2a\xd8\xfc\x7c\x11\x0d\x28\x72\x7a\x8f\xe2\xa1\x88\xc3\x05\x57\x71\x99\x67\x32\xfe\x0f\xa7\xce\x59\x85\x71\x6e\xeb\x8f\x66\x08\x4d\x75\x36\xd5\x85\x2d\x71\x68\xf2\xe3\x30\x3c\x92\xee\x2f\xff\xc7\x77\xa4\xaa\x80\x61\x18\x30\x28\xb1\x2a\x3d\x09\x61\x40\x3d\x72\x84\x2e\xc1\x5f\x00\x17\x01\x3e\x05\xa4\xf1\xf7\x52\x8b\xad\x6f\x11\x61\x5e\xbb\x42\xc8\xf4\xd1\x69\x9d\x33\xe5\x2e\xf0\x66\x35\x8f\x6a\x18\x02\x32\x4b\x26\x6d\x74\x42\xd5\xd2\x5f\xa6\x6b\xb4\xdb\xc6\x16\xb4\xc7\x67\xb4\x85\x6e\x77\x71\x09\xb5\x28\xac\x26\x6c\xd5\xdf\x05\x67\x55\x4f\xd5\xa0\xa8\x85\xcc\x2e\x40\xdc\x11\x59\x61\x69\xc8\x0d\xf5\x16\xf9\x42\xdb\x38\x17\x1e\x1b\x3b\xb8\x57\x50\x72\xd5\xda\x78\x6c\x64\xb0\x75\xbc\xd2\xb9\x7a\xfa\xf0\x9e\xd9\x67\xc3\xb9\xbf\xb2\xd4\x84\xbb\xd6\x27\x74\xa7\xec\x42\x76\x21\x55\x12\xec\xd4\xd7\xd3\xa6\x00\x32\x75\x81\xc6\xa7\x66\x3c\x7b\x51\x18\x86\xe5\x85\x7d\x0d\x42\x6c\xc2\x8c\x0a\x28\x57\xf2\x61\x84\x3d\xe6\xdd\x7b\x2c\x2b\x20\x85\x93\xf1\x09\x84\x39\xd2\xd5\xfe\xba\xce\xeb\x01\xbb\x7f\xe6\xc1\x9a\xae\xc6\x7a\x71\x8c\x0a\x22\xb6\x70\xe2\x9f\xf8\x88\xd0\x5b\xb6\x12\x8b\xad\x5e\x6e\x0a\x4b\x5b\xb8\x28\xbd\x9f\x4f\xf6\x6e\x54\xef\xe1\xae\xd0\xa9\xbf\xe7\xb2\x66\x28\x0a\xc4\x77\xb2\x61\x6f\x0f\x03\xd7\x0f\x39\x66\xfe\xf6\x58\xb5\x9f\x92\x5d\xee\xa9\xbb\xb9\xf8\x31\x3f\xc1\x19\x88\xa0\x9e\xca\x21\x7c\x79\xf9\x25\xd3\xa2\xeb\xb7\x1a\x97\xf0\xa8\xb9\x4f\xb7\x35\xfc\xed\x79\xe9\x3a\x30\x69\xe9\xaf\xb8\x99\xf8\xd7\x44\xb1\xdc\xa1\x04\x9c\x4a\xdd\xf3\xa8\xc4\xeb\x1c\x6a\x7f\x18\xe7\xa8\xa9\xd1\xfa\xb1\x21\x63\x60\xaa\x8a\xff\x91\x51\xb5\xf4\x77\x06\xa9\x9b\x3e\x35\x5e\xe4\xe5\x3a\x58\xdb\x9b\x2b\xc9\x49\x84\x21\xb6\x92\x8e\x4c\xc2\xef\x10\xd0\xb6\x0f\x25\xc6\x3f\xef\x9f\xce\x3f\x5a\x17\xa8\x30\xdb\xd4\x40\xf8\x2d\xc8\x40\x7c\x6a\xe3\x86\x14\x41\x2b\xdb\xaf\x2f\x19\x9f\x37\xb4\x60\x70\xdd\x4c\x97\x7c\xe8\x67\xc0\x51\xbf\xcc\x4b\xb9\xe0\x56\xa7\x23\x7b\xab\x25\x1e\xb1\x2f\xb8\xcc\xdb\xdf\x48\x58\x0e\xe6\xb8\xec\x7b\xa7\xb1\x7c\x85\x49\x04\x6c\x03\xb7\x0a\xa8\x13\xc0\x46\x30\xa6\x65\x8c\xa1\x0e\x91\xf4\xad\x05\xa2\xb9\xae\xf2\xc3\x48\x6b\xf5\x86\xc3\x6f\x09\x99\x24\x87\x51\x58\x25\x51\x20\xcb\xb5\xdc\xf6\xf7\x0e\xd0\xf0\x24\xba\x3c\xc9\x73\x5b\x04\xd4\xe2\x29\xb6\xb2\x41\x80\x80\xc5\x39\xa9\x77\x95\x2d\x75\x18\xb8\x8a\x5d\xc6\xe8\xa6\xfe\xe7\x42\x0c\x68\x4d\x95\x5a\xd5\xcb\x0f\x0e\x21\x48\xae\x1c\x55\x5f\x3c\x84\x1f\x04\x5d\x3a\xf7\x60\xdb\x90\x9b\xd4\x8f\x54\x02\xd1\x9f\x53\xf5\xae\xe2\x3c\x3b\xa5\x7b\xaa\x00\xbb\x07\x77\x50\x05\x8b\x49\x22\xe9\x44\x29\xdc\x06\x99\xeb\x3c\xf9\x85\xd9\x4a\x0b\x3d\xd9\xdd\xa6\xc0\xf8\xac\x60\x12\x2d\x64\x5a\x3b\xc4\x6f\x0e\xf3\xf8\xcd\xb8\xe9\x8e\x0e\xc6\xcd\x94\x29\xe0\x11\xde\xa7\xf6\xf6\x8b\xb9\x1c\x46\xd2\xdf\x21\x9c\xbc\xfa\xd5\xc5\xdd\x7d\xf6\x59\x57\x
b5\xf2\xa6\xe9\x5c\xbd\x08\xdd\xdb\x89\x21\xa3\x24\x0a\x10\x0b\x8c\xa2\x26\xa7\xf0\xb3\x30\x6f\x14\x5d\x28\xf1\x54\x01\x3d\x2e\x7d\x68\x21\xcf\xfc\x42\xc3\x8d\xf4\x01\xe1\xff\xb9\xbc\xb5\xb8\x3d\xe5\xf5\xb4\x53\xce\x71\x07\xb4\x61\x9a\x02\x82\xcc\xee\x54\xa4\x08\x8a\x10\xaf\x16\xdf\x38\x28\xea\x61\x3c\x0a\xfa\x97\x80\xed\xed\xde\x0e\x99\x36\x9f\x0a\xba\x2c\xa1\xab\x4e\xbb\xae\xc0\xe0\x96\x00\x38\x7f\x80\x80\xfe\x1e\xe7\xb6\x11\x57\xf9\xcd\x77\x0e\xf9\x7d\x6e\x79\x03\x6e\xb9\xb2\xd8\x47\x49\x89\x20\x0e\x2e\xd7\x82\x56\x39\x41\x63\xe3\x35\x37\x52\x01\x15\xf7\x26\x3b\xc1\x69\x96\x0c\xde\xec\xb2\xcf\x88\x0e\xf8\x1f\x2c\x3f\xe3\x2f\x07\xca\xf2\xd1\xb9\x2b\x32\x77\x68\x84\x70\x5f\x15\x3e\xd1\x66\x94\xe4\x6f\x6c\xf9\xc9\x15\x05\xd8\xb2\xf6\xf6\xb3\x6b\x7e\x0d\x08\x5e\xdf\x3c\x42\xa1\xe8\xa6\xc6\xe8\x3f\xea\x03\xe0\x51\x38\xe4\xa5\x2d\xe7\x36\xf3\xb7\x86\xc4\x60\x53\x22\x8e\x88\xac\xb0\xc6\x60\x12\x2e\x28\xe4\x15\xfc\x8f\xf6\x5d\x68\xf2\x89\x02\xbc\xcd\x06\xf7\xe6\x59\xc3\x8b\xa5\x06\x8d\x52\xb3\xd0\xa0\x70\xbc\x98\x30\x97\xa9\x99\xe7\xa1\x4b\x04\x36\x5b\x68\x71\xa0\x84\x26\x3b\x79\xbf\xe6\xce\x6f\x97\xc3\xb6\x49\x55\x5d\x69\x35\xfd\xe7\xc4\x2a\x04\xd1\x99\x8e\x56\xe7\xe6\xe2\x51\x40\x25\x82\x59\xeb\x28\xf0\xc3\xa7\xcb\x13\xbc\xcb\xc3\xd6\xd6\x9d\xa1\x1d\xa4\x88\x30\xc4\xbd\x48\x34\x4a\xb0\x4a\x43\xad\x2c\x24\x11\xa2\x25\x5f\x40\x58\xec\x07\x59\x7f\x30\x5d\xd7\xb3\x23\xd1\x4e\x3d\x2a\xbb\xe3\x73\x87\x13\xcb\x54\x09\x03\xc8\x5b\xf0\xa8\x7f\xe6\xfb\x24\x12\x8b\x9f\x7e\xef\xab\xf3\xab\x03\x39\x95\x36\x4d\x4f\xa6\x97\x8b\x5e\x9d\x85\x6e\xba\xd3\x43\xbf\xc1\x1a\x29\x38\x0b\x86\xbb\xef\xa9\xde\x3b\x64\x24\x85\x44\x3d\xa3\xf0\xb6\x44\x3f\x59\x49\x00\x8f\x5b\x5f\x1e\xb2\x85\x51\x30\x62\x93\x01\xd6\x2f\xdf\x65\x92\x69\x2e\x0b\x1f\xe0\x98\x6b\xe3\xb6\x0c\xb2\xd0\x1b\x96\x32\x94\x18\x44\x6a\x74\xec\x3c\xc3\x1b\x28\x0b\x4d\x38\x63\xe3\x7c\x29\x1a\x7d\xe4\x2f\xe9\x7f\xf2\xbe\xb1\x7d\xac\x35\xde\x5c\x39\xd4\x6d\x1b\xad\xcd\x20\x1e\x9c\xf0\x1c\x56\xec\x50\x26\xf8\x05\xf6\x66\x51\xec\xf2\xcb\xe7\x6e\x07\x46\x8f\xaf\xee\xe6\x37\x57\x49\x57\xbc\x59\xd3\x49\xee\xbf\xfb\xdf\xb3\x61\xde\x7e\xd1\x93\xb6\x05\x3d\xda\x86\x5a\x5b\x7b\x57\x12\xa3\x53\xe0\x4d\x02\xe7\x31\x55\xa4\x88\x43\x6f\x19\xca\x70\x5c\xde\xa8\x0e\xaa\x7c\x7f\xe7\x2c\xe8\x69\xc5\xa5\x38\x21\x96\xc2\x52\xec\xc4\x09\xc3\x97\x1a\x56\x64\xe1\x84\xb2\x5d\xad\xf8\xf3\xb0\x3d\x4d\x96\xa5\x6a\xee\xfc\xaa\xe9\x4a\x7e\xf5\x54\x0b\x46\x22\x64\x50\xf3\x52\xb7\x12\x3b\x6a\xe0\xa8\x3e\x27\x99\x64\xd3\xb4\x28\x65\xc3\x1c\xb4\xcf\x15\xe3\xaa\x3d\x32\x7f\x2d\xf8\xcb\x10\x12\xbd\x75\xe4\xbb\x52\xb2\xf3\x8a\x36\x36\x42\x0d\xa0\xa4\x0c\xec\xdc\x68\x9b\x34\x4e\xf9\x57\x16\x9e\x02\xb4\x34\x78\x81\x68\x07\x67\xf5\x5d\xfd\xc3\xb7\x9e\x16\xd4\xbb\x6f\x32\x48\x6a\xae\x30\x64\x59\x3b\xaf\x3f\xf5\x2b\xe6\x87\xe9\xbe\xba\x0f\xaf\xd4\x7e\x23\x89\x5e\x33\xf5\xe6\x65\x23\x34\xf3\x0a\xbe\x60\x23\x19\x5c\xf2\x30\x9a\x7f\x58\xc9\x1e\x95\x10\xee\x96\xe0\xb1\x0b\x6d\x3f\x67\x72\xd5\xb1\xb5\xd7\x42\xf1\x90\x68\x07\xc8\xc0\x10\xde\xa7\xfe\x42\xcb\xb1\x46\x2d\x9c\x6d\x6a\xa6\xde\x30\x46\xa9\x7a\x4a\x51\x59\xb9\x9a\x71\xdf\x7a\x93\xaf\x0e\xfd\x0e\xf7\x8f\xe7\xb0\x2d\xe0\xba\x12\xc7\x70\x2f\x52\x68\x43\xd2\xa4\xb1\xe6\xdd\xeb\xd8\x75\x0b\xa9\xc5\x12\x4e\x21\x95\x8f\x9d\x04\xa3\x15\x1c\xac\x12\xff\xf0\x45\x95\x73\x97\x1e\x29\x2a\x03\x13\x04\x08\xf5\xd3\x28\x18\x50\x70\x42\x26\x63\xf4\xd5\xb6\x0d\xdb\xc6\x3f\xb7\x80\x98\x74\x8f\xba\xe0\x72\x84\xa8\x08\xd0\x54\xc5\xe3\x20\x65\x7f\xe7\x58\x48\x10\x47\x66\x53\x40\x41\x13\x83\x60\x06\x69\xe2\x62\xca\x47\x9a\x3b\x05\xda\x2f\xa5\x72\xad\xb9\x64\x5c\x81\
x46\x08\x1d\x52\x2f\x5a\x6c\x8f\x4d\x74\x2b\xd5\xf5\x05\xc3\x01\x40\x4b\x8b\xe5\x3a\x26\x86\x02\x2a\xe0\xfd\x7b\x7f\x93\xd6\x0d\xf3\x53\x99\x8a\xa5\x9a\xf2\x21\x31\xa2\xb8\x3a\x47\x71\x05\xd4\x92\xc0\xf2\x52\x64\xe0\x45\xb2\xef\x4b\x6b\xdc\xa1\xfc\x69\x5d\x89\xd5\x9a\xfe\xa8\xda\xc4\xfd\xe7\x1c\x7e\xe4\x13\x1c\xae\xf1\x82\xc4\x70\x88\x18\xbd\xeb\xac\x6e\x9c\x96\x2c\x0e\x00\xfc\xab\x26\x7f\x99\xef\x78\x0d\x03\x6f\x94\x64\x29\x9d\x4f\x5d\x66\x88\xc9\x21\xa4\x36\x3a\x9e\x36\x81\xab\x8d\xc5\x47\xc9\x59\x39\x85\xf9\x1a\x3d\xad\xbf\x9e\x77\x7a\x25\x6a\x05\xaf\x3d\x41\xd4\x20\xc1\xe7\x71\xcb\xed\x61\x73\x1c\xbc\xfd\xd4\x7d\xc3\xc7\xb6\xb0\xea\x7e\x87\x57\xc7\x73\xb5\xdd\x4f\x7d\xff\xf4\x2f\x15\x95\x09\x8e\x7e\x70\xac\x3f\x9b\xb2\x9f\x17\xea\x21\xca\x4b\xa3\xb3\x08\x65\x46\xa6\xee\xb0\x56\xbb\xa3\x28\x30\x7b\x0c\x62\x4a\xed\x30\x51\x74\xa5\x41\x27\xc5\x89\x71\xf4\xfb\x7a\x9e\xd7\xb8\x3a\xec\xfb\xe4\x85\x8d\x44\x5f\xa4\x02\x58\x48\x10\xa9\x31\x0a\x9c\x6b\xdc\xdd\xbf\xa3\x22\x11\x85\x2e\x9b\xa5\xff\x63\x03\xe6\xf8\x81\x07\x69\x7d\xfb\x15\x5a\x4b\xa0\x7a\xf1\x90\x66\x91\xb8\x19\xbb\x46\x89\xb4\x79\x76\xb0\xb4\xc5\x12\x5c\xf0\x09\x54\x08\xee\x8d\x53\x5b\x13\xcd\x3f\x1a\x11\x8c\x8a\xa8\x95\x09\xd1\x54\xb8\x61\x9b\x67\xf3\x58\xd7\xb4\x64\x9b\xba\x06\x74\xd4\x44\x88\x16\x51\x60\x1d\xbd\xf4\xda\x08\x7e\xdc\x56\xa4\x00\xf5\x46\xcd\x5d\x5b\x97\xc1\xa4\x09\x32\xdf\x7e\x89\xf0\xda\xb4\xda\xc5\x99\x7e\xf5\xd3\xd4\xf8\xd4\x3d\xd8\x7b\xe3\x21\xcd\x94\x57\xcb\xc2\x92\xa1\x6f\x27\xad\x0d\x23\x8a\x55\x1b\x0f\x0c\x1d\x01\x9a\x04\xb4\xf6\xde\xa7\x8e\xdf\x2b\xe3\x9e\x6b\x79\xa8\x41\x7e\xe1\x06\x50\x26\x41\xa4\x0b\x37\xb7\xf8\xea\xc8\x3f\xfe\xc4\x8c\x1a\xa9\x43\x6b\xc1\xeb\xb8\x92\xd0\xec\x34\xa6\x8e\x8a\x98\xf9\x6f\xcc\x90\xd8\x3b\xdf\x26\x0a\x1b\xfa\x78\x0d\x5d\xad\x6e\x59\x43\x3d\x2d\x88\x23\xd2\xbc\x28\xa2\x70\x86\x32\xb3\x97\xd2\xd7\x56\xd4\x86\x30\x34\x0e\x64\xd4\x09\x9c\x98\xad\x5f\x2c\x01\xb3\xd8\xb2\x25\x44\x29\xc7\xdc\x11\xd0\x2e\x8b\x49\x92\xad\x01\x81\x5a\xb1\xdd\x76\x02\xfe\x70\x0b\x5e\xcb\xe5\xb0\x21\xf4\xb1\x85\x64\xc3\x80\x8a\x09\xdc\x10\x97\xcb\xf1\xcf\xce\x00\x2b\x0e\x18\x07\xbd\x45\x91\x0b\x87\x27\x1f\xb9\x33\x4d\x4b\x73\x5a\x8a\x70\x5d\xa0\xb7\x2c\xcd\x08\xc5\x31\xac\x30\xe0\xb5\x09\x58\x46\xe8\x4b\x50\x19\x3f\x15\x17\xc6\x24\x70\xf4\xb7\x3a\x9d\x9a\x75\xe4\x34\x3b\xd0\xf4\x28\xd0\x12\x6d\xd9\x9d\xca\x90\xca\xb2\x9b\x64\x8f\xed\x06\xac\xc0\xd5\x8f\x0c\x2a\xe1\xe6\x30\xaa\x74\x9f\xa2\xbb\xeb\xba\xa5\xfa\x6f\xff\xfd\xdb\x62\x03\xef\x8c\xf7\xc8\xb7\x38\xdb\x87\xc7\xfe\x1d\xcd\x62\x38\x2f\x18\xe9\x21\x62\x4f\xc6\x83\xaa\x3a\xd1\x71\x1b\x4d\xa6\x5b\x5e\xa2\xbd\x66\x3d\xda\xaa\x4f\x87\x53\x5b\x8b\xb1\x43\x13\x06\x73\xd7\x2a\x6e\x27\x65\x7a\x35\xa5\x56\x1a\xe8\x78\xcc\xa9\x04\x1a\xbd\xbe\x7d\x6e\xbf\x1f\x89\x53\xbe\x8b\x22\x35\xdd\x25\x7b\xa6\xd8\xee\x69\xd3\x4a\x17\x22\x9e\xee\xfe\x29\x06\x9d\xc6\x7b\xa0\x6e\x93\x00\x9a\x9c\x05\x4b\xd5\x60\x6c\xb6\x57\x11\xfe\xa2\xf4\x5b\x98\xe9\x5f\xb4\xba\xf9\x98\x50\x63\xaf\x34\xf2\x77\x8c\xb0\xd5\xb9\xca\xb3\x67\xd2\x65\xbf\x79\x3f\x18\x5a\xc2\x61\x49\x05\x61\x8e\x9a\x66\xda\xb8\x80\x2f\x24\x10\x39\x94\x91\xc4\xb2\x6e\x0a\x52\xe2\x6e\xbc\x87\xf3\x95\xc4\x7f\xf9\xb0\xc2\x18\xcb\x21\xce\x50\xbb\xe3\x72\x2f\xfb\x85\x8e\x75\x1b\xd9\xce\x0e\x47\xc4\x7c\x29\xc4\xd5\x0c\x14\x00\x9a\x53\x3e\x91\x3b\x44\xfd\x23\x74\x3c\xbc\xe7\xce\x87\x08\x52\x2b\x5d\xe2\xc9\x1b\x3f\xa3\xfc\x07\x02\x7e\x16\x03\x16\x89\x0f\x58\x9b\x3e\xc9\x03\x8c\x99\x6f\x92\xf0\x4a\x68\xe4\x56\xf5\xfe\x70\xf2\x4b\xb2\xcf\x09\x1a\x95\x70\xda\x31\xb7\x4e\x8b\x76\x94\xf6\xbe\x8d\x8b\xdb\xb0\x16\x58\xc3\x6c\x79\xf7
\x43\x23\xf9\x74\x3b\x5b\x5f\xbe\xd0\xf1\xeb\x11\x0f\x50\x30\xaa\xed\x5d\x4c\x16\x2c\xc3\x6c\x5c\x86\x49\x35\xa9\x73\x31\xaa\xb4\xe3\x25\x78\x18\xc8\x82\x96\xfe\x1b\x7b\x62\x3a\x81\xef\x7d\xa1\x90\x42\xeb\x2c\x26\x48\xa3\xb0\x3e\xee\x1f\x8f\x55\xf9\x14\x96\x16\xf8\x0f\x96\x7d\x51\x36\xac\x86\xec\x25\x5e\x0a\x30\xaf\x05\x3d\x95\x29\x6c\x26\xef\x81\x10\xe1\x29\xd6\x47\x55\xd0\x42\xca\x43\x24\x76\xa0\x0f\xc6\xd0\xef\x16\x95\x14\xf8\xdd\xcb\xf5\xf0\x3c\x6b\x95\xf1\x4d\xe2\x22\xf7\xc1\x4c\xe9\x39\x67\xf1\x64\x32\x38\x38\xd7\x89\x4e\xfb\xac\xb8\x1d\xc8\xef\xad\xe0\xf6\x28\xa6\x08\x82\x63\x0a\x10\x49\x4d\xaa\xee\xcd\x5b\x9f\x77\x93\xc4\x7a\xfc\x3f\xb9\x66\x05\x15\x62\xff\x01\xbe\x9d\xc8\x21\x37\x39\xa3\xd6\x3f\x33\x78\x7c\x4b\xa6\x97\xfe\x2d\x6c\x7e\xe5\x7e\xf4\x8a\xe5\x84\x37\xed\x09\x56\x5a\xca\x41\x47\x10\x7e\x80\x14\x3f\x60\xbf\xeb\xa6\x5c\xe2\x0f\xa1\x71\x6c\x78\xeb\x74\x8b\x81\xf7\x93\x41\xdf\xb7\x73\x36\x6c\xea\xee\x27\x1a\x86\x58\x5f\xcf\x2b\xa6\x1d\x0a\x22\x05\x53\xf9\xf7\x5e\x34\x99\x38\x65\x9e\x7e\xd3\x2e\xc2\x69\xfa\xd1\x70\x23\x05\x49\x35\xd4\xb9\xb8\x97\xfa\x42\x62\x70\x35\xe3\xfc\xd1\xeb\x29\x27\x55\x1f\x59\x00\xa8\x3f\xbb\x26\xda\x59\xdf\xf2\xb9\xcf\x7c\x17\x06\xba\x41\x19\x2b\x55\xe0\xcf\x28\x2c\xfa\x39\x4d\x2c\xf9\x5b\x0f\xc5\xe8\x1a\x1b\x73\xc9\xa6\xb5\xf3\xac\xcb\xbc\x96\xca\xc8\xaf\x05\x36\xfe\x04\x67\xd4\x66\xef\x33\x38\xeb\x86\xf3\xb1\x35\xcd\x99\xb0\x59\x48\x9f\xfa\x9d\x12\xa5\x15\x54\x75\xb6\x17\xce\xf0\x26\x7f\x0b\x4a\xdd\x71\xd2\x71\xbf\x68\x4e\x3c\xf9\x6a\x80\x1d\xd8\xfc\xf0\xe3\x65\x3d\xae\xdf\x2f\xd2\x40\x6d\xb3\x50\xa0\xce\x15\xd8\x5a\xa3\x40\x92\x61\xea\x97\xf8\xe8\x52\x41\x6d\xfc\x31\x9f\x1a\xb2\x97\x8f\x1d\x8e\x3a\x6b\xf4\x57\x1e\xcb\x18\xce\x58\xed\xfc\xa7\x78\xb9\x17\x42\xc8\x3a\x11\x85\xea\xb4\x1f\xec\xaa\xc5\x6f\x8f\x85\xed\x09\x16\xbf\x62\x4d\x31\x39\x28\xb2\x0d\x4d\xc0\x60\x1c\x10\x66\x46\x93\xed\xf8\xf1\x13\x08\x24\x60\x9a\x53\xd6\x31\x1a\x5b\x78\x1f\x2f\x85\xa9\xed\x6a\x70\xec\x9a\xf0\xb3\x04\x9a\xfe\x3e\x73\x4d\x08\xda\x49\x81\xf5\x95\xa8\xfa\xe5\x14\x83\x1b\x3c\x9d\x38\xee\xde\x66\x81\xba\x17\x84\x5a\xab\x6d\xbb\xe6\x15\xe3\xd4\x91\x83\xbb\x67\x9d\x7d\xb4\x94\xc2\xe7\x1c\xf8\x59\x50\xb4\xc2\xca\x49\xf2\xd4\x19\xeb\x65\x4a\xca\xef\xdf\x7a\x03\x96\xa1\x5b\x56\x85\x46\x02\x1d\xd2\x32\x24\xe2\x80\xa8\x5b\xb6\x77\x36\x83\xa8\xc4\x2e\x77\x15\x29\x8c\x89\xdf\x74\xdc\xe8\xa6\xbf\x68\xee\xd5\x8f\xd9\xf4\x14\x55\xef\x60\x17\xb7\xe0\x55\xe6\xbc\x16\x46\x1d\xc7\xd2\x7e\x73\xa3\x0b\x50\xbe\x24\x0a\xac\x5d\x1d\x3b\xe4\x29\xdf\x9f\x6b\x15\x4c\x65\xf4\xde\xa4\x4c\xf7\x9f\xa8\x64\xe2\x21\x31\x71\x2e\x6d\xb7\x76\xbe\x41\xdb\x2e\x3a\x77\xcf\x54\x3e\x88\x30\x1c\xab\xcb\x3c\x0e\x01\x66\x13\xc7\xd8\x88\xdd\xf1\xf4\xe1\x37\x8a\xfa\x80\xfb\x02\x61\x18\xc7\x2f\xda\xda\x9b\x57\xda\x1b\x8a\x0b\xf8\x07\xfe\xad\xb5\x54\xb8\x5d\xc1\xad\x87\x75\x62\x5c\xf5\x42\x31\x9c\xd6\x38\x32\xa3\x54\x81\x83\x3d\x34\xb4\x47\xb6\x48\x3a\x89\xc1\x50\x04\xd6\xd0\xb9\x62\x47\x86\x28\x6b\xa7\x67\x69\x61\x53\x95\x12\xa2\x26\xa5\x50\x8a\xb8\xf0\x21\xe8\x5b\xce\x00\xc5\xed\x9d\xf6\x12\x66\xda\x30\x05\xe1\x5f\xfa\xab\x57\xbc\x9d\x34\x8e\xc5\x48\xfa\x16\x65\xa2\x0f\x49\xd5\x45\x7f\x86\x3a\x33\x10\x73\xe1\x79\xc2\xef\xfb\xf8\x2e\x84\x0a\xe1\x09\x35\x40\x3a\xdc\x62\x35\xe7\xaa\x42\x55\xf6\x49\x3d\x12\xc5\xbf\x7b\x3b\x0c\xda\x16\x72\xe9\xdb\xbd\xed\x4f\xa8\xeb\xd1\x37\xea\x59\x39\x1d\x03\x4c\x82\x3a\xd9\x06\x29\x78\x2a\xa0\xb2\x19\xd9\x10\x3c\x79\x08\xe9\x48\x9c\x36\xe8\x52\xf1\xdf\x57\x5f\xa5\xa1\x6f\x53\x73\xc7\x39\x48\x8c\x82\xf0\x34\x32\x91\x5f\x5d\xdc\x00\x14\x5a\x63\xdd\xc1\x56\x28\x68\x26\x01\x7a\x74\xe
0\xf5\xb8\xdc\xcf\x7f\xec\x5f\xa9\xc7\x42\xe2\x62\xf6\x2b\xf2\x64\xe2\x83\xda\x7d\xcf\x8c\xe0\xd4\xf6\x4d\x38\xee\x6f\x25\x4f\xba\xbc\xa2\x17\x6c\xd9\xb1\xa8\x8c\xe1\xbc\x3a\x20\xc8\xab\x5e\x6c\xb0\xd2\x59\x02\xc8\xcf\x7b\x14\xa9\x48\xe2\x5c\xd6\x5c\x70\xfe\xd6\xfb\x0b\x0c\xc2\x6d\x81\x03\xb6\x56\xfb\x53\xfa\x09\x76\x5c\x36\xb2\xfb\xc1\x58\x85\xe7\x68\x91\xf9\xc6\x03\x1e\x74\x0b\xb5\x91\x60\xea\xc9\xdb\x72\x02\xd2\x9b\x7c\x37\xfe\x81\xda\xa6\x79\x19\xc5\x1d\xbb\x98\xc2\x81\x88\xee\xde\xec\x26\x69\x3d\x69\xeb\xc1\xe7\xa0\xdc\xbc\xe4\xad\xe9\x34\x55\xd6\xc4\xd8\x2f\x05\xb3\x2a\x89\x90\x2b\x71\x6c\x5a\xa0\xde\x62\x2d\x30\xdc\x1c\x0f\x67\x5c\xb6\x90\x11\xcb\x70\x22\x86\xb3\x7c\x99\xf9\xe9\xca\x38\x53\x20\x7d\x5d\xb4\x32\xe2\x37\x7a\xdd\xa2\x51\x81\xc7\xdd\x07\xba\x5b\xdc\xbe\xe3\x32\xab\x38\x1a\xaa\xf5\x4f\x0e\xb8\x5f\x12\xfc\x68\x38\xd6\xca\xdd\xc8\x32\x75\x53\x58\xf1\xb0\x92\x2f\x23\x1a\x0c\x5d\xdd\x69\xd2\x16\x16\xc2\x83\x34\xf0\x40\xe5\x33\x10\xf8\xee\x07\x62\xe5\x81\xea\x0a\x45\x76\x81\xb1\x8c\xfe\x90\x5b\x94\x7d\x06\x9c\x86\x31\x1d\xed\xbb\x55\x59\x52\x89\x20\x98\x5b\x8b\x32\x02\x0f\x22\xa3\xf2\xb1\xf1\x41\xc7\x4e\xf5\xa8\x35\xe7\x30\xfe\x8b\xdd\x6c\x9a\x5a\xbd\x67\x24\xe7\x26\xb2\x4e\xe4\x2f\x33\xa5\x95\x09\x38\xa1\xf1\xf3\xfc\x7d\xb3\xdf\xa7\xa3\x1f\xbc\x86\x3e\x93\x3c\x5c\xfd\xb7\x77\x87\x63\xfe\x16\xa4\xbd\xa2\x61\x5a\x94\xec\x26\xd1\x78\x74\x87\x02\xb9\xe7\x3b\xcf\xbd\x2d\x46\xcf\x81\x39\xcd\xc5\x47\xc0\x60\x8e\xed\xa0\x90\xb9\x47\xa3\xd0\xa6\x10\x13\x4f\x59\x03\x67\x0b\x56\x5b\x7b\x36\xd1\x50\x2d\xb2\xa7\xb4\xb9\x24\x7e\x6d\x20\x0b\x27\xec\xe9\x2a\xdf\xb0\x0b\xa7\x93\xac\x20\xd5\x57\x3d\x33\x2e\x8b\xd4\x0c\xc7\x90\xa1\x98\x46\x53\x57\x37\xe9\x20\x81\xd2\x1f\x6d\xb6\x9a\x4b\x79\x90\x14\xe0\xe9\xa7\x8f\x14\x13\x4e\xae\x30\xf1\xf1\xff\x04\x2f\x95\x85\x1f\x6d\x0f\xee\x54\x20\x18\x90\x5e\x26\xf6\x74\x39\x0f\xa6\x7f\x4d\x26\x4f\xa3\x6e\x5f\xb2\x51\x5b\xc5\xf3\x33\x4d\xc9\x54\xd2\x20\x53\x48\x1a\x3e\x65\x9a\xe3\x1c\x61\xa2\xdf\x52\xc3\x6c\x9c\xec\xef\x4e\xb8\x9e\xe3\x11\xc7\x1e\x77\x00\xa6\xaf\xab\xec\x66\x04\xc6\x61\x05\xe1\xe6\x04\x91\xa3\xfc\xaf\x2a\xaf\x05\xca\xa1\x53\x87\x81\xeb\x20\xce\x6f\x27\xcf\x3f\x7f\xdb\x49\x9c\xb3\xc6\x32\x4a\xa3\xef\x9e\xe8\x74\x9f\xdd\x40\x4a\x33\xd5\xd4\xc7\x1b\xc6\x6d\x0a\xa8\xcf\x1b\xe7\xcb\x6b\x57\xb4\x8a\xc1\x3f\xc3\x90\xdb\x25\xb4\x36\xd4\x4a\x79\x27\xf3\xbd\xec\x43\xf2\xf7\xd5\x35\x45\xdb\xc3\x21\xda\x43\x5f\xd8\xf2\x38\x1f\x7d\x3b\x2b\x2a\x7b\xba\x2a\x99\x20\x9e\x4f\x6e\x7f\x61\x2c\x06\x67\x14\xcb\xf6\x61\xba\xcd\xd6\x2a\xa8\x8c\x21\x03\x62\x92\x25\x32\x5d\x29\x76\x89\x84\xbd\x10\xe7\xf6\x02\x15\x0a\xc4\x28\x30\x2d\x58\xfd\x80\x02\xf5\xc1\x46\xf0\xfe\x94\xfd\x26\xd9\xf7\x0c\xee\xb1\x27\x8d\x7d\xa5\xb3\x99\x77\x32\x1e\x8a\x9c\x57\x4a\x10\xb7\x7a\xeb\x80\xeb\xff\xe9\xff\xd3\x5f\xe7\xb7\xe3\x86\x02\x2b\x2b\x6c\xc4\x23\xa6\x55\x44\xaf\x78\x16\x44\xa3\xa9\xb7\x30\x03\x18\x94\x0d\xba\xc9\x2c\xd1\xca\xb4\xce\xc1\x50\x54\x2d\xd1\xef\xaa\x05\x5e\x7b\xa7\x2c\xe7\xbe\x29\x61\xfd\x46\x6a\x2e\xd0\x71\x4d\x7a\x84\x5b\xd2\x71\xa6\x16\x76\x76\x93\x06\xbe\x3c\x36\xc4\xb5\xdd\xef\xa8\xa5\xe0\x31\x20\x86\x40\x92\x22\x66\x53\x62\xea\x94\xda\x6c\x5c\x96\x35\x43\xb6\x8d\x00\xae\x95\x00\xae\x8d\x00\xd6\xdf\xb8\x2c\xd6\x41\x7a\xd0\x41\xe6\x7a\xdd\xf2\xed\x8e\xb4\x70\xb4\x5c\x05\x3e\xf1\xbd\xf5\xbc\xd6\x64\xde\x79\x43\x99\x6e\x24\x7a\xc1\x84\xd3\x76\x3f\x36\x3d\x33\xdb\x0b\x79\x92\xe5\x2e\x62\xde\xe4\x8f\x5d\xe4\xbd\xd1\xf8\x33\xc7\xea\xa7\x2e\x3d\xdc\x90\x45\xe9\xdf\x58\x66\xca\xdd\x7c\x67\xb6\x3b\x77\xfc\x59\x50\x2e\xec\x15\x2b\xbf\x3c\xd8\xd4\x99\x6b\xba\x11\x7f\x27\x10\x
5e\xeb\xf8\xec\x59\x39\x2d\x24\x76\x20\xf7\x3b\x56\x5a\xe1\xaf\xf9\x87\xe2\x52\x35\xe1\x40\x15\xc3\x8f\x17\x0c\xaa\xe7\x1c\x06\x94\xfc\xd3\x0f\xab\xfd\x63\x13\x9b\xf6\x77\x7c\xe5\xb2\x65\xc0\xd6\x83\x41\x7e\xd9\x26\x07\xfd\xb2\x5d\xe8\x9d\xd9\xea\x45\x79\x73\x5d\xa4\xb9\xb0\xbd\x7b\xd7\xd7\xc8\x9c\x2b\xb4\x3f\x22\xbd\x16\xa2\xce\x39\x3b\xd6\x86\xba\xfd\x65\x09\x40\x86\xd9\xe9\x06\x65\x36\x66\x8c\xce\xba\x82\x36\x32\x75\xaf\xab\x18\x71\xd1\x42\x09\x1c\xd7\x07\x60\x20\x78\x50\x17\xd1\xaf\x03\x34\xae\x1f\x52\xc2\x48\x8a\x49\xdf\x2d\x4e\xb5\xce\xdd\xb3\xd6\x6e\xda\x97\x23\x83\x66\xba\x05\x63\x34\xd4\xab\x80\x72\x6b\x55\x38\x46\x95\x96\xec\xd3\x08\x68\x77\x4f\x42\x95\x80\x35\xfd\x28\xc0\x96\x4f\x72\x74\x73\xe6\xf8\xa5\x48\x31\xa5\x48\x38\xc0\x73\xbb\x11\xfe\x1c\x72\x41\xe8\x06\xa1\xb8\x44\xd1\xa0\x66\xd6\xf8\x33\x46\x53\xec\x9e\x91\x61\x15\xd0\x7b\xca\x23\x5e\x1d\x8e\xa9\xb1\xfa\xd7\x11\xf9\x9d\xfe\x4a\xa9\x6d\x3c\x7d\x7c\xee\x91\x5b\xc5\xf6\xc5\x4d\x59\x96\xa9\x52\x71\x40\x80\xa7\x17\x99\x6f\xde\x13\xdc\xf8\x87\x02\x4c\xae\x71\xb0\xfc\x17\x73\xf8\xe3\xc3\x24\x92\x8d\x7f\xb6\xeb\xd8\x2c\xb0\x13\x45\x12\x45\x5f\x2b\x7e\xfb\x43\xf7\x82\xcc\x94\x3a\xb7\xc6\xa9\xed\xfb\x9e\x25\xb3\x25\x45\xfe\x5b\xd5\x5d\x5b\xd2\xba\x65\xe3\xf2\x27\x72\x63\xbb\xff\x08\x54\x61\xcb\x3c\x8d\x6c\x3a\x42\x5f\x55\x09\x8a\xfb\xad\x09\x9e\x3e\x3c\x54\xac\xd9\x76\x7a\x3e\xc6\x7b\xc1\xc9\x0f\xe5\xf9\xc3\x56\x7e\xde\x76\xe3\x61\x01\xdb\x7d\x52\x60\x83\xd3\xac\x57\x07\x78\x59\xf9\x92\x30\xdc\x4f\x50\xeb\x73\x8d\x88\xb8\x83\x27\x58\x28\x91\xe0\xf6\x39\xc2\x94\x50\x6d\x00\x78\xac\xd9\x78\xac\xe9\xf8\x01\x64\x01\x15\x60\xff\x52\x9b\xee\xce\x58\x66\xc6\x7a\xff\xaa\x52\xe7\x2e\x51\x3c\x20\x76\xc3\x51\xdc\x7d\xe8\x1e\x34\x69\x37\x50\x56\x6a\x7f\x14\xd7\xf1\x9b\x12\x74\x42\xf0\xc5\x8a\x51\xe2\xe0\x29\x87\x63\x12\xae\x0e\x38\x06\x47\xc7\xd4\xd7\x52\x47\x53\x5d\x6d\x09\xd6\xe3\x38\xae\xc3\xf2\x7b\xbc\xd8\x76\xfa\x03\x84\xc4\x88\x55\x21\x08\x45\x17\xba\x12\x1f\xa8\xc4\x86\xb2\x2f\x3a\x7e\x48\x8a\xda\x39\x16\xac\x2e\x94\xf4\xe9\x17\xd0\x39\x0c\xc9\x53\x8c\x65\x34\x76\x86\x4a\x5b\xee\x60\xc2\x94\x35\x7f\x39\xc7\x16\x5b\xb5\x27\xa1\x0a\x6b\xe4\x9b\xe7\x5d\x9c\xc5\xdc\xf6\x22\xb9\x0b\x5d\x7b\xfd\xe0\x4f\x6c\x19\xb4\xc1\xe7\xb0\x53\xcb\x7a\xa2\x10\xc6\xab\x82\x57\xb8\xa0\x49\x9b\x24\xe9\x62\x36\xd9\x06\x12\x18\xbf\x21\x54\xa1\x3b\xd1\xdf\x89\x6c\x30\x49\x03\x22\xdf\xc0\x2a\x0c\x31\x98\x54\x43\xb9\x25\x7f\xf8\x0b\xc8\x7c\xbf\x0c\xc4\x35\x7d\x2e\x4e\x9d\xce\xd2\x13\xb9\x5d\x48\xd3\xfe\xb7\x2d\x80\x30\x08\x4e\xe8\x1a\x53\xba\x19\x1f\xba\xff\xad\xc7\xd7\x09\xd1\x59\x6d\x5d\xd7\xc1\xea\x40\xe0\x0a\x4f\xcb\xa8\x79\xf7\xd5\xea\x66\x82\x95\x97\x61\x13\x57\x44\xda\x7b\xe6\x56\x39\x59\x7b\x87\xd7\x53\x98\x3a\xcd\xfd\x55\xae\xde\x12\xb9\xe8\x5c\x63\xc9\x59\xc1\x8d\x72\x70\x3a\xd9\xd0\x6e\x9e\x80\x14\xc7\xce\x76\xb3\xa2\x34\x37\xb9\x68\x51\x18\x41\x96\x38\x62\xde\xfa\x81\x9a\xac\xd5\x54\xb1\x76\x1f\x84\x7e\xf5\x2c\x5a\xb6\xd4\xe1\x14\x94\x15\x5a\xff\xa6\xb6\x3c\x75\x79\x74\xc6\x1a\x03\x76\x33\x69\xdd\x54\x69\x8b\x0d\x4e\x19\x3b\x0f\x80\x79\x65\xa3\x04\xb5\x12\xd7\x36\x59\xc0\x9b\xf0\xfc\x4c\xc7\xb3\x5a\xaf\xed\x69\x8d\x06\xbc\x0b\x59\x27\xf9\x21\x41\x9b\xed\x93\x10\x05\x98\x81\x2b\x46\x95\x86\x0f\xcc\x38\x5f\x5d\x5f\xbe\xae\xeb\x5a\x8c\xdf\xef\x9a\xec\x29\x85\xa9\x1c\x10\x74\xc1\xd9\x47\xf5\x06\x4d\x0a\x4e\xc6\x75\x6d\xcc\xf0\xfd\xe8\xcc\x6f\x38\x01\x02\xc6\xdc\x98\xe2\xf5\xb0\x26\x7f\xe9\x91\x6a\x1b\x9c\xd5\xc8\xd8\x94\x30\x4e\x9e\x05\xf7\x3c\x2d\x55\x08\xfd\x7d\x9b\x6b\x7e\x19\xc8\xc4\x1f\x08\x7a\xb2\x8d\xad\xf0\x11\x30\xf5\x2e\x6f\xe1\xda\x1c\xe7\xc5\
xca\xdd\xb4\x05\x5c\x9d\xb3\x93\x85\xa6\x3b\x00\xfa\x3e\x59\x9c\x48\xf9\xdd\xe5\x5a\x22\x25\x0c\x5c\x74\x6a\x5a\xe3\x2b\x05\x7e\xb8\x1e\x30\xbe\x47\x5f\xba\x03\x40\x6a\x6b\xc1\x5b\x35\xc4\x3f\xe8\xe9\xb2\x32\x4f\x03\x95\x55\x5a\x1f\xfa\x3b\x41\xed\x7b\x5f\x6a\xa9\x0d\xbe\x0d\x6c\xaa\x7f\x5a\x34\x0d\x65\x2e\xc6\x5c\x73\xb4\x1c\x53\x11\x9c\xd2\x13\xc1\x5e\x50\x0d\x0f\x77\x02\x25\xb7\x05\xb0\x79\x55\x76\xe0\xf5\x87\x02\x05\xf6\x02\xe7\x94\xda\x05\xe4\x00\x2d\x15\xaf\x1f\xb5\x2e\xb9\x6b\xba\xd1\xee\x64\x1d\x89\x47\xb7\x01\x21\x0b\x00\x6a\x39\x41\x6b\x02\xf1\xbe\x7f\xc5\x22\x1a\x2f\x41\x64\x46\x62\xb4\x54\xc4\x79\x8e\x9e\xea\xec\x6d\x85\x28\x9e\x02\xc5\x68\xc9\xc5\x89\xfe\xfc\x03\x00\xf5\x26\x81\x91\x05\x2d\xe2\xf8\x40\x7f\xe6\x44\xe2\x18\xfd\x23\x51\x7c\xe9\xb3\x6e\xf4\x53\x1a\x99\xdf\x74\x7f\x09\x9c\x56\x55\x01\x36\x1e\xa6\x9a\x1e\x06\xf8\x63\x63\x24\x0b\xdc\x64\x6a\xdb\x68\x44\xa2\xd1\x44\x3b\xe7\x9f\xc0\xbe\xad\xc4\x92\x46\xa1\xce\xec\x16\x3a\xbd\xb8\x72\x8c\x9f\x5a\x3f\x19\x92\x9e\xbf\x91\xf4\x8a\x96\x49\x78\x63\x1b\xf2\xfd\x9d\x45\x56\x45\xa9\x5f\x63\xa8\xdb\x45\x48\x9d\x48\x26\x22\x4e\x7f\x7e\x74\x81\x57\x10\x0a\x45\x85\x84\xed\x53\xd6\xd7\x83\x89\xf5\x2c\xd5\xf4\x89\xfe\x8a\x13\x4b\x72\x1b\xf9\x15\xe7\x90\xa3\xa8\x8d\x89\x4b\x16\xb5\x73\xa4\x13\x43\x72\x09\xde\x05\x53\x40\x6e\x46\x17\xa2\xfe\x2c\xb0\x28\x25\xd1\x53\x9d\xa9\xf3\x18\xa5\x0e\x98\x5a\x66\x88\x89\x84\xf6\xda\xf3\x35\xbe\x84\x7a\xb1\xa6\x24\x28\x52\xf3\x1b\xb1\x8b\xbd\xd9\xc5\x78\x56\xee\xf4\xfb\x99\xde\x4a\x07\x49\xcc\x32\xe2\x07\xda\x24\x0d\xa0\x3f\xd0\x98\xc6\x2d\x3c\x11\x93\xcf\x51\x1e\xac\xec\xce\x5d\xd8\x7c\x05\x94\x40\x3a\x05\x87\xde\x6e\x74\x69\xd1\x99\x65\x57\x2f\x50\x19\x2e\xd6\x96\x4a\x53\x5c\xca\xc3\x88\x20\x89\x6c\xf6\x6c\x5a\xc8\xc7\x23\xe7\xd4\xfb\x07\xe8\x31\x6f\xde\x65\x66\x96\x8e\x88\x4c\x77\xf4\xa7\x89\xaa\x31\xba\x0a\x54\x94\xff\x1f\x49\x67\xad\x24\xbd\x0e\x05\xe1\x07\x72\x20\x33\x84\x63\x98\x31\x33\x67\x66\x66\xf6\xd3\xdf\xda\xff\x46\x5b\x9b\xb8\x64\xa9\x75\xfa\x6b\xd5\x19\xcb\x14\x4f\x74\xe4\xce\xd1\xa8\x2f\xe3\x95\x23\x18\x3f\x4b\x6a\x2e\xef\xd4\x52\xbe\x96\x63\x3e\x98\x94\x81\xd7\xc0\xca\x03\x53\xd9\x03\x44\xd4\x8e\x3e\x9a\x7a\xb1\xc6\xb8\x90\xfb\x8a\xe3\x89\xfe\x7b\xf3\xad\x34\x7e\x04\xf1\x90\x0c\xa5\x2a\xeb\x4a\x21\xc3\x35\x59\xf2\x54\x53\x3d\x2e\x3c\x7e\x1f\x7b\x29\x82\x82\x68\x13\x09\xc6\x81\x77\xb5\x5e\x51\xd8\xe3\x1a\x38\x83\x4a\x63\x74\x77\x0c\x1c\xe2\xf7\x4b\xd3\x14\x45\x5d\x44\x06\xe5\x85\x79\x64\xca\xc4\x08\x13\x54\x6b\xc8\xc1\x20\xa9\x3e\x80\x78\x0b\x74\x31\xf7\x7d\x3f\x40\xf6\x54\x61\x1d\xe7\x81\xfc\xdf\x44\xfd\xe0\x55\x74\x17\xe4\xb3\x0f\x61\x7b\xa9\xe3\x3d\xca\x88\x3e\xc6\x47\x28\xf2\x76\x2d\x60\x85\xf9\xdd\x04\x77\x7e\xed\x5d\x9f\x74\xd1\x1c\xde\xa9\xcf\xa8\x86\x53\x3c\x32\x37\xa8\x72\xa7\x47\xa4\xf2\x23\x81\x27\xf9\x91\xb8\x33\x3d\x54\x7a\x7b\xc3\x52\xbe\x1d\x67\xff\x70\x88\xd2\x27\x7d\x92\x88\xb6\x50\xfd\x72\x8b\x69\x43\x06\xba\x9e\xa3\xb9\xe7\x79\x51\x33\xd2\x3a\xb9\x33\x0d\x81\x7c\x30\x5e\x0a\x27\x8b\xd5\xf9\x2b\x0d\x7f\x60\x40\x2d\xcf\x16\xf1\x5c\x32\xf1\x22\xfc\x74\x8e\x33\xfb\xdc\x82\x04\x0b\xfa\x4e\x0a\x07\xef\xaa\x4d\xdb\xe6\x00\x93\xb9\xb1\xfb\xc3\x4e\x9e\xd1\x5c\x69\x6a\x3d\x32\x10\xc1\xf7\x51\x61\x88\x2f\x53\x84\x7e\xe8\xc3\xb9\x58\x33\x60\x27\xc6\x0d\xfa\x34\x5e\xe9\xee\x5f\x65\xd6\xd9\x4a\x93\x93\xa0\x9f\x93\xa0\x27\x08\x04\x60\xb3\x33\x7b\xaf\x46\x25\x36\xae\xb1\x67\x79\xa0\xbe\x49\xc4\xc1\xbf\x7b\xbe\x10\x3f\x0e\x45\x9c\x1e\x22\xb8\x78\xf9\x98\x57\xf6\x70\xdd\x7e\x36\x52\x48\x30\x2e\xf0\x2c\x13\x43\xe7\x2f\x75\xe7\x0e\x2b\x0c\x8e\x20\x61\x72\x15\x20\xb7\x52\xeb\xaa\x22\x14
c2\x7f\xef\x29\xc9\x30\x52\x81\x07\x70\xfc\xac\x1e\x74\x02\x34\x23\xf0\x94\x54\x26\x1d\x29\xa4\x34\xa5\xde\x3d\xdb\x05\x5f\x7a\xa2\x6a\xce\x6b\x57\xe4\xf4\xdd\x37\x8d\xe4\x4c\x96\x22\xb0\xa9\x7b\xd0\x5f\xc1\x41\x82\x9d\xa4\xe1\xf0\xc7\x99\xbb\x32\x77\xce\x34\xaf\x6a\xbb\xb6\xc7\xdd\x72\xb0\x5e\xb9\xd0\xa6\xc2\xa5\x82\xdb\x53\x07\x93\x78\x67\x89\x04\xb8\xc2\x29\xd2\xbd\x1a\x2d\x6e\x05\x00\x7a\xbd\xf3\xdb\xe0\x15\xdd\x70\x20\x43\x40\xb7\xdb\xf1\x1d\xc9\xba\x67\xa6\xcb\x03\x60\x41\x5f\x68\xee\x28\x41\x8a\x4b\x59\xa0\x32\xd0\x06\x84\xd7\xea\xb0\x87\xc4\x29\x09\x65\x1b\xf4\x92\xfe\x99\x74\xb6\xbd\xc7\x26\xbf\x22\xc1\xe9\x56\x9b\x28\xbc\x85\xa2\x92\x7c\x4b\xe9\x72\xf7\x7f\xac\x67\x77\x69\xc6\x0c\x46\x1b\x92\xc7\x0b\xf0\xd5\x91\x2f\xc4\xba\x72\xc7\x01\x89\x62\x2f\x44\xc9\x5b\xd8\x46\x72\x17\xe5\xe7\x0b\x39\x4a\xc9\x99\xe4\xe8\xae\xea\x70\x3f\xf6\x26\x2c\xb6\xd1\xc7\xde\xaf\x29\x21\xd6\x27\x67\xce\x7f\x8c\x3d\x88\xfc\xbb\xc2\xe5\xb2\xa6\x3d\x62\x74\xc9\x2b\xcd\x3f\x6b\xfa\x3e\xe3\xa5\x76\xfe\x27\x79\x28\x65\xf0\x1e\x44\x99\xd7\x71\xba\xcf\x91\x05\xf0\x9f\xce\x55\xaa\xdd\x50\x23\xa0\x53\x95\xac\x5a\x68\x98\x3e\x80\xc1\x72\x4e\x1c\xfd\x70\x40\x69\x2b\x3f\x37\x02\xcd\xec\x3c\x98\x26\x26\x07\x2e\xbc\xb0\x96\x95\x4a\x93\x3c\x78\xe4\x85\x78\xaf\x5b\xbe\xc2\xbe\x61\xc5\xf5\x33\xd2\xa8\xb0\x89\x7e\x8a\xbb\x5f\xbc\xc5\x16\x30\x59\x58\x74\x47\x8f\x0f\x4d\x68\x19\xa7\xd7\xaf\xe1\x4c\xd6\x81\x97\x5b\xfa\xe2\x7e\xdd\x5a\xda\x4b\x2d\xaa\x2b\x0c\x38\x7c\x24\x6b\x3a\x6e\xab\xc8\xae\xf8\x4f\xa9\xbb\x56\x98\xdc\x73\x47\x00\xb0\x6a\xe1\xec\xf0\x95\xce\x8d\x55\xfc\xac\x46\x7b\xf6\x8d\x66\xc4\xb2\xea\x42\xcb\x6b\x53\x6f\x1d\x00\x0a\xfb\xf7\xf8\xc1\x50\xfa\x5b\xf0\xcd\xb8\x49\x08\x81\xe7\x17\x2f\x5d\x5a\xd1\x56\xab\xa0\xec\x50\x78\xda\xb8\x80\xb4\x08\x84\xe4\x1d\x48\xf0\x33\xa2\x8c\x50\xe5\x7e\x91\x5d\xba\xef\x08\x39\xba\x01\x10\x7e\x72\x49\xe5\xea\x39\x11\x9d\x99\x30\xbb\xcd\x98\x61\x54\xd7\x0b\x7a\x86\x89\x84\x71\x10\xfd\xe5\xce\x68\xeb\xf5\x55\x4f\xda\x3d\xfb\x24\x60\xe5\x36\x68\x97\xaf\x9b\xa5\x8d\x9e\x6d\xff\x90\x07\x84\xb9\x17\x96\x9d\x9b\x2e\x07\xcf\x44\x20\xef\x2d\xb5\xef\xfe\x57\x51\x8e\x73\x53\xbc\x63\xd2\x7b\x02\x56\x50\xc6\xe6\xcf\x6f\xa1\x74\xbe\x30\xa8\xb6\xe1\x2b\xea\x4c\xad\x85\x92\x34\x1c\x85\xc1\xa8\x64\x17\xf5\x6f\x27\xa3\xd2\xc6\x63\x70\x64\xe1\x39\x31\xdf\xee\x36\xc6\x73\x9c\xfa\xa1\xd9\x90\x2d\x08\x62\xfd\xfd\x1a\xa8\xb7\xa1\x61\xb0\x85\x8e\x39\x05\x29\x29\xdc\x00\xce\xa5\x69\x1d\x78\x45\x4f\xa3\xe6\x34\x63\x49\x2e\x38\xfb\x1a\x1e\xca\x8c\x57\x89\xc1\xa7\xc5\x78\x23\x5f\x11\x58\x63\xd9\xe9\x79\x1f\x02\xd0\xfc\xcb\x72\xb7\xc9\x52\x28\x35\x35\x57\x94\xa1\x1f\x00\xb2\x3d\xc4\x7d\xac\xdf\x9c\xa8\x0b\xd1\xd6\x86\xd5\x0e\x9e\x65\xcd\x4b\x64\x6a\x6e\x11\x98\xdc\xe4\x72\x06\x23\x14\xc3\x5d\xc9\x97\x53\xa6\xf2\x0e\x9a\x00\x1f\x71\xb8\xb6\x7d\x08\x35\x2c\xf5\xcc\xf8\xa0\xb7\x42\x44\x9c\x01\x08\x4a\x1d\x80\x4a\x03\x7e\x74\xf6\xa8\x6e\x2b\x74\xb0\x8e\xdd\xeb\x43\x5f\x5e\x07\x16\x13\x77\xcb\x59\xca\xb5\x60\x2d\x93\xc7\xbf\x16\xb5\x5d\xeb\xb9\x77\x22\x1f\x3d\xac\x57\x05\x89\x3e\xa9\x4b\x75\x07\xf6\xc3\x7f\x62\x0e\x4e\xae\x12\x74\x2c\x3a\xa1\x44\xcc\xd1\xdd\xbe\x8a\xdb\x7a\xef\x80\x0b\x7c\x2a\x9e\x1f\xb3\xc6\x46\x6c\x73\x5a\xb5\xcb\xd5\xe0\x74\x31\xd8\x9c\xa5\xc8\x6f\xe0\x6e\x8f\x06\x80\x75\xe8\x76\x1b\xd7\xa6\x8a\x06\x1e\xcf\x1f\xb0\x34\x5c\x56\xe0\xda\xc8\x39\x86\x40\xaf\x76\x04\x06\x02\x47\x89\x7f\xfa\x56\xbc\x49\x63\x97\xbd\xfd\x2c\xdc\x22\x8a\x60\x20\x6b\x3e\xaa\x79\xd9\x0f\xc7\x80\x26\x82\xf9\x9a\x8a\x3a\xa8\xa3\xbb\xee\x41\x34\xa5\xef\x4e\xd1\x4f\xac\x0d\x04\x18\x26\xe3\xd2\x39\x97\xd8\x5d\x40\x50\x55\x76\
x18\x99\x6f\x00\x92\x6d\xf7\xe4\xa2\x58\x06\x75\x1f\xec\x31\x39\xd9\x02\xcd\xdd\x94\x78\x3e\x46\xf4\x47\x43\x82\x7d\x53\x5f\x19\xcb\x4d\xfe\xe9\xb1\xd4\x10\x39\xb6\x92\x1e\x2a\x63\x3f\xc8\x11\x70\x2c\x27\x0a\x04\x63\xd7\x82\xdf\x8a\x3f\xcf\x1b\xb9\x7e\x73\x5d\x38\xb0\x9b\x7e\xa4\x3d\xb2\xf7\x52\x97\x06\x05\xb5\x6a\xa7\x95\xa7\x80\x80\xc8\xe7\xd5\x2f\x56\x9f\xde\xb8\xac\xfb\xb6\x8d\xd8\x2e\x5f\xd0\x75\x30\xff\xc7\xba\xe1\x16\x7c\xce\x29\xe9\x50\x4d\xfd\xe2\x4b\x74\x95\x76\x9e\x55\xe4\xaa\x69\xa4\x04\x2b\x0b\x45\xdf\xdf\x8f\xb7\x97\x60\x47\xc7\xe4\xd3\xc3\x04\x34\xa3\x6c\x8e\xeb\x0c\x67\xbb\x78\xf5\x89\x77\x2c\x0e\x84\x66\xd1\xb1\xca\x0e\x84\xbe\xb2\x91\x30\x23\x04\x5d\xb0\xd3\xdb\x13\x91\xad\x6b\x1e\x97\xa9\xa6\x17\x8f\x93\xf6\x8a\x29\x43\x60\xf7\x88\xf0\xc2\xd2\x29\xa0\x32\x7f\x8f\xed\xdf\x3c\x5e\x74\x56\xf4\x33\xa0\x58\xae\xaf\x4f\x82\xa8\xad\x57\x03\xbd\xdf\x09\x34\x45\x8e\xe5\x8f\x51\x8d\xe9\x9a\xa4\x5a\xc2\x4f\x15\xcf\xaf\x0b\xdd\x06\xb7\x0f\x7e\xac\x83\x71\x0b\xf7\xa3\x40\xab\x7b\xb2\x6a\xc9\x6c\x41\x92\xbe\x05\x7d\xae\x05\xc9\x45\xb6\x8e\x4f\xc9\xdb\xd8\xb6\x93\x71\x50\xaf\x50\x09\x60\x9e\x41\xfe\x25\x61\x34\x74\xc9\x8e\x02\x76\x55\x13\xef\x81\x29\x8a\x5d\x55\x37\x97\x4a\x08\x98\xec\xad\x6f\x7e\xae\x1e\xf0\x35\x66\x56\x2f\x5c\x50\xeb\xbe\xe4\xdd\x5a\x58\x27\x68\x01\x45\xbd\x13\x05\x20\x77\x33\x21\x6a\x8e\x9c\xe3\x1d\x9e\x4b\xd3\x9d\xf1\xdb\x9a\x91\x34\x41\x97\x60\x06\xad\xc5\x2d\x62\x05\x42\x16\xd0\x40\xa6\x0a\x2b\xea\xa8\x17\xeb\xbc\xc3\xc6\x4b\x1b\x8f\x5a\x87\x10\x75\xb8\x89\xb6\x8c\xb6\x4a\x93\x36\x72\x28\xde\xf5\x86\x14\x26\xbc\x20\xeb\x9d\x38\xed\xc8\xc9\x3c\x30\x65\x24\x00\x4c\xae\x4d\x5e\x44\xd9\x2f\xa6\x93\xa3\xda\xf4\xe3\x9e\x12\xa7\xcb\x9e\x8c\x8b\x0f\x43\xb3\x03\x79\xf2\x4a\x8a\x31\xfa\xc5\xb9\x4a\xa8\x69\x02\x12\x6e\xaf\x31\x1d\x6d\x13\xf2\x49\x6b\x15\xb1\xbc\x19\x9a\x2e\x00\xa2\x5e\x61\x07\xdf\xbb\xda\x2f\x74\x17\x4f\xe1\x12\xb2\x9b\x47\x96\x3b\xa0\xec\x71\xd3\x1b\x71\x9d\x57\x8d\x06\xc4\x18\x86\xca\x47\xb7\x9b\xf0\x2b\x43\x2f\x94\xa4\xd9\x95\x72\x7f\x83\xe1\x85\x68\x87\xf7\x48\xbc\x5c\x3d\x4a\x9c\xdb\xfa\xcd\x3f\x84\x5e\xf8\xe3\x5f\x9d\xd9\x53\x7a\xce\x7c\xe5\x69\xed\x64\xd5\xea\xf5\xfb\x2a\x92\x30\xe6\xa6\xac\xf1\x83\x21\x21\xce\xed\xea\x02\xa5\x36\xf9\xa6\x7e\x47\x08\x98\xe6\x9a\xf2\x6e\x0e\xe4\x26\x28\x6c\x99\xfa\x9c\x16\x22\x2a\x4e\x14\x8f\x51\x11\xa1\xf3\x0b\xc4\x00\xf5\x3e\x42\x88\xae\x2a\x09\xd0\x3d\x3b\xe1\x44\xdd\x04\x2d\x20\x50\x93\x61\xf7\x26\x0a\x37\xca\x8b\xa9\x35\x2e\x81\x68\x0a\xa3\x80\x6d\x43\x28\x67\xa6\x93\x33\x7a\x70\x40\x9d\x3c\xed\x51\x2a\x4a\xaa\x29\x8b\x23\xe5\x00\x20\xcb\x3a\x7f\x2d\xf1\xd9\xb3\xf8\x11\xb2\x31\xfa\xdd\xb9\xcc\x1d\xe4\x44\xdf\x20\x49\xe6\xc7\x09\x74\x82\x02\xe5\x52\x0c\x08\xac\x72\x26\x63\x2a\x04\x71\x2f\xbb\x1e\xd5\x59\xb6\x74\x08\xba\x32\xe9\xd7\x85\x12\x58\x22\x74\x72\x88\xa6\x47\x0d\x91\xd5\xfe\x59\x28\x4c\x3b\x7e\x25\x4c\x17\x95\x8c\x3b\x96\x1b\x12\x8a\xbd\x20\xef\x02\x74\xb5\xe0\xf2\xca\xf8\x85\xe0\x5a\x99\x00\x8e\x9b\x0c\xac\xe3\xb0\x5e\xe8\x06\xb1\x6c\x08\x96\x8c\x42\xb8\x6e\xe3\x97\x16\x8a\xe9\xab\x91\x76\x09\x04\xc6\x71\x79\xc8\x33\x2d\x85\xf0\x96\x5c\x19\x46\x01\x2e\xf3\x68\x2e\x75\x9e\x5a\x4b\xdc\xe3\x2e\xbd\x01\x24\x97\x62\x73\x73\x2a\xdd\xe2\x39\x52\xe4\x9d\x5c\xd7\x2e\xa3\x3f\x46\x97\x86\xa1\x52\x06\x15\xa4\xcf\x3f\x31\xa5\xaa\x68\x3f\xc9\xe3\x1b\xbd\x64\x1f\xbb\x06\xac\x56\xa8\xb0\x3c\xb4\xb4\xca\xc8\x1a\xc9\x03\xa1\x71\xf0\xc5\xf3\x77\x52\xde\x20\x08\x82\xa8\xed\x71\xd6\x6c\x7c\xa5\xc3\x40\x7e\xac\x65\x23\xee\xfa\x85\x62\x99\x3b\xe7\xd6\x25\x0c\x87\x35\x9d\x96\x54\xdc\x12\x31\xd5\x07\xba\x44\xa4\xe2\x7c\x8c\xec\x17
\xc7\xfe\x52\x8b\x50\x7f\xf5\x37\x1f\x48\xfc\x8c\xad\xea\xb9\x61\xb8\xd0\x29\xc4\xba\xae\x83\x01\x80\x14\x9a\x3d\xe0\x75\xa2\xd9\x6e\x91\x07\x6d\x79\x11\xec\x45\x3f\xc7\xf7\x03\x89\xc5\x22\x16\x2a\x24\x7b\xdf\xd2\x9b\x28\xce\x6d\x46\xd6\x3d\x92\x76\x09\x54\xf8\xd9\x2d\x3b\x79\xa2\x95\x67\xd3\x4f\xfd\xb9\x4f\x8b\x03\x2a\xc3\x4e\xa7\x00\x2e\xc2\xa2\x8d\x41\xb4\xdf\xa8\xef\x86\x72\x43\xaa\xc5\x95\x06\xc0\x8c\x25\xbf\x00\x27\x0c\x64\x22\x71\x53\x50\x80\x96\x7f\x2c\xc6\xe2\x65\xee\x00\x24\x12\xa6\x86\x88\xc3\xa8\xbc\x30\xa7\xd4\x17\x6e\xbd\xb2\xd0\xd5\xef\x17\x99\x5b\x81\x3e\xc2\x64\xc0\x69\x5d\x56\xbe\x9f\xa9\x23\x93\xd0\xa6\xf8\x0e\xc8\xba\x47\xb1\x66\x9a\x91\xf5\xee\xdf\xf6\x8d\x6f\xc3\x69\x82\xce\x22\x89\x58\xb7\xf5\xd9\xff\x75\x22\x18\xf1\x36\x1e\xd9\x3a\x15\x9d\x39\x15\xcf\x6d\x21\xca\x69\x26\x76\xc5\xa4\x20\x4b\x7e\x96\xcb\x0f\x74\x0e\x95\x3f\x3b\xfd\x52\x18\xac\x50\x37\xc0\x2d\xe7\x23\xc8\x02\xfd\x1e\x92\x09\x33\xd2\x9a\xde\x22\x0d\x83\xb3\xdb\x5d\x57\xf8\xc2\x0c\x3c\x23\x86\x25\xd9\xdd\xc1\xe5\x42\xc6\xfa\x0d\xa7\xea\xa0\xaf\xb6\x87\xec\x1e\x1c\x43\x84\xd6\x5c\x97\x2d\xad\x65\x81\xa7\xec\x2b\x4f\x79\xf9\xd3\x71\x8a\x2e\xef\x78\x26\x0c\xf5\x18\x89\x34\x1c\xae\x3b\x46\xb4\xc5\x10\x91\x3d\x1b\xbf\x72\x59\x03\x8e\xd3\x03\xc9\xb9\x26\x1c\xd8\x59\xc8\x7d\x75\xfa\x3b\xf9\xd9\x37\xbc\x10\xee\x26\xd5\x7b\x55\x5d\xd8\x36\x38\x37\x08\xbc\xe2\xd1\x46\xeb\xaa\x3b\x0f\xe1\x2b\xe3\x1b\x88\x54\x66\xee\x7c\xed\x1a\x10\x5a\x1b\x04\x2a\xc1\x26\x4c\xed\x31\x46\x34\x43\xb6\xe2\xea\x70\x51\x91\xf5\xfa\xda\x43\x2f\xd1\x17\xea\x89\xe6\xf0\xd7\xa4\x1e\xdd\x70\xfe\xa0\xb8\xfe\xa0\xa4\x74\x79\xad\x1b\x68\x3f\xab\xb1\xac\x62\x33\xff\xb7\x17\x79\x5a\xfe\x1d\x04\x61\x6e\x33\xf1\x37\x59\xe7\xb4\x12\xe6\xb1\xa5\x48\x47\x81\xe6\xef\xba\x6b\x38\x3b\xd6\xca\x76\x52\xeb\x0f\x1c\x91\xbd\x93\xb9\x1f\x34\x98\xd9\xdf\xb4\x66\x07\x52\x0b\xe6\x3b\x6d\x00\xa9\xf4\x53\xf1\x3b\xbe\xc5\x1c\xe6\x34\x3c\x2f\x94\xe4\x80\x22\xd9\x91\x54\xf6\x05\x41\x3a\x80\x4b\x53\x5f\xbd\xd3\x3f\x01\x5a\x0d\x67\xb5\xd8\xef\x5c\x7e\x02\x2b\xbb\xc2\xe3\x1b\xea\xd7\xe8\xe0\x19\xb7\xe5\xfc\xdd\xa1\x76\x10\x7c\xe2\xa5\xcd\x4d\x55\xe4\x81\x90\x38\xf0\xed\x1a\x4f\x89\xc9\x18\x0c\x4c\x94\x61\x44\xe9\xcd\x2d\x83\x5c\x6e\x1c\xa6\xe3\xb0\x6e\xc1\x0a\x97\xd7\xf4\x2a\x41\xdf\xea\xb5\x24\xb2\xbb\x35\x3d\x8d\xec\x0a\x03\x34\x28\xd4\xc4\x91\x93\xa2\xe6\xbc\xb8\x17\xb6\xc3\x03\x21\xff\x9a\xb2\xd0\x76\xdb\x82\x65\x07\xf2\x96\xfa\xf4\x73\x8b\xc8\x87\x1b\x8f\x40\x5c\xce\x5e\xb5\x16\x3b\xb5\x16\x07\xc0\x6a\x83\x69\x90\x0e\xc2\x60\x48\x02\xb8\x88\x42\xa8\x90\x02\xb8\x8a\x6e\x9d\x65\xfe\xbf\x95\x40\xe5\xfd\x5d\x10\x65\x15\xab\x6c\xc9\x1d\x5e\xab\xc4\x79\x4b\x8b\xb0\x16\x26\x58\x4a\x1e\x03\xf2\xc7\xd9\xb2\xd8\xaf\x05\xe6\x6f\x19\xd9\x37\x12\x45\xcd\xea\x88\xbc\x57\x6e\x23\x5c\x7f\x43\x12\xfe\xc5\xa4\xc9\x36\x36\xe2\xc6\xc8\xb1\xa4\xb7\x81\xd5\xac\x20\xc4\x62\xb3\xa9\x6f\xe8\x99\xc0\xe5\x7c\x39\x7d\xe2\x95\x5f\xfc\x1c\x3b\xf4\x63\xc9\x11\x96\xdf\x37\x3c\x30\xd5\x5f\x35\xeb\x13\x80\x2a\x08\x28\x2e\x3b\x4f\x3e\x29\xd4\x6b\xb9\x8f\x2c\xb1\x58\x8e\x64\xc5\xdc\x2b\xbe\x78\xae\xcc\x0e\x08\x9b\xb3\x10\x4f\x9a\x1c\x37\x41\x95\xc6\xe7\x42\x8d\x3c\xcd\x1c\x23\x43\x5d\x93\x1e\x62\xfb\xc4\x47\xa4\x5a\x40\x65\xfc\xee\x5a\xd8\xb7\x9f\xf4\x73\xfc\xe0\xef\x12\x9d\x3e\x41\xc1\x8f\x72\xe9\x10\x9f\x36\x02\xe8\x93\xbf\xff\x88\xce\x9c\x48\xcd\x80\x82\x36\x44\x57\xd2\x5c\x97\x6f\x5f\x7d\xeb\x2f\xe5\xd2\xfd\x3e\x4a\x79\x1d\xd9\x1c\xae\xa6\x2c\x38\x51\x7b\x1d\x68\xe0\x72\x97\x59\x65\x99\x00\xce\xa3\xce\xf5\x83\x82\x9f\x79\x2f\x61\xe2\x78\x07\xac\xbf\xc8\x61\x88\x05\x26\x64\x7b\xa3\x59\x21\xc1\xff\xb
8\x8b\x08\x87\x25\xe6\x38\x1b\x70\x46\x7d\x2c\xe8\xf3\xf7\x9d\x7f\x6b\xca\x5b\x28\x82\x77\x03\xec\x2f\x47\x77\x0f\x05\x12\xe7\xb5\xbd\xd8\xc8\xaa\xee\xa4\xd4\xe2\x5a\x98\xdf\xd5\x06\xf8\xb1\x9b\x52\xb6\x54\x16\xcf\x30\x95\xc3\xf6\x57\x2d\xb1\xfb\x07\xc4\x4f\x5a\xc7\xd2\x43\x55\xbf\x25\x1d\x97\x30\x18\xec\x37\x1b\x96\x30\x08\x32\x00\xd6\xea\x16\xa0\xba\xc2\x62\x7a\xc1\xb6\x7f\x2f\x51\x54\x47\x47\xaf\x9f\x21\x7e\xa5\xe2\x70\x72\x82\xee\xc8\x15\x94\xf9\xf2\x56\x00\xa6\x20\xf6\x3b\xfe\xdb\xc3\x1f\x8f\xca\x8e\xa4\x86\xa7\x3f\x86\x27\x7c\xfa\xa7\xc3\x46\x11\x05\x10\x65\x31\x3d\x9b\x0c\x4b\x1a\x32\x56\x44\xe9\xbc\x8d\x1a\x5f\x8e\x3a\x81\xbf\xd4\xf1\xb8\xac\xf1\xa8\xba\x53\x02\x19\x4a\xd2\x86\xc3\x9c\xe1\xb9\xef\x96\x42\x74\x00\xe0\x0f\x15\x4d\x88\x83\xf9\x06\xba\x2d\x8c\x94\x4d\xe3\xe7\xfc\xab\x0f\xdf\x77\x22\x2f\xb4\xc9\x5f\xd5\xcd\xe8\xaf\xc5\x61\x11\x37\xaa\xdd\xfc\x66\xb4\xd1\x3b\x16\xea\x4f\x00\xc4\x6c\xa5\x73\x20\xd8\xa8\x4c\x5a\x00\x2e\xd8\x7a\x88\x5f\xd2\xef\xd7\x40\x4d\x35\x75\x08\x4a\x6b\x4a\xd5\x91\x8e\xf0\x01\x96\x99\x21\xbe\x0d\xdb\xfb\xb5\x2f\x34\x2e\x95\xee\xf0\x2a\x7a\xf1\xa1\x71\xb6\x7f\x0c\x22\xd0\x67\xbf\x87\xfc\x08\x5a\x77\x66\x87\x15\x7b\x55\xfe\x6e\xda\x71\x95\xf6\x7d\xe3\xc4\xc3\x25\x50\xe0\x89\x6a\xc2\x4f\xaf\xc7\x4f\x8f\x02\x78\x00\x4c\x29\x10\x97\x54\x7c\xd9\x91\x74\x4c\xce\x2c\x84\x97\x34\x1c\x96\x24\x1c\x9a\x38\xd6\x93\x3c\x5e\xdd\x61\xb1\xc5\xb5\x28\xbb\x98\x71\x91\x41\x60\xb0\xc2\xac\xf9\xf1\xa1\x2d\x0f\x3a\x9d\xf5\xf6\x5c\xae\x92\xad\x98\x02\xac\x6a\xa3\x83\xd5\x90\x6c\x02\x4d\x89\x42\x72\xb0\xcf\x91\x13\x7c\xc0\x45\x07\xe0\x6f\xc7\xd7\xb8\xad\xc5\xae\x7c\x69\xb3\xbf\xb9\xcc\xcc\xaf\xa6\xaa\x4a\x16\xdc\x8b\xd5\x33\xf3\x15\x5c\x47\x4f\x78\x63\xa8\xb0\xd6\x76\xdb\x82\x18\x2f\x2c\x70\x53\xe4\x8a\x7e\x1b\x98\x2a\xb2\xc7\xc6\x9a\xb3\x23\x8e\x8a\xc4\x6e\xf4\x4e\x60\xfd\x03\x38\x3a\xdd\x59\xd6\xa6\xac\x15\x4e\xe9\xe9\x52\xf0\x31\x1e\xf7\xb5\x8f\x6c\xff\xc7\x3a\xf9\x15\xc9\x7c\x40\xb3\x3d\xf9\xeb\x76\x5e\x66\x3a\xb7\xe0\x1f\xa9\x6c\xe4\x16\x4a\x32\x63\x04\xfc\xdb\xa9\x30\xda\x62\xc9\xcb\x0b\xb7\x75\xa4\x3d\xdf\x05\x41\xb2\xcd\xd4\x77\x9f\xc3\xc1\x89\xf7\x50\x0b\x4e\x28\x33\xc6\x37\xf0\xf7\x4f\xff\x0c\xc8\xf5\x3f\x06\xdf\xab\x0c\x29\xce\x6e\x71\xdc\x76\x8a\x10\x72\x14\xdb\x1b\x0a\xa3\x25\x2d\x87\x59\x75\x35\xcb\x55\x59\x40\xe2\x6e\x10\x5d\x9f\x44\x7f\xfd\x41\xf1\x9e\xb1\x65\x6e\x4f\xb2\x21\x6a\x8c\x4c\x0c\x14\xd0\x71\x93\x41\x89\x63\x40\x06\x94\x9a\x27\xaf\x44\x83\xa0\x0b\x92\x06\xfa\x8e\x22\xc9\x01\x3f\x63\x6e\x68\x42\x34\x9b\x2d\x3c\x4f\x34\xf7\x1c\xa1\xec\x93\x69\x4d\x34\x8a\xf2\xf2\xdf\xcc\x88\xce\x6a\xb0\xfd\x41\x65\x6d\xd5\xc6\x14\xfe\xd7\x82\xb8\xfb\x66\x87\x39\x91\x90\xa8\x80\x16\xfb\x78\x3e\x7c\xc4\xa5\x21\xd5\xf6\xd3\x8a\x9c\xcc\xb1\x58\xd7\xac\xcb\x9c\x8e\x4f\x7e\x8d\xea\xb8\xcc\x69\x61\xea\xd9\x1f\x9f\x0a\x0b\x35\x55\x2b\x4a\xfa\x81\xe2\xee\x5b\xfa\xa4\xc5\x89\x64\xde\x45\x16\x23\x7a\x90\x16\xd7\x48\x0b\xa7\x1f\x08\xc5\xc2\xc5\x0a\xa7\x58\x19\xfa\x75\x17\xd3\xcf\x70\xd9\xa1\xae\xd8\x6e\x2b\x95\x96\xd5\x4d\x1f\x0d\xb2\xcb\x6b\xc1\xd4\x8a\xfc\x1c\xec\x76\xe4\x74\x59\xd3\x11\xcf\x19\xfc\xd5\xd7\x72\x5c\xd6\xb4\xe9\x83\xdf\x99\xad\x64\x1e\x8e\xcb\x92\xa4\xe1\xbe\x31\x0f\xf6\x9b\x57\xbd\x55\x5c\x09\x94\x9a\x30\x69\xd3\x7a\xdc\x42\xc3\x88\x08\xf2\x8d\x47\xf8\xb0\x91\x54\x16\x7f\xb3\xa1\xac\xd5\x45\x83\x2a\xee\xee\x8c\xe1\xd2\xe5\xc9\xb3\x7f\xcc\x4c\x8f\xc1\xa5\x44\x43\x92\x8a\xcd\xee\x31\xc4\x8e\x3f\xc7\xa9\x3d\xa1\xd2\x48\x9c\x2c\x33\x7d\x74\x59\x13\xa4\xf3\x0c\x3a\x7d\x09\xee\x05\x3f\x57\xc8\xae\x57\x78\xf7\x4d\xfd\xd7\xeb\x79\xba\x08\x83\x73\xe3\xd9\xb0\x96\x61\x48\x
42\xfa\xca\x90\x29\x3b\x20\xf2\xe0\x1e\x19\x27\xc2\xf1\xee\xea\x1d\x27\xd2\x71\x59\x9b\xde\xf9\xa5\x6e\x2b\x41\x74\x15\x05\x70\x11\x68\x6e\xbf\x30\xb0\xa5\xf5\xa6\x08\x30\xb2\xec\x62\x14\x60\xe1\xd2\x47\x71\xeb\x31\x42\xa9\x2c\x48\xbc\xf2\x6b\xb6\x01\xa1\xc3\x01\x2f\xf9\xb6\x89\xa3\xdf\xb7\x13\x25\xc1\x3e\xb5\xf6\x92\xe6\xe4\x15\x00\x58\xbe\xd5\xe7\x8f\xed\x8b\x61\x43\x9c\x76\x45\x4a\x95\x91\xa9\x4b\x92\xd2\xc6\xa1\xc8\x2a\x7d\x5d\x1e\x20\x49\xf3\x00\x06\x52\xac\x74\xb2\xf3\x64\xe1\x0f\x65\xf0\xc9\x4e\x17\x18\xa1\x8b\x51\x44\xc0\xe6\xb4\x12\xe3\xc2\xd8\xff\x3c\x64\x6e\x5d\x5c\xdc\x72\x8c\xaa\x6f\x29\x7c\xf8\x42\x77\x47\xd8\x1e\xdd\x60\x22\x0f\xa2\x28\xd8\xa7\x18\x25\xed\x3b\x79\x0b\xd9\xe7\xdb\x58\x48\x2c\x52\x9c\xdf\x86\x16\xd2\xbf\x73\x72\x7e\xf9\xb3\xb0\xd1\x88\x95\x83\xe2\x0e\x83\x1b\x11\xae\xd7\xff\x79\x43\x57\xea\x28\xb1\x2f\xb5\xa7\x54\x28\xb0\xa2\x33\x1b\x12\xd8\x48\xa8\x06\xe1\x62\xa1\xd8\xc2\xb4\xf1\xa1\x50\x3d\x57\xe4\x57\xde\x0f\xc1\x81\xc2\x8e\x73\x6c\xcc\x53\x44\x47\xdf\x00\x4b\x02\x65\xc0\x16\xfa\x3f\xc4\x9c\x55\xae\xa9\x11\x85\xf3\x92\x31\x33\x46\xe1\x1e\x81\x96\x6d\x44\xe6\x15\x3b\x80\x8e\x43\x1d\xf9\x12\x0a\xf3\x7f\x4f\xa5\xf5\xfd\x4e\x49\x2d\x48\xd1\xe2\x10\x7b\x39\xe0\xc8\xe0\x0f\x6a\x38\x4e\x36\x0d\x3e\x80\xe8\x93\xac\x50\x9a\x89\x39\xce\x34\x90\x4e\x40\x2a\x12\xd9\x82\x63\x0b\x08\x2a\x21\x8c\x04\x04\x8f\x07\x3f\x10\xb2\x6c\x44\x64\x53\x2a\x5d\x66\x65\x8d\xbb\x7b\x79\x36\xfc\x43\x53\x49\xd7\xcf\x87\x26\xb1\x1d\x0c\xbe\xf5\x82\x61\x55\x21\x1f\xf4\xd5\x1a\xf1\xab\x61\x7c\xa3\x1f\xb2\xe2\x42\xdf\x94\x2e\x65\x01\xe8\x15\xf0\xc3\x29\x52\xab\x94\x4a\xe1\xd1\xa1\x2e\xdc\x7a\x5c\xda\x38\xd6\x09\xf7\x77\x87\x4c\x2c\xb2\x6d\xcd\xe3\xca\xf4\x1a\x5c\x13\x1e\x5b\xba\x5b\xe3\x4f\xa0\x4a\xf3\xc5\x95\xdf\x9f\xef\xfb\x53\xcc\x5a\x96\x55\xae\x5e\x3b\x67\x06\x39\x44\xd0\x6d\xe7\x77\x5b\x70\xe4\x11\x0b\x37\x7a\xf4\x43\xc6\x1a\x68\x6e\xbc\x1e\x08\x51\x80\x16\x5b\xb2\xa3\x38\x05\x14\x06\xc1\x55\x08\x96\xc1\x04\x29\x58\x79\xad\x2f\xaf\x4e\x49\x95\x6a\xd4\x68\xf3\x76\x03\xba\x31\xc9\xde\x03\x95\xde\xf0\xee\x23\xe1\x57\xe2\x6a\x81\x07\x81\xd6\x03\xde\xb7\x9f\x44\xc1\x93\x5b\x22\xa7\x6e\x8f\xc8\x3f\xa6\x10\xce\xab\xda\x62\x85\x25\xb6\x99\x49\x75\x98\x3a\x64\xd1\x03\x7a\x1b\xe1\x88\x12\x68\x75\xd0\xcc\x1b\xc9\x0c\x3e\x62\x6d\x6e\xa2\xe3\x27\x5c\x71\x36\x2e\x36\xff\xcd\x57\xf7\x60\x24\x9b\xd6\x58\x66\x0d\x4f\xe0\xd9\x70\xc7\x43\x82\xbd\x87\xca\x94\x3c\x05\x02\x1a\xad\xd0\x3a\x56\x92\xf1\x29\x01\x79\x9d\xfa\xee\x6b\x2b\xb7\x2a\xed\x60\x40\xa2\x8f\xd3\x4b\x87\x57\xc4\x32\x32\x5b\x5e\xb2\x67\x4d\x28\xc1\xaa\xcd\x98\xca\x9d\xf1\x10\xa3\x34\xa7\x42\x33\xb3\x5b\x9d\xd7\xb6\x30\x3b\x5f\xfb\xc5\x40\x54\x71\x04\xdb\x2f\x2a\x7d\x07\xba\x4c\x2d\x61\x3a\x2e\xb3\x86\x53\x40\xc7\xc3\x76\xa3\xf6\x9a\x18\x12\x37\x63\x8c\x9b\x0d\xd3\x95\x54\x73\xc2\xc7\xd6\x09\x56\xab\x2e\xff\x60\xb5\xb3\xcb\x70\xa8\xbe\xfb\xe6\xe1\x14\xa5\x38\xd7\x38\xf1\x34\xfa\x6f\x7f\xde\x8f\xb3\x9f\x50\xc1\x81\xa8\xe9\x60\x4e\x90\xe5\x5a\x8c\x3d\x33\xc5\xb8\xd2\xb0\x9a\x04\x3c\x71\x20\x15\xc9\x20\x4d\xef\xcd\x9c\xbc\xa7\x02\xce\x90\xc7\x7a\x55\x6c\x64\xf7\xf7\xb7\x0e\xe6\x97\x01\x58\x71\xff\x30\x1a\x41\x1a\x39\x49\x0c\x9f\x01\xe3\x3e\x5d\xef\x15\xcc\xed\xe6\xb7\xca\x34\xbe\x14\x41\x9d\x21\x64\xa1\x79\x0f\x78\x63\xf2\x66\xe4\x67\xf4\xe2\xf3\x2a\x73\x55\xca\xa8\x1b\x0c\x80\xde\x44\x04\x9d\x56\xa5\xbc\xff\x8b\x78\xba\xa2\x7b\x76\x2e\x24\xef\x61\xa0\xfe\xc7\xda\xa1\xd4\xab\x26\x8b\x48\x44\x61\x4d\xb1\x5c\xa4\x5c\x7f\xdc\xbd\xc2\xc7\x76\x44\xf2\x40\x7b\xcd\x84\xc9\xfb\x71\x5e\xb7\xba\x7d\xeb\xbc\x93\x5f\x93\xf2\x73\x7f\xed\xdc\x93\xfa\xd7\xf9\x09\
x6f\x3f\x8b\x61\xfb\x12\x3b\x0d\x08\xd9\xdc\x54\x52\x9e\xde\x8d\xe5\x9b\xea\x76\x5e\x2f\x22\xdc\x37\x16\x0d\xd2\x61\xdb\x4c\x94\x3d\x3b\x8d\xf6\x3f\xdf\xa1\x03\x9a\xd9\x61\xfa\x41\x17\x51\xf0\x54\xff\xee\xf9\x6f\xad\x36\x5c\x61\xb7\xee\x94\x4e\x85\x92\x06\x43\x2d\x9f\x49\x10\xf1\x4d\x9d\xf5\x4e\x0f\x15\x3c\xce\xfb\x22\x1f\x8c\x87\xaa\xb6\xf2\xf9\xf0\x7b\xfd\xe6\xe3\x48\x96\xa7\x25\x00\xea\x7d\xd8\xbd\xfe\xab\x3b\x2b\xfa\x6e\xb4\xe1\x05\xc1\x7b\xc3\xdf\x90\x65\xf4\x88\x18\xe5\x87\xed\x15\xc6\xb3\x27\xcb\x90\x5c\x52\x62\x0c\xb9\x23\x1f\xbd\x53\x0a\xd3\x35\x37\xa8\x81\xec\x5b\x47\xa8\x2f\xba\xfd\x92\x84\xce\xd8\x0d\xf2\x7c\x8f\xfe\x3d\x28\x6e\x78\xb7\xdc\xc6\x82\x7c\xdd\x5e\x58\x4d\x5a\x30\x26\xd2\xf5\x6b\xb9\x4e\x22\x38\xfd\xa0\xb9\xe8\x05\x53\x4d\x44\xd8\x1c\x72\x46\x5b\xdf\xea\x04\x0c\x5c\xb7\x88\x18\x7d\x11\xfc\x7c\xb0\x58\x98\xf4\xda\x02\x45\x70\xfc\xe8\x87\xe6\x3a\x1f\x5c\x16\xb6\x97\x26\x7c\x64\xa1\x7f\xb4\x7e\xaf\xb2\x4d\xb8\x0b\x95\x35\xb5\xf1\x68\x72\x75\x6c\x3b\xdc\x12\x87\xc3\x72\x46\x1e\xbe\xaf\x6d\x4c\xc8\x3c\x63\xff\x3e\x0d\x6a\x3c\xf5\xa5\x76\x8b\x34\xae\x8b\x28\xf1\x07\xab\xee\x5f\x61\xea\x5a\x92\x06\x35\xe0\xa4\x39\xd4\xff\x0a\x46\xa6\x8a\x98\x89\xad\xff\xe5\xbd\x5f\xc4\xe3\xf2\x13\x04\x3f\xef\x49\x5b\xe1\xe5\x9e\xd2\xf0\x1b\xbd\xea\xa6\xf8\xd4\x7a\x6a\x28\xd6\xdb\x3b\x28\xb5\x5d\x97\x79\x73\xec\x31\xf1\x03\x8b\x61\x5c\x47\xbf\xb2\x97\x32\x9b\x5b\x9f\x43\xcb\x5b\x20\xba\xff\x12\xa2\xd9\x5c\x5d\xeb\x9e\xb7\x5f\xd4\x98\xe3\x0c\x76\x33\x3e\x9a\xaa\xe4\x1b\x66\x05\x75\xec\x56\x22\x47\xd3\x7c\xeb\x11\xad\xfd\x46\xb2\x38\x8a\x12\x0d\x5a\xea\x1a\xe6\xb6\x34\x66\x82\xab\x63\x6b\x72\x97\x58\x02\x18\x17\xf0\xfa\x83\x31\x6e\xa5\x6a\x43\xde\x60\x23\x9b\xb3\xa2\x10\x3a\xb6\x9d\x66\x70\xb6\x7a\xf0\x3d\x67\x3f\xba\x5c\x01\xe6\xb5\x5e\x4e\xa6\xa4\x13\x7c\x1f\xff\x9a\x99\x27\x54\xba\x79\x0d\xbf\xa4\x16\xc8\x80\xd2\x47\xf0\xe0\x6f\x5d\x9a\x72\x60\x6c\xd7\xd4\x87\x4b\xbd\xb6\x43\x1d\xe9\xe6\x50\x64\xb4\x44\xee\x3d\xa1\xb9\x39\xf5\xa9\xf1\x17\x2f\x36\x11\xd9\xf5\x42\xb6\x59\x4e\xd2\xf6\xaf\x2c\xc8\xf3\xdb\x2f\x31\x2b\x90\xfe\x88\x36\xe2\x29\x2e\x82\x04\x6b\x46\xbc\x77\x7c\x1b\x1a\xc0\x89\xb6\x37\x35\x27\x92\x85\xa9\x27\xc6\x33\xf2\xbf\x7a\xb7\x92\x86\xf5\x2c\xb3\x4f\x53\x55\x5f\x11\x05\x14\xcd\x9a\x0e\x8a\xec\x50\x90\x93\x91\x24\xbd\xa2\x83\xb8\xce\x8b\xe7\x27\x16\x3a\x66\xbb\xa8\xc1\x58\xf5\xbd\xec\x4b\x08\x3d\xef\xd6\x38\x0c\xdd\x1e\xd4\xb1\x3a\xba\xe2\x93\x2b\x62\x0f\x6b\xe2\x83\xe6\x7b\xa6\x5d\x16\xff\x50\x40\xa4\x2d\x58\x1f\x2f\xe1\x25\xe0\x4a\x59\x7d\xb3\x90\xbe\x63\x84\x31\xe0\xd3\xb4\x9c\xcc\xb9\xc2\x0d\x48\x7f\x14\x58\x9c\xd3\x97\x3c\xee\x19\xf1\x0f\x84\x5b\x92\xf4\x2a\xd6\x85\x90\x95\x78\x12\x49\x6e\x73\x73\xf8\x97\x3f\x21\xb1\x34\x83\x51\xa9\xe7\xe3\xcd\x7c\x78\x96\x02\x19\x90\x69\x39\x89\x2e\x9c\xfa\x99\x52\xf4\x62\x3c\x4b\x70\x3e\x43\x3e\xf8\x5e\xe1\x15\x8f\x29\x17\xa8\xcb\x76\x83\xf7\x87\x9d\x24\xde\xdc\xea\xbd\x2e\x2c\x93\x2d\x03\x7f\xea\x32\x6d\x8b\xc2\xd7\x78\xdb\x77\x1b\x01\x5c\x1b\x8d\x1a\x4f\x6d\xe4\xe2\xb6\x73\x3e\x0f\xe8\x25\x92\x22\x55\xb4\x0d\x10\xd9\xf1\x82\x60\xfb\x1d\x6d\x08\xc4\x74\x20\xa4\xa7\x4c\x7c\x7e\x6a\x28\x49\xc3\x08\x5e\x8d\x28\x8a\x76\x88\xb7\x40\xdf\x42\x9c\x6a\x3b\xa7\x22\x8b\x77\xae\x40\x9a\xdf\xae\x3a\xfa\x44\x16\xeb\xe3\xbf\xb5\xff\x9e\xd2\xc9\x20\xf1\x17\xfa\x2f\x95\xa6\xf9\x9a\x69\xb5\xb4\x75\x9b\xde\xbd\xd6\x0b\x8a\xf5\x8e\x5e\x3a\x1c\x29\xc4\xef\x0d\xbd\x1a\xf2\x95\xbe\x33\x3c\x1f\x56\xbc\x3e\xb1\x5c\xf6\x3c\x5e\xa0\x48\xcf\x79\x15\x1a\xcd\x85\xce\xb0\xa8\x37\x84\x88\x15\x73\x41\xcc\xbc\xb7\xd6\x67\x92\xa5\x28\x38\xa7\x0e\x07\x68\x14\xab\x1e\xfc\x56
\xe9\x32\x18\xb3\xf1\x2f\x27\x44\xd2\x05\xcf\x0b\xa3\x90\xa4\xf9\xef\xdb\x6f\x2e\xbd\x96\x16\x47\x25\xc1\x76\x9b\xdf\xef\x3e\x61\xe2\xc7\xb3\x58\x6e\x4a\x19\x49\xa5\x08\x7e\x66\xfd\xf2\xe6\xc7\x9a\x1d\x7c\x71\xa1\xdb\x83\x53\x44\x36\x43\x6f\x09\x11\x06\x78\x46\x31\x20\x36\xae\x3f\x29\x04\x5d\x9e\x50\x56\xa2\xed\x2e\x37\xae\x4a\x29\x90\x33\xe7\x0c\x9d\x33\xdb\x40\x7d\x98\x5d\x47\x0f\xdb\x55\xcd\x8b\x80\xae\x3f\x86\x0e\x9f\xc8\x77\xaa\x3d\x6f\x6f\x4e\x14\x60\x75\x71\xaf\x48\x7c\x61\x2e\x2b\xa6\xce\x4c\xf4\x0b\x65\x83\xc9\x2f\x86\xe9\x6d\xc9\x4b\x76\x98\x38\xb7\xa5\x3c\xd1\x39\xf0\x79\x1d\x53\x95\x26\x05\x2a\xf1\x86\x78\x3e\x1e\x50\x23\x0c\x86\xcc\xfc\x76\x2c\x8e\xec\x96\xf3\xd9\xbc\x93\x5f\xbd\x93\x09\xab\xac\xaa\x85\x02\x8a\xca\x9a\x3e\x60\xba\xfc\x81\xa4\x5f\xac\x53\xdf\x6b\x8b\xf2\x94\x2c\x9e\x3e\x51\x5c\x7f\x20\x0c\x25\x9a\x3a\x1c\xc7\xce\xfc\x2b\x76\x84\x38\x52\x34\x3b\x14\xb1\xba\xec\x94\xca\xa8\x82\xb4\xb7\xb7\x52\x48\xde\xe0\x5b\xad\x6a\x79\x89\x10\xed\x5b\xec\xfc\xd8\x6b\x43\xb6\x4c\x2b\x0d\x37\xb9\x9a\xe9\xdd\x71\x5d\x89\xfd\x8a\x28\x4d\x99\x14\x7b\x32\xba\x4d\xb4\x5e\x96\x19\x06\xb2\xd6\x9d\xd6\x86\xef\x68\x8f\xfd\xe2\xa5\x18\xce\x61\x2d\x90\xa9\xcd\x2b\xda\x7a\xa9\xd0\x1b\x41\xa0\xdc\x18\xa6\x03\x92\xb2\xdc\x62\x52\xa0\x6d\xaf\xfa\x1f\x7b\xe7\xbe\xf6\x43\x18\x78\x8f\xea\xdc\x76\x98\xc4\x86\x0c\x7e\xc6\x8d\x6e\x81\x08\x73\xca\x48\x60\x51\xdf\x5d\xb3\x8c\x22\xe9\x71\xb3\x18\xf0\x33\x9a\x12\xa4\xf8\xfb\x7b\xc4\xcd\xad\xbb\xa7\xcf\xde\x55\x6e\x4d\xf4\x71\x7b\xb0\x37\x00\xeb\x2e\x3f\x64\x0e\xdf\x0d\x18\x6e\xbb\xa3\x49\xc1\x24\xaa\xc1\x0e\x81\x11\x41\x1a\xc4\x4b\x04\xa4\x3f\xef\x07\x6b\xfb\x5e\x29\x80\xf2\xb2\x6d\xa4\x0d\xc4\xbe\xa9\x8f\x20\x11\x8d\x59\x4d\x38\x00\x1a\x08\xa9\x1d\x08\x81\x45\x28\xd2\x41\x60\x01\x88\xab\x87\x9a\x1c\x3c\xb8\x0e\xa5\x46\x29\x50\x82\xf9\x39\x8b\x99\xdd\xbb\x86\xff\xc6\x40\x65\x4a\x71\x6e\x34\xae\xd1\xc5\x30\x49\x03\xa3\x91\xc7\x7e\xdd\x8a\x6f\xb7\xae\x5b\x0a\xaf\xf8\x11\x92\x27\x42\x47\x74\x1c\x06\x63\x32\xe0\x67\x36\xe1\x20\x48\x95\x96\x04\x72\x34\x08\xe0\x4d\xd5\x2b\x20\x48\x14\xfa\x54\xba\x35\xb6\x12\xe7\x16\x92\x49\x9f\x86\xbf\x66\xce\xc1\x78\xaf\x64\x9b\x81\x5e\xec\x39\x4c\xb8\x14\x68\x39\x0c\xe7\x86\xa0\xf2\xe7\x2f\xa7\x6a\x8f\xa7\x76\x88\xe9\xb2\x89\xb0\xcc\xab\xd9\x42\x8c\x97\xc8\xbc\xdc\xfa\x4b\xf6\x62\xd0\xa2\xba\x33\xbb\xfe\xab\xb5\x70\xbf\x9e\xad\x29\xbb\xac\x18\x8b\x97\x79\xa3\x7b\x4b\xba\xb4\x24\xe8\x2d\x2c\x84\x3f\x61\xe5\x3a\x8d\x21\x09\xfd\x07\xca\xf9\xe2\xd9\x33\x02\x28\x94\xd9\x60\xee\x95\x15\xa4\x5e\xf8\x57\xd7\x3b\xb6\xf4\x3d\x36\x02\xa1\x01\xad\x51\x95\x76\x3e\x35\xee\xde\x35\xeb\xf1\xc3\x5f\x93\x00\xea\xb5\x41\x6b\xc6\xdd\x94\x61\x4e\xff\xfa\x25\x34\x7e\xf8\x93\x48\x00\x64\x87\x9b\x4f\x0c\x48\x88\x07\xe1\x44\xb3\x1d\xff\x45\xdf\xd3\x03\xb9\x6c\x99\x92\x48\xf2\xe9\x41\xa8\x94\xa4\x51\x40\xa0\x21\x55\xf3\x1d\x88\xc6\xc7\x19\xe8\x7b\xfd\xef\x92\xc6\x07\xee\xf7\x60\x0a\x60\x23\x6e\x7c\xc5\x23\x79\xf9\xd3\x04\x02\x77\x7d\xd8\x28\x17\x42\x36\x76\xca\xec\x9a\x9b\xfc\xf2\x48\xd0\x3b\x99\x9e\x62\xc9\x8f\x6c\x50\x6a\x16\xd7\x1d\x44\x4e\xe9\x17\xf5\x3d\x78\x01\x6a\x38\xe2\x4b\x0e\x89\x9f\x31\x4b\x18\x30\x66\xad\x13\x89\x75\x04\xf0\x7a\x30\x40\x9d\x3c\xdf\x2a\xd5\x88\xe6\x1d\xc8\xe9\xb1\x94\x65\xc5\x9f\x2c\xd7\xae\x04\xff\x71\xde\x7f\x07\x7b\x36\x99\x33\xef\x11\x0a\xe2\x91\x85\x3f\x79\xf8\x33\xed\x08\xb4\xd0\x68\xa1\x14\xab\x8c\x06\xdc\x2c\xa6\xe4\xcc\x46\xfc\x4f\xa2\x02\xe3\xab\xec\x3b\xb7\xa0\xb3\x85\xf2\xd2\x92\x5e\x30\x69\x82\x20\x09\x9b\x45\xe3\x8a\x2b\xf3\x97\xcc\xf6\x49\x59\xd0\x8e\x41\x56\xed\xb4\xfd\xfd\xe5\xec\x88\xc3\x22\xe2\x18\xc9\x8
d\x7d\x2b\xcc\x91\x12\xd9\x5e\xb5\x07\xf1\xe5\x39\x60\x62\xc9\x89\x68\x80\x74\x97\x7d\x13\xc8\x3f\x36\xae\xc5\x65\x48\x92\xba\x1a\xf0\x8a\x4c\x53\x6f\x61\x19\x5b\xbc\xcf\x4c\x92\x3e\x98\x1a\x15\x35\x47\x35\x25\x50\x69\xed\xd4\x67\x50\x32\x51\x94\x30\xd4\x77\x1c\xc8\x1c\x25\x95\xa7\x61\x05\xc1\x46\x7c\x4a\xd5\x95\x5e\x6c\xe6\x58\x6c\xe6\xac\x02\x89\x92\x03\xad\x6f\x9f\x94\x37\xff\xd3\x68\x9a\x9e\x2e\x74\x0c\xfb\x85\x88\x26\x1c\x28\xbc\x78\xcd\x4d\xb2\x03\xea\x4d\xbd\xa9\xf4\x72\x66\xd1\xc3\xef\xf4\x5c\xd6\x9c\xaf\x8e\x51\x04\x59\x00\x1d\x38\xcf\x87\x8c\xbf\xe7\x45\xc3\xf2\x1c\x4c\xfe\xdf\x70\xb9\xba\x56\x63\x0a\xf5\x97\x69\xa5\x1c\x31\x15\xd7\xf5\xdc\xd4\x25\xbf\xe6\xfa\x7d\x1e\x7a\x7f\x13\x37\xd0\xc7\x17\x4a\xc4\x66\x87\x47\xf5\xed\x17\xdb\x8b\x24\xed\xc5\xce\xe7\x62\x6a\x9d\x0a\x7e\xa5\xcc\xca\x8e\x5c\x9a\x52\x08\x67\x08\x89\x13\x80\xbd\xd8\x3a\xd0\xc8\x90\x9c\x18\x22\x34\x92\xef\x27\x39\x50\x1a\xd6\x6c\x78\xac\xe9\xb8\xf1\x56\x71\x3f\x43\xc3\x08\x76\xca\xdb\x70\x61\x79\x70\x61\x7c\xb9\xc4\x63\xc0\x62\xfd\x7b\xb5\x79\x68\x1e\x74\x58\xae\xde\xc9\x9f\x0f\x1b\x36\xba\xa6\x63\x4f\x86\x2f\x50\x95\x3b\x52\x45\x28\x4d\x76\x30\xf1\x9a\x9b\xc3\xe4\xcf\x48\x5d\x13\xf5\xe1\x14\x98\x22\xcd\x1c\xeb\x8f\xd7\xe2\xaa\x2b\xec\xcc\x25\x9b\x99\x24\x44\x83\x78\x19\x7d\x1e\x04\xe1\x9d\xd1\xd5\xf4\xe1\xe0\x93\x72\xe2\xfb\x9f\x76\xf1\x51\xdf\x1a\xfd\xc9\x01\x78\x71\x05\xab\xf5\xe5\x28\x52\xd4\x05\x6f\x43\xc6\x6a\x7d\x58\x73\x53\x95\xce\x7f\xf2\xa7\x97\x96\x44\x0b\x82\x7f\x0a\x95\xc0\xb3\x28\xdd\x3c\xcc\xed\x9d\x68\x86\x94\xd4\x17\x36\x27\x94\xbc\x82\xf9\xee\x36\x27\x60\xb9\x79\x13\x1b\xe1\x0d\xaf\xa2\x54\xda\xe3\xb3\x9c\xc0\xd8\x8e\xf4\xce\xab\x7a\x65\x07\x42\x9f\x28\x48\x1e\x60\x80\xc0\x07\x59\x9e\x3e\xfc\xd2\x95\xb2\xc1\x04\x50\x29\x03\xd4\x1f\x6e\x90\xe4\x5a\x93\xab\x43\xf5\x42\xb8\xd0\x3c\xfa\x77\x1e\x35\x84\x49\x46\x72\x66\x03\x1e\xfe\x80\x49\xd2\x7a\xbc\x4c\x58\x67\xb1\x09\x7c\xcf\x1d\x20\xf6\xcf\x60\x0a\x82\x20\x63\x61\xe0\x9a\x30\x5e\x06\x65\xae\xfb\x39\x50\xd8\x72\x24\x2b\x04\xa2\xdb\x7f\x00\xa1\x76\xeb\x7d\xa0\xc4\x4e\x4a\x08\xc7\x7c\x12\x28\xfc\xd8\x7d\xb8\x89\x22\x33\xa0\x43\x76\xfd\x1f\xd3\x0b\xb3\x2f\xb0\xf8\xef\x4b\x5e\x20\xb7\x6e\x20\x73\x86\x9f\x1f\xb0\x37\x5e\x36\x8e\x28\xc4\x72\xdd\x72\x4e\xb3\xd3\xc3\x41\x16\x0b\x51\x46\xe7\x15\x6d\x3b\x62\x9c\xf8\x9e\x19\x5f\xf2\xf8\x17\x40\x1c\xe7\xb5\x91\x2b\x61\x36\x3d\x20\xcd\xa0\x04\xa0\x56\x69\xa9\x9d\xca\x93\x6a\x33\xa7\xbd\xb2\x5a\x71\xa3\xdd\x60\x37\x46\x0a\x05\x30\x2c\x0e\xf3\x9e\x4f\xd9\xbe\x22\x6f\x80\x6c\x7e\x94\x49\x5f\xb1\x92\xd2\xb7\x8d\x94\x7c\xb0\xc2\xff\xf2\x36\x2b\x21\xae\xf5\x98\xa5\x92\xd1\x00\x30\xc4\x30\xad\x7b\xbc\xdd\xe0\x9a\xfa\x52\xe8\xef\x6f\x1c\x65\x55\xd7\x0e\x4f\x52\x4c\xad\xd4\x6c\x4a\x97\x67\xb2\x68\xef\x48\xb7\xa1\x58\xc7\x9b\xc9\xdb\x58\x6e\xf8\x41\xe0\x3f\x0d\xfb\x17\x63\xae\x3c\x2d\x64\x39\xeb\x22\x1f\x10\x4f\x5a\x54\x92\x8a\x17\x5c\xe7\xa0\x40\x65\x1e\x51\x89\x2e\xb8\xb9\x40\xd7\x23\x93\x30\x08\xa2\xe4\x40\x35\xfe\x4b\x64\x29\x70\x75\x44\x1d\x5e\x19\x99\x31\x4f\xfd\x2e\x62\xc1\x47\x93\x4f\x33\x28\x04\x05\x8a\x90\x35\xbf\xab\x7a\x82\x43\x0a\xa1\x79\x2d\xb5\x16\xdb\xfe\xc6\x9f\x6a\x4d\x9e\xeb\xb3\xe7\xde\x4f\x69\xe8\xca\x61\xc5\x08\xdb\x14\x99\x67\xe3\xa6\x37\x32\x60\xf1\xc7\xbc\xda\xf4\x2d\xd4\xc4\x90\x71\x2c\xb2\x0d\xe7\xfe\x25\x97\x79\x11\xe4\x66\x35\x9a\xfd\x27\xda\x7f\xf4\x3d\x31\xa1\x70\x4f\x0b\x9a\x56\x68\x1d\xa1\x24\xa3\xd0\x08\xcb\x28\xac\x2b\xf1\xf7\xdd\x87\x9a\xab\x85\x99\x81\x80\xf9\xab\x6f\x3a\xcb\x3a\xfd\x1e\x74\xa5\xd5\x61\xb1\x75\xb7\x9c\x6c\x7a\x2c\xb2\x67\x29\xb2\x27\xc4\xa6\xd0\x57\x1e\x48\xce\x97\xec\x
e8\x06\x03\x24\x78\x7e\x8b\x8f\xfd\x18\x59\x09\x35\xaf\x77\x30\x80\x69\x2c\xef\x19\xbe\x50\x18\x94\x3f\x85\xe6\xc3\x99\x8a\xd1\x85\x10\xeb\xf8\xc0\xfd\xc6\x72\xef\xc2\xd7\x3f\xfb\xb5\xfb\xdd\x39\xfd\x90\x02\x22\x81\xb6\x3c\x68\x56\xd3\x9f\x3d\x2a\x61\xc0\x66\xd7\x2c\x0a\x42\xef\x89\xc1\x80\x27\x91\xd6\x2b\x3a\x1e\x45\x27\xbe\x0d\x2d\xcc\xcd\xf0\x61\xc8\x7b\xa8\x69\x0f\x42\xbb\xab\x8f\xec\xbe\xf6\x23\x12\x73\x22\xd2\x7a\x16\x58\x38\x84\x1e\xdd\x4b\xb9\xbe\xe4\x9f\x32\x1b\xb1\xf3\xda\xfd\x46\x41\x40\x0f\x25\x64\x45\x91\x1d\x71\x06\x27\x02\x48\x21\xd1\xca\x8a\xca\x94\x0a\x00\x94\x6d\xe5\x9c\xb8\xa3\xc7\x17\xd3\x95\x18\x79\x33\x39\xa6\xb6\xff\x18\xcd\x7f\xaa\x3f\x1d\xf1\x5b\x79\x59\xd2\xf0\xc4\x4a\xe9\xc4\x2a\x69\xc0\x7b\x3a\x0f\xab\xce\x5f\x54\x4f\xa9\xee\x84\x3e\xbf\x94\x2c\x5e\x78\xc5\x44\xb6\x8e\x1f\x3c\x45\xd8\x9a\x51\xfc\xca\x43\x4f\x1e\x88\xa4\xb5\x4f\x30\xb1\x38\xbc\xb9\xe0\xb7\xbd\xfd\x57\x67\xc8\xda\xa8\xac\x05\x00\x40\xd5\x52\x39\x59\xf6\x7e\x1b\x83\xad\xcc\x09\xa8\x9e\x52\xe6\x38\x1b\xcb\x2d\x07\x2e\xb3\xfa\xda\x65\x1e\xe2\xe0\x77\xdf\xd4\xf6\xc9\xcf\x77\x66\x8e\x64\x4c\xc6\x5a\x5c\x2e\x88\xd6\x1f\x68\xaf\x15\xe9\x0f\xcd\x46\xde\xb6\x79\xd4\x36\x89\xf6\xcf\x23\x76\xb1\x5f\x29\x3b\xa6\x2b\xdd\x03\xf2\x49\x1f\xc7\xbc\x9e\xc4\x16\x0c\xe8\x12\xe3\xb0\x82\xa2\x76\xdb\x4a\x9e\x7a\xa8\x08\xbe\xd0\xdd\x7b\x62\xe9\x3e\x35\xab\x0c\xc3\xd4\x8c\x30\x08\x76\x60\x4f\xa8\x50\x60\x2a\x37\xaf\xee\xf8\x79\x41\x10\x04\x29\x5c\x5e\xbd\x07\x6b\x8c\x7d\x00\x39\xaa\xbc\xeb\xba\xae\xba\x30\x2f\xac\x0c\x4a\x0c\xfc\x3c\xab\x6a\x6e\x2f\xc2\xdc\xd0\x2f\xf8\x2f\x76\x87\xdb\x32\x79\xc0\x1c\x42\x4c\x3e\xe2\xdf\xb1\x8d\xca\x9a\x9e\x5b\x8d\xc9\xdc\x69\xd4\xbe\xc4\xde\x95\x97\xec\xd0\xbf\x33\x2b\xe4\xd1\x63\x7e\x87\x31\x7e\x36\x43\xce\xdb\x43\x01\x8e\xdc\x75\xc9\x57\xcf\xae\x70\xf0\x4c\xc6\xb3\xe4\x12\xb8\xe2\xa3\x7b\xc2\xa0\x70\xfe\x80\x08\xda\x83\xa1\x5a\x10\x69\xf0\x3b\xd5\x2d\x23\xdb\xad\xb0\x6b\xdc\x37\x7e\x09\x0d\xd2\xd7\x8b\xaf\x85\x39\xb9\xe1\x83\x43\x84\x29\xf1\x14\x50\x99\x2f\xdd\x79\xcb\x9a\x8c\xd0\x4f\xe1\xe2\x88\x5b\x54\xbb\xe7\x05\x6c\x45\xf3\x7b\x52\x6c\x01\xd1\x78\xf9\x4b\xb1\x34\x94\x2f\x55\xff\xcb\xbf\x6c\x1d\x8b\xb5\x30\x86\xf0\xdb\x2f\xa5\xce\xd8\xee\x00\xd0\x50\x2f\x6e\x46\x9c\x9b\x12\x96\x9e\x46\x03\xa3\x05\x5a\x30\x47\xe6\x42\x87\x52\xf9\x17\x01\xfb\xc1\xcf\x16\x34\x21\x50\xdc\xe9\x65\x63\x3f\x31\xfb\x81\xfc\xbb\xff\xcb\xc6\x62\x03\x2e\x3c\x9f\x65\xbd\x9e\x8a\xea\xa9\x6b\xa0\xe9\x26\x08\xda\xeb\x07\x1c\x34\x6b\x67\xe6\x38\xc3\xb9\x2c\x98\xa6\xcc\xd8\x76\x18\x61\x76\x1e\xd5\xed\x67\x20\xa6\x0a\x71\xc2\xc6\xe2\x42\x38\x36\x61\x67\x92\x34\xce\x6f\x21\xd2\xd1\x59\x62\xea\xd0\x5c\x88\x19\x1c\x12\x3c\xef\xa4\x69\xc3\x9f\x2b\xbd\xa5\x6b\x2c\xb1\xdd\xf0\xaa\x5c\x90\x94\xa9\x54\x1a\x9b\x7d\x66\x9c\xfa\x99\x3d\x8e\xed\x4c\xc8\x0c\x1c\xd4\x3b\xbc\xb3\x6c\x4d\xf3\xc1\xd2\x19\x37\xc5\x31\xff\xf8\x5e\x64\x7b\x47\xb5\x4d\xe4\x10\x9b\x86\x79\xc3\xa0\x5f\x64\xce\x64\xec\xa0\x76\x65\xd0\x22\x81\x7a\x66\x58\x5a\xe6\xa0\x9c\x1f\x4b\x7d\x78\xca\x3d\x19\x1d\xc1\x11\x83\x61\x29\x1c\x94\xcf\x1e\xec\x92\x4f\xaf\xc7\xfd\xc0\x3a\xfe\xf5\x2b\xb1\x7f\x76\xca\xeb\xc5\x43\x97\x82\x03\xc1\x96\x9b\xe8\xb0\xe3\x07\xba\x4b\x6c\xb1\xca\xd5\x83\x3e\x79\xd4\xea\x96\x70\x16\xce\xd6\xf4\x2c\x78\x5d\x2d\x8f\x66\x4d\x1d\xf6\x0b\x34\x4b\x38\xdf\x41\xb8\x56\x51\x2b\x6f\xcb\xd0\x92\x85\x77\x83\xc0\x0f\xb4\x87\xa4\x6f\xc2\x12\x7d\xe5\xf7\xe7\x77\xb6\x01\xa1\xd9\xb6\x8b\x67\xa4\xb0\x8a\x28\x80\x0d\x2d\x5b\x56\xb5\x9d\x69\x93\xf4\x44\x98\xbf\x81\x2d\xd8\x12\x9f\xe5\xf3\xd5\x95\xbd\x78\x0f\xfd\x9f\x0c\x1b\x9e\x7f\xed\xb2\x9b\x57\x3a\x13\x52\
x9c\xd6\xde\xba\xf1\xb1\xe2\x9b\xce\xaa\xda\xca\xd7\xe7\xa7\xe5\x74\xc2\xd6\xb9\x6f\xbf\x1d\xcd\x28\x65\x05\xea\x08\x8b\x49\xbb\xfe\x1b\x8f\xe4\xa6\x1e\x51\x46\x87\xe4\x60\x01\x12\x81\x0c\xa3\xfd\x4e\x95\xb7\xb1\x3f\xf6\xf0\xc4\x84\xd2\x3f\xe1\xf1\x03\xd6\xae\x7f\x8a\xe6\xf6\xeb\xba\xaa\x1d\x93\xaa\xb6\xf0\xb7\x66\xda\x78\xac\xe9\xd2\x08\x87\xb3\x5e\xe5\x1d\x90\x2d\x8f\x3e\xb3\x03\xcb\x7d\x06\xad\x92\x57\xf6\xf1\x5c\xc6\x31\x7d\xcc\x6f\x92\xc3\xb9\xe6\xe3\xed\xe0\xdc\xff\xdb\x1f\x00\x93\xc3\xb9\x96\x87\x38\x79\xec\xc0\x1f\xe0\x06\x93\xa2\x98\xf4\xcc\x4d\xa9\x40\xf0\xf3\x03\xcb\xce\x0d\x47\x09\x29\xc0\x55\x04\x92\x89\x66\x98\x1a\x34\xde\xd1\x55\xd6\xe6\xb8\x2d\xd9\x60\x8a\x34\xbf\x9a\xe8\x58\x5f\xb0\x1d\x3e\xb1\x03\xb8\xa8\xce\xe9\x4b\xe0\x13\xcd\x0e\xcd\x85\x9f\xcc\x55\xda\x79\x55\x57\xbd\x91\x43\x4f\x33\x90\x3d\x4e\xdd\x79\xb3\xfe\x35\x0f\x78\xc0\xae\x67\x65\xaa\xb2\xb8\x7a\x5b\x31\xd3\x9d\x51\x1e\x4b\xda\x6c\x24\xcc\xad\xe5\x1f\x7e\xa9\x96\xa7\x6e\xf9\x13\xa5\x4a\xcf\xf0\x15\x25\x56\xce\x6d\x54\xda\xd8\x94\xbc\xb9\x56\x71\x0e\xca\x16\x43\xa4\x5b\x18\xc8\x38\x3e\x54\x03\x4e\x85\x8f\x4c\x59\x74\xa5\x38\x37\x31\xe0\x3e\x45\x60\xd6\x5f\xf7\xe0\x2d\xe8\xd5\x3f\x91\x87\x89\x7f\x35\xbc\x95\xad\x5d\x7f\x54\x7e\x02\x6d\xb3\xb6\xff\xfb\xef\xfb\xe9\x48\xc0\x26\xe1\xdf\x00\x66\xec\xf4\x20\x50\xf4\x94\x6f\xc6\xcd\x79\x51\xac\x4f\xa6\x98\x89\x5b\xe7\xc2\x7d\x52\x3a\x37\x7d\x13\x3d\xa7\x62\x8f\xeb\xc4\xab\xc9\x05\x21\x7b\x81\x06\xb7\x60\x66\xad\x0b\x6a\x38\xd6\x77\x4a\x94\xa0\x9c\xf8\x0c\x6b\x71\xb1\x84\x46\xe9\x09\x7f\x15\x5f\xa6\xbf\x84\x81\xee\x13\xd1\x66\xb8\x53\xe9\xbd\x89\xcf\x2d\xef\x66\xe0\x4c\xfd\xf6\x87\x02\x2a\xa1\x59\xec\xaf\xce\xbb\x1a\xce\x39\x50\x83\x68\xcb\xcb\xd1\xdf\xad\xeb\xa0\x76\xd3\xfa\xe8\x51\x97\xf2\xd2\xb2\x44\xad\xea\xba\xf0\x91\x5e\xf1\xeb\x81\x93\x66\xe3\xa4\x31\x24\x98\x92\xb4\x9b\x22\xd7\x08\x90\x60\xc3\x5d\x8c\x44\xc9\x9b\xeb\x02\x66\xb3\xc2\xaf\xe7\x3f\x1f\x3e\x4a\x88\x2c\xd0\x0e\x2f\xe5\x83\xbf\xb4\x3f\x3e\xda\x82\xf5\xd3\xba\x5f\xf8\x00\xa0\x4e\x1e\xe6\x27\x23\xf7\x60\x8f\xff\x04\x13\x81\x63\x98\x22\x4b\x69\x80\x7f\x38\xfb\x3d\x34\x5c\x2c\xd9\x4a\xec\x1b\xf3\x0e\xc1\x30\x24\x9c\x77\x72\x8c\x20\xf4\x8c\xd5\xf4\x5f\x2f\xf2\x2c\x61\x6e\xbb\xe5\x6d\xe4\x9d\x64\x0b\x3b\xfc\x40\x77\x2c\x24\xef\x40\x98\xc1\x25\xf3\xab\x7f\x9b\x66\xc0\x4f\xf9\x95\xea\x6f\xc1\x00\xb0\xbd\x91\xf0\x8b\x17\x81\xcb\x0d\x69\x61\x86\x24\x0c\x1e\xdf\xe6\x07\x82\x24\x81\xea\x68\x1a\x8e\x91\xeb\xbd\x5e\x0d\x76\x39\x80\xf9\x4e\x76\x20\xd3\x97\xe3\x37\x53\x0c\x3d\xf7\x81\xf0\xc6\xb8\x5b\x4c\x0e\x52\xf0\x4b\x16\xaf\xe9\xba\x32\x0b\xe5\xd7\x33\xfb\xd1\x14\x56\x20\x02\xf9\x14\x27\x72\x87\x0f\xd4\x92\x58\x4d\x5b\x10\xd1\x41\xc1\x96\x20\xb5\xac\x46\x1b\xfc\xfe\x6c\x2a\xb3\x5b\x49\x7d\xf7\xf5\x8c\x30\x33\xff\x8c\x91\x76\x6e\x20\xff\x98\xd1\x9c\x57\xa5\xfd\xf3\xca\xb6\x2e\x61\x27\x9a\xff\x31\xe5\x42\xfa\x83\xe4\x50\xef\xff\xb1\x74\xd6\x5a\xd3\x32\x4b\x14\xbe\x20\x02\xdc\x42\x9c\xc1\xdd\x32\xdc\xdd\xb9\xfa\xb3\xde\xef\x3f\x93\x4c\x32\xb0\x58\x3d\x5d\xb5\x9f\xdd\x54\x57\x5b\xef\x1b\x73\x75\xc9\x8f\xb4\x73\xcd\x0c\x1c\xd9\xd0\xf1\x4b\x0d\xc6\xc3\xa6\xb0\x99\x5d\xae\xd9\x05\x90\x93\x04\x99\x5c\x0b\x39\x80\x8d\x94\xab\xd7\x74\x5c\x52\x82\x2e\x25\x4e\xf8\x03\xcd\xf7\xcb\x1b\x67\x46\x32\x6b\xb2\xf5\x73\xc4\xdb\x38\x14\x1d\x5f\x61\xf7\x50\x75\xf7\x2d\x84\x17\xce\x1b\x52\x2a\xb7\x30\x8e\x6d\x48\xa5\x0e\xe1\xf8\x41\xec\x17\x21\x9d\xa3\x25\xb0\x36\xaf\xf1\x82\x39\x4e\x66\xaa\x0c\x04\xa3\x62\x23\xd3\x24\xce\xff\x75\x1f\x4e\x91\xec\x36\xf0\xfc\x9b\x3c\x2d\x33\x25\xaa\x82\x83\x56\x87\x21\xe6\x9c\xd9\x4d\x56\x9c\x08\x9b\xbe\x4b
\x85\x5f\x80\xd1\xf9\xb0\x98\x9b\x3a\x6f\xfc\x5b\xaf\xda\x37\xe6\xa1\x67\x96\xed\xaa\x08\x6e\x4c\x48\x1b\xf1\x32\x29\xaf\xc9\x51\xfa\xed\x62\x30\x50\x34\xa7\x0f\x33\x56\x60\xea\x4c\x38\xce\xc5\xd8\xff\xd5\x2d\x3b\x24\x61\x88\x66\x07\x46\x9d\x16\xc8\xac\x8d\xbf\x0d\xef\x53\x5e\x32\x79\x9c\x17\x4c\xd2\x30\x42\xe5\xaa\x18\x07\x87\xba\x0c\x92\x29\x2c\x48\xc1\xc4\x1c\xb7\x2f\x03\x33\xff\xfb\xcd\xfe\xc4\xac\x71\x1c\xa4\x29\xf1\xec\x03\xbe\x54\xc7\xdf\xae\x70\x34\x71\x28\x53\x68\xd4\x30\xb7\x97\x1c\x48\x5b\x2f\x6a\x2a\x3d\xad\x72\x77\x6d\xf5\x60\x21\xcd\x05\xd2\xea\x0f\x8a\x4d\x95\x96\xce\x74\x0d\xcc\x88\x88\x30\xcc\x4a\x1b\x8f\xb2\xb7\x20\x8e\x58\x89\x75\x16\x1a\x47\x08\x6f\xaf\xee\xff\xdb\x57\x3d\xc8\x44\xf2\x20\xea\x13\xeb\x79\xfb\x16\x3f\x22\xe8\x67\xcf\xab\x50\x7c\xaf\x54\xa9\x8e\x45\x16\x45\x71\x3a\xf6\x02\xbe\x86\xf4\xbe\x6c\x47\xbd\x6b\xb1\x43\xb5\x05\x1f\x36\xa2\x40\xf6\xd8\xfb\xa7\xc7\x52\xdd\x72\xb7\x25\x83\x40\x65\x92\xbb\x69\x5b\xc4\x99\x23\x95\x09\x80\xf7\xec\x1b\x4f\x42\xd1\xe5\x35\xd9\xb8\xd9\xce\x2b\xa1\xb7\x4e\xe2\x9f\x21\x7f\xde\xe0\xcf\x60\x40\xcc\x8a\x8f\x49\xc4\x66\xd6\x4d\x8c\xb1\x78\x71\xf7\x29\x62\xaf\xa8\xe1\xeb\x50\x87\x15\x3a\x22\x9b\x0f\x1b\x9f\xad\x1d\x79\x16\x1d\x07\xfe\xe3\x65\xa3\x49\xa8\x1e\xab\xa9\xd8\xaa\x07\xc6\x77\x4b\x4f\x26\x4b\xf4\x30\x9d\xdb\x9c\x39\x3e\x58\xbf\x9f\x63\x66\x9b\x52\x5d\xf1\xae\x7f\xba\xe7\xcb\xfc\xac\x73\x77\x29\xbe\xe6\xed\x7f\xb3\xc6\xdd\xd7\x85\xaa\x19\x22\xe0\x8e\x66\x3c\x05\x73\xe4\x67\xa4\x0d\x41\x10\x84\x52\xec\x26\x55\x8e\x1a\x9f\xdc\x3d\x44\xc3\xe3\x1e\x0f\xd3\x54\xb5\x6b\xfe\x6c\x3b\x4c\x22\x39\xbd\x4b\xbc\xd8\x2a\x30\x8d\x29\xf3\xbb\xb9\x02\xd3\x40\xda\x70\xe6\x63\x54\xdd\x2a\x8f\x17\x06\x13\x0e\x13\x9a\xd8\xbf\x85\x7e\xc9\x16\x49\x6f\xd4\x30\xf5\x76\xeb\x57\xea\x16\x80\xd6\xf8\x31\xd3\x4d\x51\xa0\xf9\xf5\x99\xa7\xe8\xaf\x9b\xf8\x57\x00\x1b\x36\xc3\x06\xd4\xa8\x14\x51\x08\xef\xab\x64\xa6\x68\x76\x88\x02\x4f\xe6\x2c\x4e\xef\x77\x22\xcf\x80\xec\xfb\x05\xf7\xaf\x05\x07\x6b\xb4\xdf\x00\x3d\x16\x86\xd5\xc2\x3e\x5a\x3b\x3d\xec\x25\xcd\x36\xa7\x55\xb5\x18\x31\x95\x08\x5b\x3a\x02\xc2\x6f\x86\xd8\xef\xf0\xce\xb3\x35\xcd\x06\x54\x21\x91\xde\x43\x05\xa2\x13\x30\xb7\xdb\xaa\x01\xf4\x92\xa7\x56\x33\xc9\xca\xad\xeb\xba\x99\x8f\xee\xb4\xa3\x30\x46\xfe\x5b\x58\x6d\x6f\xb7\xf4\x4f\x7c\x47\x65\x4c\xe8\xb2\x4b\x04\x38\xfe\x08\x8b\x71\xde\xe8\xc3\x38\xa0\xb2\x70\xa0\x9a\x98\x7b\x5d\xd7\x74\xb7\xf2\x07\x30\xfd\x58\xa8\x39\xd6\xc1\xcf\x6c\x02\x62\x14\xca\xac\x76\x1f\x14\xaf\xc7\x6f\xa3\xba\xe8\x15\xce\x52\x05\xea\xa1\xc2\xf3\x27\x4b\x23\x33\x8c\xe3\xfe\xb4\xff\x4d\xe9\xf2\x8d\x63\xdc\x60\x37\x83\xcb\x3c\xd1\x1d\xfe\xc2\xc3\x16\x86\x85\xf3\xfa\xcc\x98\xbe\xe6\xdf\x71\x21\xaa\xd4\xcc\xc9\x98\x9c\xd9\x4c\x0a\x3f\x7b\xa9\xf4\x64\x64\x22\x1b\x94\xff\xf8\x6b\x8e\xd4\xd2\xea\x15\xe4\x85\x8e\x5f\x06\xbf\x12\x89\x4c\xd1\x86\x0e\x30\x6f\xdf\xa6\x21\x3a\x54\xce\xd7\x05\x72\x57\x64\xe7\x60\xd4\x1c\x05\x44\xbf\xae\xb1\x84\x50\xe0\xf4\x58\x72\x67\x0a\x2c\xd7\x2e\x6e\x38\x9c\xbf\x8d\x3b\xb7\x56\x7e\x18\x06\x41\x48\x92\x2e\xaf\x88\x7f\x23\x77\x37\xf1\x39\x65\xdb\x1d\xd3\x1b\xe7\x07\x02\xd7\x55\xed\x7a\xfb\x2a\x09\x76\xfb\x80\x7e\x3f\x4d\xcc\xb4\x1c\xd3\xaa\xee\x7c\xf7\xbe\x98\xdf\x8e\xf0\xb6\x9c\xe0\x30\x62\x27\x5d\xa6\x59\x96\xfb\x49\xc4\xb0\x02\xdd\xf3\xfc\xe3\x4e\xd3\xd5\xd7\xf5\xc7\xa9\x35\xd7\x38\xac\x6b\x5b\xbc\x2c\xd7\xd5\xc9\xe6\x24\x8c\xa3\x85\x13\x6c\xe5\xb6\x90\xe0\x46\x59\xf8\x37\x59\x66\xd0\x15\x60\x90\x18\x05\x1a\xc0\x41\x06\xad\x6a\xbb\xcc\x42\xcd\x32\xeb\x9e\x03\x45\xa5\xfa\x13\x80\xdc\x43\xe2\xfa\x9a\xb8\xde\xbd\x00\x65\x05\xcf\x73\x3f\x3e\x47\x7f\x8
8\xd1\xd5\x75\x24\x3d\x7b\xfa\x2a\x5d\x4b\x4f\xd1\xe9\x69\x15\x0b\xdd\xdc\xf8\xa6\x99\x57\xc3\x50\xdf\x0b\xa7\x92\xe1\x56\xb4\x76\xe1\x37\x31\xec\x6d\x16\xa5\xd5\xc9\xf6\x70\x89\x4d\x02\x81\xb5\x16\x02\x37\x6f\x44\x36\x01\x7a\x72\xa6\xc8\x9d\xf2\xf9\x3b\xdb\x11\x2c\xc7\xfd\x60\x0e\x3e\xb2\x40\x07\xf1\x46\x3e\xd6\xb3\x98\x12\xd8\x30\x8e\x9e\x00\x15\x8f\xd9\x81\xea\x26\x24\xfb\x54\x91\x7d\xc3\x86\xf3\x92\xbb\xe3\x0d\x8c\x75\xaa\xaa\xca\x94\x33\x8d\xec\xd4\x71\x5d\x6b\xac\xd2\x6d\xed\x60\x6c\xf6\x17\x66\x06\xf1\xe5\x05\xb1\xb8\xaa\xef\x72\xee\x16\x10\xb8\xea\xf5\xb8\xc4\x73\x13\x8f\x81\x02\x65\x79\x90\x22\xee\x6d\x0a\x94\x7a\xce\xef\xe5\x62\xdf\xf9\x31\x0f\xe7\x8e\x9b\xd1\x10\x07\x71\x8b\xc6\xae\xdc\x84\xc1\x90\x24\xc2\xb1\xc3\x20\x08\xd6\xf5\xce\xfc\xca\x97\x8a\x04\x4f\x78\xcc\xd7\xfd\x52\xb5\x9d\xe7\xf3\xb7\xeb\x31\x24\x1e\x3f\x4e\xef\xc7\x74\x6c\x66\x44\xc5\xe9\x12\x72\x02\x64\x7e\x8d\xae\x44\x14\xb2\x2e\x60\xd3\x7d\x73\xc8\xdb\x33\xfa\xec\x92\x1e\xb8\xa9\xcf\x35\xe5\x0e\x61\xd4\xd3\xbf\x7d\xf1\x29\x2b\x13\xdd\xc3\x3c\x64\x20\x0b\xa5\xc1\xbd\x0e\x81\xd2\x12\x5a\xa5\x4d\xa4\xf5\x2c\x28\xec\xaa\x95\xbd\x0e\x89\x12\x6d\x6d\xd4\xae\x35\xc6\xf4\x92\x19\x6b\xe4\x0b\xff\x1d\xcb\xf6\x52\x95\x85\xa6\x85\x5d\x77\x43\xb5\x41\x04\xb4\xdf\x07\x3c\x48\x4e\x9b\x6c\xbf\xd5\xac\x67\x83\xcf\x90\x64\xd8\x47\xbf\xb4\x64\x0e\x5a\x54\x5f\xda\x66\x6c\x7a\xa9\x4b\xe5\x81\x1b\xb0\xb5\xc2\xe2\xee\x5d\xcb\x51\xf6\x7d\xd8\x92\xb5\x8d\x8e\x34\xb1\x88\xfb\xa1\xc4\x63\xb1\xb6\x53\x5f\x36\x68\xc3\x97\x31\x7d\x7f\x8e\xbb\xae\xc3\x56\x7f\xf3\x81\x93\xb2\x25\x33\xe7\xd5\xec\xc6\x2e\xb9\xf4\x2e\x4e\xd3\x71\x58\x51\xf2\x80\x14\xf1\xaa\x1b\x1d\xb7\x16\x84\xfc\x7d\xb0\x21\x74\x0e\xc8\x49\x37\xea\x1e\xa4\xb8\xc2\xd9\x61\x4a\x99\x32\xb8\x93\xff\x2b\x65\x27\x10\x8b\x71\x2d\xfb\x45\x75\xe1\xb7\xb9\x0e\xc6\x66\x7e\x66\x84\xed\x01\x40\xb3\x54\x0a\x56\x13\x0a\xa2\x13\x2c\xf4\xfb\x7a\xae\xbe\x95\x1d\xab\xa3\x02\x60\xd7\x30\xf8\xc9\x41\x04\x0d\x5a\xe4\x4f\x14\x1b\x3f\x94\xbb\x81\x92\x54\xc4\x54\x7a\xa2\xe0\xf5\xce\xf8\xdc\xa8\x48\x3a\x16\x97\x4d\x16\x5c\xd9\xbd\x22\xa1\x80\xb0\x88\xcf\x9a\x3e\x4a\x9e\x3a\x44\x56\x6b\x87\xf8\x6d\x7b\xdc\xe5\xe0\x74\xde\x58\xda\xaa\x8d\x3b\x43\x95\x25\x8f\x1e\x5a\xda\xf4\x58\xca\x6a\x56\xa7\x5e\x86\x6e\x42\xe7\x50\x9b\xfa\x1e\xbc\xbb\x2a\x7f\xfc\x02\x43\x72\x3f\x86\x88\xce\x47\x28\x46\xef\x75\x4c\x75\xbf\x47\x77\x85\xd5\x81\x8e\xbd\x50\x32\x5c\xc0\x6f\x3d\x10\x1d\x28\x97\x3f\x9b\x61\x5a\xbd\xd3\xbf\xdf\x6b\xfc\x32\x9e\xa6\x75\xdf\xd6\xa4\x73\x1c\x04\x23\x63\x61\x45\x1c\x92\x32\x12\x1e\xfa\x1e\x54\x21\xd4\x82\x33\x1d\x93\xd1\xd1\x2a\x8b\x3a\x73\x4e\xa7\x65\x17\x32\x97\x49\x7b\xf3\x5e\x31\x85\x1e\x9b\xed\xb3\xfa\x00\xf0\xd2\x6b\xee\x61\xd3\x86\x47\xab\x16\x6b\xe3\x51\x39\xed\xa8\x69\x9c\x7d\xea\x1a\x2a\x6a\x7d\x77\x76\x52\x2e\xbb\x77\x8d\x3b\xd1\xea\x14\xb8\xf7\x0e\xd1\xa2\xc7\x57\xdb\xa4\x52\xca\xd9\x60\x82\x2e\x97\xd3\x7f\x8c\x97\x39\x24\x34\xda\xc6\xc9\x5b\x6a\x5b\xfb\xf4\x4b\x33\x51\xaf\xfe\x89\x73\x3a\x44\xc9\x89\x52\xf2\x5c\x45\x64\xcd\x41\x85\xec\xd0\xc7\xf8\xe5\x6d\x58\x56\xdf\xac\x87\x63\x9e\xee\x6a\x66\xc0\xa2\x35\x3d\xbf\xdd\x17\x44\xf6\x0e\x2b\x3d\xd5\x6c\x96\xe1\x98\x01\x3b\x45\xfe\x77\x06\x82\x15\x21\x6a\x92\xc3\xfe\xbe\xa5\x88\x32\x34\x69\x35\x60\xc0\x88\xfd\xbe\xc2\xaa\xef\xbd\x0e\xc2\x20\x08\x0e\x54\x63\xd8\xcd\x27\xc7\x63\x24\x4b\x7b\x7a\x39\xbf\xca\x4b\xcb\xe3\x0f\xc6\x64\x34\x79\xf1\xe5\x1c\xf5\x07\x8b\x19\x1c\x85\xfd\x15\x80\xac\x83\x69\x23\xcf\x4a\x77\xef\x9b\xba\xae\xed\xc8\xb1\x35\x8d\x16\x42\xe0\xaf\x65\xd4\x60\x94\x5c\xf7\x19\x46\xb3\x22\xa6\xc6\xb2\xc3\xb5\x25\x85\xdf\xa6\xf2\x7c\x
09\x05\x00\x15\x98\x97\x5a\x0c\xa5\xdd\xab\x77\x24\xfd\x02\x9d\xd7\xde\x4d\x03\x2e\x6a\xe9\xa1\xe4\x81\x0d\x23\xa4\x7e\xfc\x25\x2b\x18\x09\x13\x34\xb2\xb9\x0b\xd3\xa7\x0c\xc3\xd9\x42\x93\x67\x2e\xe1\xe6\xeb\x3a\xaf\x84\x8b\x0a\x75\x44\xb5\xc0\x7b\xcf\x2a\x52\x05\xf3\x9e\x01\xcc\x1d\xff\x1a\x5f\x7d\x13\x8f\xd0\xaa\x26\x2e\xc5\x39\x95\x14\xc8\x40\x49\xfa\xc5\x2b\x80\x2a\xa9\x50\x95\x3b\xa7\x35\x11\x46\xa1\xec\x72\xc2\xce\xdf\xb9\xfd\x5b\x07\x3e\xfa\xb7\xc6\x4c\x4e\x68\x74\xcf\x4f\x64\x3d\x08\x96\x54\x6d\x52\x2d\x8c\x12\x67\x80\x7e\x67\x7f\xde\xdc\xaf\xd3\x27\xdc\x99\x33\x49\x73\xd0\x82\x73\x62\x4c\x0b\xb7\x00\x3a\x34\x17\xef\x33\xc1\x28\x58\x86\x1a\x46\x22\x7d\xcf\xed\x20\xde\xd4\x31\x52\xa9\xf6\x57\xb5\x5d\x91\xac\x18\x55\x53\x63\x15\x18\x21\x8b\xef\xd7\x8c\xfe\xba\x2e\xab\x2b\x00\x8d\x77\xd3\x5a\x89\x96\x68\xb6\x0b\x1c\xe0\xbc\x67\x6a\xd0\x55\x20\x93\x8c\xc0\xd8\xb7\x0f\xc2\xa3\xd3\x27\x2d\x57\x0c\xd2\xd8\x6c\xc2\xfb\x1e\x9d\x88\x5c\x73\xad\xb2\xf1\x8f\xdb\x38\x50\x6c\x41\xba\xc3\x4b\x08\x37\xf9\x63\x2d\x3d\xd6\x8e\xc4\x10\x2d\xae\x5f\x80\xed\x41\x12\x06\x53\x82\x14\x75\x7d\xdd\x6c\xe9\x77\x33\xbd\xa5\xd2\x86\x10\xc6\xc8\x67\x4a\xef\xb6\xa1\xf7\x3d\x33\x02\x1f\x59\x38\xbb\x77\xa6\x7c\x3b\x06\x54\xe7\x17\x2f\x96\xe0\x9d\x53\xb5\x20\xeb\x41\x76\x38\x59\x9c\x53\x75\xc0\xf2\x84\xf5\xaa\x94\x19\xe3\x4b\x15\x60\x71\x7a\x3e\x64\xcc\x04\x75\xfe\x31\x5b\x8d\xd4\x11\x12\x22\xb7\x44\xcf\x3a\xed\xbd\x52\xa9\xf0\x7b\x99\x94\x01\x18\x45\x03\x37\x99\xdd\x9c\x12\x27\xd9\xad\x36\xc5\xa8\xb5\xf8\xd2\xe5\xc4\x1f\xa4\xd8\xc2\x06\x72\x65\x27\xba\x98\xec\x80\x66\x21\x55\x45\x9f\x12\x5c\xff\x7a\xf5\x58\x28\x4a\xb9\x1a\xcf\xf4\x6f\x61\xf6\x13\x92\x16\xc0\xe6\xbf\xb9\xc6\x31\x83\x6f\x31\x8c\xf3\x07\x05\x8e\xda\x2f\x7e\xc7\x43\xe0\x9a\x9a\xfb\x26\xbc\xf7\x61\x69\x6f\xbe\x64\x62\x02\x78\xbf\x3e\xba\x81\x09\x79\x93\x27\x8f\xc4\x15\x2e\x4f\xfb\x38\xbd\x36\x0a\x36\x18\x1a\xba\x01\x7c\x9d\x68\x21\x7e\x5a\xce\x57\x94\x5b\x03\xd3\x71\xd4\xd4\x71\x08\x97\x4c\xc2\xc0\xe0\x30\x8c\xb9\xba\x42\xa4\xbe\x3b\x9e\x5f\x91\xb7\xf4\xbb\x75\x02\x56\x74\x74\xf1\xa8\x02\x85\x90\xaf\xea\x7b\x79\x10\x6d\xf0\xf5\xac\x47\x9f\xb2\x83\x99\xde\xd9\xbb\xc8\x48\x1a\xc0\xa8\x9c\x9b\x26\xf3\x5c\x59\xf8\xa3\x57\x03\x66\x57\x50\x36\x68\x22\x0c\x17\x51\xfc\x13\xd9\x3a\x17\xf5\x6f\xae\xa7\x03\x2f\x13\xab\xcc\xb1\x88\xc1\x00\x30\xdb\x9d\xfc\x57\xad\xd1\xf6\x80\xd6\x84\xdf\x02\x2f\xd4\x11\x4c\x98\xdf\x9e\x49\xcd\xec\x6a\x1a\x47\x9c\xd7\xd6\xb7\x8e\xd9\xbb\xd7\x45\x01\x95\x15\x5c\x20\xd6\x96\xd4\xd0\xb9\x32\xe5\x32\x0c\x83\x07\x70\x10\x38\x11\xf3\x45\x60\x6a\xec\x85\x92\x14\x91\x0f\xdf\x85\x82\x59\x4c\x18\x3e\x79\x59\xbb\x89\x0e\x54\xd5\x6e\x6b\x5f\x4c\xa8\xf5\xda\x69\x87\xad\x5d\x91\xab\x90\x3f\x95\x32\x7b\x48\xe7\xab\x0b\x26\x31\xf0\x66\xae\x07\xec\x6e\x53\xed\xe8\x4a\x9a\x63\x24\x18\x9c\xe8\x89\x6b\x90\x3a\x3d\x6a\x50\xec\x28\x70\xc3\x12\x3e\xb2\x48\xee\x1e\x28\xb3\x26\x2f\xa6\xc3\x2b\xba\x68\x60\x02\x12\x2e\x39\x43\xd2\x18\xeb\xe8\xc3\x97\xd0\x1f\x78\xfe\xd4\xeb\x5d\xe5\xf6\x1a\xb8\xb5\x05\x21\x88\xc4\x28\x98\xbe\x01\x65\xb6\x1a\xcb\x1c\x29\x79\x9e\x65\x0e\x00\x2c\x9f\xdc\x13\x28\x2b\xaf\x79\x67\x81\x66\x0f\x63\x56\xfe\xa5\x2e\x6d\x78\xf1\xd4\xab\xf2\x91\xd5\x20\xbb\x85\xd2\x39\x50\xf1\x11\xf8\xd5\x25\x50\xc4\x7a\x3b\xad\xfd\x6b\x71\x6f\x0e\x9b\x6f\x0f\xdd\xd2\xf4\x72\x59\x93\x1a\xc8\xe8\xe5\x47\xb8\x05\xfc\xf5\x17\x2f\x48\xfc\xa6\xaf\x4e\x2b\x84\x7d\x44\xd6\x3e\xea\xe3\x3e\xaf\xef\x79\x6b\x7d\x0a\x54\x32\x28\xe2\x9a\x73\x9b\x09\x0d\x94\x57\x26\x09\xc5\x89\xc8\x32\x28\x23\x64\x71\x1d\x21\x59\x28\x33\x60\x3f\xd7\x68\xf6\x90\xff\x63\x46\x0b\x91\x9c\x0a\
x6a\x1c\x0c\x2a\xbd\xec\xd3\xdf\x7d\x53\xc5\xfc\x0c\x7b\x14\x4a\x18\x4c\x55\xdb\x79\x1d\xbd\x64\x24\x32\x0a\x2c\x65\x9c\xce\xf2\x12\x3c\xaf\x48\x04\xc0\x3d\x10\x9d\xfc\x25\xa0\x8b\xb7\x75\x4d\xe5\xab\xbc\xd3\x55\xdf\xe4\xd9\xed\x74\xa2\xe4\x8e\x85\xdd\xdb\x35\x05\x01\xc0\x0b\xcf\xb5\xa5\xd8\xc7\x10\x6c\x31\x14\xac\x76\xc7\x34\x3c\x67\x09\xb7\xa0\x51\x72\xd3\xc7\xa0\x1f\xa2\xad\xef\x95\x18\x2e\x11\x0c\x49\x20\xe0\xb3\x1f\x88\x4f\x51\xf1\x4c\xdf\x94\xa4\xd8\x55\x7a\x59\x3f\xb5\xc4\x6c\xec\xaa\x34\x12\xbb\x34\x71\x28\x72\xdc\x30\xbc\x12\x96\x2b\x30\xe3\xec\x1c\x2d\x38\x7b\x26\x5d\xd1\x86\x2a\x03\xbe\x17\x51\x04\xf7\x73\xa8\x51\x97\xf5\x42\x4c\xbd\xfc\x58\x55\x11\x77\xbc\xfa\xfd\x10\x58\xfd\x57\x8f\x4e\x9c\xdb\x5c\xdf\xb7\x58\xf4\x3a\x4f\x96\x08\x39\xa2\x75\x2c\xb0\x54\xa8\x7a\x42\x63\x0e\x7d\xe6\x33\xae\xe8\x18\xfd\x0a\x5a\xc4\x5f\xd0\x2b\x20\xdd\x10\xe3\xb2\x46\xc6\xf8\xa5\xc4\xbe\x49\x9e\xfd\x04\x04\xe5\xf2\xbf\x6f\x81\x24\x4f\x39\xa1\xa2\x4f\x06\x2f\x69\x66\x38\x3d\xdc\x25\x7f\x4b\x8e\x2d\x08\xa0\x48\x2e\x67\x83\x80\x70\x58\x92\xe2\x44\xfd\x21\x3a\x3f\xfc\xd6\xf9\xec\x40\x88\xe6\x4a\x84\x20\xb7\xb4\xfe\xbe\x73\x4b\x60\xf2\x10\x03\x2d\x1c\xed\x88\x8e\xe3\x41\xae\xee\x0d\x00\xcc\x2c\x08\xc8\x59\xa6\x1e\xf6\x30\xe7\xeb\x3c\x7d\x8e\x6d\xb9\xb1\xc2\x37\x75\xc6\x96\xca\xca\x9a\x60\x4e\xe0\xba\x8f\x38\xfc\xad\x5e\x83\x85\x20\xd9\x6f\x2d\x49\x8c\x2b\x4d\x85\x84\x34\x13\x8b\xd8\x37\x47\xf5\x48\x0b\xde\x1f\x9c\x80\x6e\x30\x1c\xd9\x89\x68\x09\x55\x03\xc9\x58\xfc\xe6\xca\x7c\xb1\xdd\x9e\x55\xae\xae\xdf\xff\x8f\x83\x89\x81\x12\xe5\x4c\x83\x36\x03\xce\xbf\x7d\xed\x67\x74\x8b\x83\x3f\x6f\x82\x4d\x55\xde\x32\xdf\x73\x81\x17\xcf\x92\x5f\xd1\x83\x91\xe5\x62\x2f\x22\x78\x41\x6f\x60\xbe\xdf\xc7\xf6\xf8\x62\xe3\xdd\x2b\xf2\x9c\x92\x84\x41\xe0\x07\x45\x3b\xfa\x90\x2d\x24\x06\x22\x71\x86\x91\xb5\x5b\x80\xd0\xf8\x89\x66\x07\xd2\x5c\x0b\xff\x9a\x9d\xf1\xa2\xc9\x0b\xb6\xb7\xbc\xf2\x68\x01\x65\x86\xd4\x30\xbb\xce\xbd\xfb\xf6\xfb\x12\x8a\xa4\xdc\xba\x0f\x44\x27\x64\xf1\xbb\xb9\x4a\x0f\x94\xe0\xa7\x80\x79\x9b\x81\xe8\x17\xc3\x25\x5f\xaa\xb7\x05\xfc\x54\x45\xa9\xd3\x18\x4a\x8d\x50\x19\x86\xdf\xab\x77\xf9\x74\x8a\xd5\x20\x66\xc2\xb7\x8f\x43\xb8\x41\x36\x31\xa4\xd7\x15\x66\x31\xd5\xfe\x56\xc7\xd9\xb7\x51\x04\x72\x96\xad\x1f\xb6\xa9\x65\xc0\xfd\x92\x84\xcf\x66\x5d\x26\x80\x69\x73\x09\xab\xf7\x9c\x1d\xe9\x2d\xa4\xe4\x35\xb0\x45\x8d\xf1\xa3\x80\xca\x14\xb0\x4b\xbc\x60\xed\x47\xc4\xac\x87\x14\x53\xd2\x69\x5d\xd9\x4b\x9c\xcb\x39\x1d\x5e\x59\x38\x93\x98\xa9\x6f\xf8\xf9\xa4\x60\xfa\xe8\xf9\x35\x22\x1c\x5a\xc6\x0f\xbc\x0e\x1d\xea\x7b\x7c\x16\x0e\x27\x4b\xbb\x47\xdd\x49\xf2\x43\xbc\xf9\x83\x90\x8e\xdb\x0a\xed\x81\xa6\x08\x6e\xd4\xd9\x3a\xad\xba\xfb\xd6\x81\xde\xab\x88\xd7\xd8\xe4\xfc\xb7\xa4\xe1\x40\x57\xe4\x24\x42\x06\xd7\xce\xed\xd9\x77\xc2\x6b\x9c\xa4\xb3\xbf\xd5\x2a\xd5\xec\x4c\x5e\xd6\xf4\x41\x0f\x1e\x87\x12\x31\x99\xcf\x5e\x0f\x47\x9d\x22\xc3\xf2\x77\x9f\xfd\x0b\x42\xec\x2d\x4c\x81\x59\x45\xa8\xed\x70\xe4\x08\x37\x00\xac\xe8\xb6\x88\x67\x25\x2d\xac\xc9\x9b\x2b\x89\x6b\x87\x7b\xad\xac\x51\x10\xa9\x1b\xba\x99\x62\xe1\x0d\x40\x6b\xdb\xf9\xd4\xfe\xe6\xce\xe0\x47\x8a\xe8\xe4\x11\x65\x46\xe6\x73\x0b\xe8\x98\x8e\xcb\xfa\xc7\x45\xfb\x68\x65\xf4\x40\x55\xf3\xdb\x5f\x1a\x09\xfe\x1a\x4e\x16\x41\xed\xa5\x58\xb0\xba\x84\xf7\xbd\x5d\xba\x3c\x91\x6e\x26\x52\x61\x7f\x71\x08\x87\x38\xb0\xfd\xd7\x53\xd0\x9a\x36\x81\xf8\xb1\xfa\xc8\x3b\x8a\xb4\xee\xf0\x4e\x9a\x2f\xbe\x13\x49\x0b\x3a\xe1\x85\xce\xdd\xbb\x03\x95\x15\xe9\xa1\x80\xeb\x99\x0d\x42\xba\xec\xa5\x3a\xa9\x9e\x97\xcb\xd9\xea\x39\x71\x4d\x75\xd1\xe1\x6f\x91\x72\xbe\x6b\x44\xba\x56\x68\x67
[... remainder of gzip-compressed byte literal for the preceding generated asset elided (binary payload) ...]
+var _pagesAssetsStylesContainersCss = []byte("\x1f\x8b\x08\x00...")
[... gzip-compressed bindata for pages/assets/styles/containers.css elided (binary payload, truncated in source) ...]
c\x04\x12\x58\xc6\xec\x4a\xcf\xa8\x02\x80\x1b\x29\xc9\x89\x27\x21\x77\x0b\x97\xf4\x8e\x6f\x5a\x8c\xdd\x90\x61\x5b\xda\x2e\x87\x53\xa1\x1f\xf9\x59\x66\xf1\xf4\xd2\xa1\x44\x44\xe5\x80\x66\x12\x8a\xa8\x3f\xe9\x93\x3f\x33\x20\x2a\xc8\x02\xe6\xf4\x45\xca\xed\xcd\xd6\x27\x1a\x9e\x38\xe4\x9e\xc4\x0c\xdd\x27\xe8\x46\x48\xc7\xbd\xb3\xe0\x5c\x74\x73\x60\x01\x69\x80\x3c\xc5\x7b\x88\x06\xa2\x16\xd2\x41\x37\x72\x99\xc2\xa3\x3b\x90\xdb\xdc\x03\xd0\xbb\x48\xc9\xf3\xce\xc5\x40\x23\xb3\x23\x54\xec\x50\xc7\x38\xa2\x18\x86\xad\x20\x81\x3b\x93\x02\x72\x63\xcf\x20\xf2\x71\xa0\x01\x58\xc8\x14\x4d\x76\x77\x18\x7d\x6e\x22\x0f\x49\x5b\x56\x23\x87\x8f\xda\x04\x44\x4f\x77\x58\x2c\x3a\x9e\xc6\xdc\x7a\xf3\x11\x70\x8a\x8a\x42\x25\xbd\x94\x9a\x3a\x07\x72\xf2\x99\x84\x72\xd0\x29\xc0\x61\x7c\x6a\xde\x09\x83\xf7\x40\x6d\x90\x22\x4f\xd0\x6d\x5f\x9f\xc9\x82\x60\x2d\x6a\x1d\x98\x9c\x4c\x30\x20\x26\x08\x69\x65\x28\xb7\xe2\x37\x54\x80\x89\x3b\x5a\x6f\xa0\x4a\xc6\x60\xde\x8b\x40\x09\x8e\x05\xe9\x00\xb3\x1f\x30\x49\xce\x63\xe1\x8d\x2d\x4f\x4a\x8b\x78\x80\xd2\xb9\x30\xcf\xc1\xc1\x01\x21\xac\xa8\xa0\x47\x71\xb3\x68\xf7\x6e\xd5\xba\x76\xc7\xa1\x42\xdc\x67\xed\x3a\xa1\x47\x74\x23\x1c\x2c\xce\x8f\x61\x37\x8c\xa3\xc3\x7d\x10\xed\xa6\x94\xd8\x3a\x14\x44\x6b\x00\xda\x0c\xdf\x38\x43\x8a\xd9\xf0\x9a\x02\xc0\xa9\x6b\x07\x0a\x55\xb7\x91\x68\x37\x7d\x4e\xef\xfb\x0e\x50\x67\xf7\xa0\xa9\x38\xa1\xa6\x94\x4c\x8a\x1c\xc0\x48\x72\x2a\x76\x12\x2a\x5a\xfb\x4e\x99\x06\x6e\x52\xb4\x48\x84\x54\xde\x41\x17\x10\x35\xd6\x8c\x02\xb7\xe7\x46\x63\xce\x09\x40\xa0\x2c\x45\xd4\x6b\xce\xbb\x86\x8f\x45\xb4\x18\x1e\x64\x0c\x8a\x01\x09\x8e\x1e\xf0\xdb\xeb\x5c\x4e\x2f\x55\x94\x3e\x19\x46\xad\x6f\x0c\xc3\x61\x77\x26\xa8\x1b\x03\x0d\x5f\xef\xc4\xec\xd2\x68\x96\x59\xe4\x6c\x69\xda\xd2\xdd\xb8\x33\xeb\x6b\x52\x64\x4a\xe6\xd2\x5c\x4c\xcf\xc2\x90\xf5\xcc\x32\x84\x4d\x60\xb4\x43\x53\x5f\xb2\xa4\xcb\xc5\x96\xd5\x3b\x3a\xd4\x96\x9e\x84\x4d\xbc\xd4\x41\xd4\xac\xe6\x92\xa7\xfd\x7d\x8e\x90\x87\x9f\x22\x3a\x74\x44\x0b\x46\x5a\x1b\x84\x5a\x0f\x7d\x6e\x93\xa1\x6d\xae\x18\x09\x37\x3e\xa5\xc5\xf0\xc4\xb4\x15\x64\x29\x22\x27\x35\xe4\x1d\x62\x1f\x25\xcb\xde\xa5\x0d\xe5\x09\x0f\x10\x4d\x5a\x90\x7a\x81\xe4\x31\x7e\xc3\x9f\x00\xfa\xb0\xdb\x14\x3f\xc9\x8e\xba\xa9\x14\x56\xb0\x69\x93\x15\x13\x6f\xbe\xc9\xc4\x48\x5c\x89\x89\x0c\xc1\x3f\x5d\xa6\x60\x24\x73\x5e\x69\x28\xbf\x5a\x8d\x2c\x62\xdc\x34\x28\xa1\xf0\xed\x92\xeb\xc1\x0e\x40\xc5\x49\x02\x3f\x3a\xb9\x2d\x85\x52\xe0\x1d\x88\x36\xc8\x0a\xa4\xf1\x75\xd2\xa7\x16\x0e\x19\x0f\x02\x05\xb0\xbb\xbc\x81\x98\x1a\x17\xb6\x51\x87\xdf\x77\x90\xde\x75\xef\x50\x3d\xe4\xc8\x35\x84\x58\x1c\xd1\x82\x0a\xf3\x62\x6a\xfc\x44\xe4\x10\x9d\x1b\xac\x69\x23\xcb\x64\x3f\xa6\x43\x38\x41\x50\xb7\x7a\x2e\xe0\xd1\x23\x35\x5f\x58\x58\xc6\xd5\x1a\x10\xd3\xb9\xea\x09\x15\xdc\x73\x56\x20\x7f\x45\x8f\xc8\xdd\x82\xc9\x83\x1c\xc1\xf2\x04\xff\xc9\x5c\x86\x82\x04\xc0\xf2\x26\xf2\x30\xdc\xc7\xc9\x23\xa5\x32\x18\x25\x4f\xb8\x19\x6c\x1f\x62\x93\x8a\xc0\xf5\x77\x59\x4b\x85\xad\x17\x7a\x03\xc1\x30\x05\xb3\xa9\x49\x1b\x10\xe8\x9c\x31\x27\x3b\x59\x4a\xee\x6e\x1c\x33\x07\xdb\xf3\xcc\xf3\x26\x76\x67\xe7\x0a\xda\x49\xa1\x5b\xd5\xdd\x1f\x2b\x39\x2e\x05\x10\xc9\xca\x10\x4b\xf8\x53\x73\xef\xe2\x33\x7d\xc3\xc7\xca\x33\x8a\xd2\x03\x4e\x74\x8e\x07\x80\xe0\x23\x7d\x8c\x40\x36\xc3\x23\x3d\x48\x3c\x27\x9a\x6f\xa8\xd0\xb3\xa3\xc0\xd6\xa1\xe6\x65\x59\x37\x3f\x10\xa8\x36\x80\x58\xb2\x6c\x3f\x79\xba\x32\xa7\x76\x27\x81\x07\x4e\x01\xb2\xdb\xd9\x2c\x48\xa4\x0d\xbb\x33\xa6\xb1\x62\x11\xff\x9c\x55\xee\x79\xdc\x36\x45\x4e\xd0\x5d\x1b\xe9\xc0\x96\x73\x2e\x7e\x48\x14\xff\x6e\x17\xd5\xbc\x51\x14\x00\xd3\x16\x
85\xd9\x06\x28\xd8\x0f\xbc\x94\xd8\xb3\xf2\x1d\x6a\x5e\x37\x7f\x4a\xe0\x1d\x7b\x86\x31\xeb\x3d\x27\xea\x9b\x5d\x58\x76\x8c\x91\x0d\x24\x73\x10\x4c\x25\x77\x83\xf4\x06\x3a\xf0\x70\xf5\x93\xa5\xcd\x02\x0b\xd2\x74\x88\x93\x94\x13\x00\x7b\x01\xe5\x1b\x68\xb8\x9a\x94\xca\xd1\x5a\xaf\x09\x20\x48\x0c\xd9\x3d\x6c\xe5\x5e\x40\xb9\x05\x90\x21\xf5\x0d\xe1\x85\x31\x8f\x79\x53\x1d\x17\xd7\xaf\xd5\x33\x05\xa6\x72\x4b\xd5\x26\xdd\x21\x20\x9d\x94\x71\xf5\x13\x19\x83\xb2\x11\x8e\xa1\xf3\x61\x97\xcc\x05\xa2\xb7\x84\xa6\x88\xea\xb9\x67\xb1\xbf\x91\x35\xbd\x24\x35\x56\xfa\x17\x8c\xe2\xe7\x59\xa4\x3e\x14\xcb\xa5\xce\x80\xac\xf9\xa6\x2f\x9d\x57\x9c\x8d\x37\x2e\xd9\x76\xad\x2b\x7f\x0b\xa6\xbe\x55\xef\xb1\xb7\x31\xac\x13\xc0\x59\x40\x9f\xa3\xc8\x4f\x56\xba\x05\x6b\x10\x14\xc0\xb8\x5b\xa8\x76\xcc\xeb\x23\xb9\x31\xdb\xdb\xdc\xc9\x30\x1c\x83\xe9\x16\xa4\x6f\x20\x8c\xa2\x14\x76\x3b\xb5\xb3\xf1\x80\x0d\xf2\xcb\x51\x99\xc7\x2d\x20\x0b\x3f\xe1\x13\xc7\x30\x39\x25\xbe\x82\x9b\x2e\x3a\xd8\xa1\xf8\xc8\x32\x6e\xa0\x02\x35\xb2\xed\x81\x04\xb6\x66\x83\xe8\x30\x64\xa7\x50\xdc\x1b\x7f\x4c\xa5\x63\xb9\x58\x09\xb9\xd7\x0c\xc4\x3a\x4e\xa0\x3a\x19\x16\x6e\x24\x39\x12\xcd\x48\x31\xbd\xf4\x31\x4d\xf4\x1a\xb8\x32\x65\xf0\xd0\x45\x92\x17\xb5\xa1\xa7\x5c\x0f\x0e\xac\x27\x8c\xd3\xdf\xc0\x9a\x6a\x3d\x95\xb0\xb0\x67\x0c\x38\xf7\x6f\x31\xb6\xf8\xce\x8e\x8d\x9b\xb1\x1f\x80\x23\xeb\xdd\x8c\x24\x13\x01\x74\x86\xc0\xed\x51\xba\x20\xf8\x12\xb7\xbd\x92\x98\x4e\xe9\x41\xa8\x26\x60\x58\xaa\x67\x53\x06\x9d\xba\xba\x11\x9b\x17\x57\x99\xdf\x68\x1a\x1f\x6a\x89\xb4\x3f\x3f\x6c\x6d\xcf\xb4\x61\x43\xb8\x1e\x34\xcf\xd4\x60\x63\x90\x90\x1e\x76\x9e\x26\xd7\xaa\x8f\xaf\xd8\xac\x70\x04\x40\x26\x2d\xec\x47\x75\xa2\xb1\x3a\x9b\x04\x23\x88\xa2\x46\x7b\x7d\x42\xb9\x7e\xae\x52\x4e\x47\x64\xcd\x16\x37\x8d\x2d\x0d\x8d\xbd\xc6\x5a\xc1\x3e\xdf\x4d\x7e\x7b\xfa\x2a\x3c\x62\x39\x05\x26\x33\xb8\x92\x2f\x38\xf3\xb1\xa9\x55\x14\xfb\xa8\x7c\xa4\x04\x1d\x2f\xaa\x7d\x97\x06\x9c\x62\xf0\x07\x61\x0f\xbc\xd6\x11\x18\x96\x17\x41\xf6\xc8\x44\x47\x5f\xc8\x4a\xa0\x59\x3d\xe4\x40\x79\xff\xf0\xa1\xa7\x0f\x57\xd6\xd6\x15\x45\xd1\x83\xe2\x0d\x4f\x83\x38\x07\xfb\x58\x07\x7b\xfb\x42\x1b\x2b\x42\x66\xeb\x2a\x09\x14\xf0\x18\x4b\xad\xd9\x57\x1a\xf5\xb4\x24\xd8\xb6\x6e\xbb\x85\x53\xa0\x83\x99\x77\x93\x1b\x0d\x28\x2a\x3c\x63\xf3\x98\xfb\xce\xe7\x71\xb3\x5a\xcd\x0a\x7d\xc1\x49\x41\x23\x0d\x88\x11\x5d\x00\x30\xc2\x71\xdc\xb0\x1f\x43\xbc\x22\x11\x6c\x05\x6d\xe4\x8f\x48\xbb\x99\xdc\xfa\x98\x8d\x4a\x17\x7d\x82\x98\x1d\xd9\x85\x35\xc7\x25\x71\xe2\x74\x5d\x55\x81\xcd\x51\x48\x20\x01\x7f\xa8\x0d\x1d\x4c\x03\x53\xbe\x87\x6e\xcd\xbb\x70\x80\xde\x5b\x24\xcf\x28\x00\x1f\x77\x79\x31\x46\x38\x5e\x7d\xb5\x69\x67\x08\xe8\xfa\x9b\xc4\x4c\xad\x7c\xa4\x86\x48\xd6\xb8\xde\x8e\xc4\xd0\x46\x57\x0b\x00\xa6\xcb\xd8\x00\xeb\x18\x80\xf4\x30\x6b\x8b\x56\x85\x1c\x19\x1a\x4f\xec\x0d\x29\x27\xaa\x67\x5d\x8e\x9a\x35\x1a\x5a\xb4\x19\x55\x0c\x8f\x33\x3c\x20\x64\x36\xd1\x27\x61\x70\x0a\x9e\xce\xa3\x47\x10\xd2\x5d\x3a\x1e\xe7\x93\x01\xc2\xab\xf5\xc0\x6f\x35\xb9\x03\xd3\x74\x92\x5d\x96\x0c\x00\x98\x0b\x51\x2b\xaf\x17\xec\x26\x16\xf3\x67\xae\x52\x29\x14\xed\xce\x29\x13\x7c\xb8\x83\x31\xca\xe0\x77\xea\xc8\x65\x61\x84\xf3\xad\xbe\xfb\x51\xcc\xfb\xcc\x2b\xd9\x90\x03\x91\xf7\x50\x6e\x68\x1e\x80\x49\x97\xd6\xee\xb0\x18\xa3\xa3\x70\x51\xe0\x74\x1e\xe8\x80\x11\x55\x01\x14\xdf\x83\xc0\x04\x6f\x83\x62\x83\x48\x70\x3f\xd7\xec\xac\x6e\x9d\x83\x23\xeb\x58\x63\x8b\xea\xb5\x67\xdc\x78\x63\x5c\xf6\x1a\x0b\xc9\x3e\x85\xf9\x7e\x48\xe5\xfa\x82\x66\x10\x20\xba\xee\xe2\x0d\x99\xdf\xc8\xb7\x05\xbb\xe2\xd2\x4e\x7d\xa0\xb3\xc1\x4e\x3e\x67\x42\xbf\xf7\x47\
xda\xc8\xe0\x13\xad\xa8\xe0\x3e\x12\xeb\x34\x11\xeb\x74\x52\xa0\xea\x4f\x02\xee\xf9\x42\xc5\x20\x98\x68\x62\x41\x4c\x9e\x74\xe2\x89\xe7\x82\x14\x62\xf5\xb0\x64\x9c\xf4\x1b\xcb\x15\xae\x90\x88\xf7\x38\xf5\xc9\xa3\x60\x8e\x9e\xdf\xc8\x54\xd8\x3a\x03\x62\x86\x04\x2c\x27\x08\xf1\x8a\x6f\xfb\x31\xf0\xfe\xe4\xc1\xbe\xdb\x20\x0f\xe8\xba\xf5\x06\xcb\x5f\x49\x34\x20\x0f\x58\x6f\x9c\x81\x58\x28\x00\xe7\x78\xe5\x98\x27\x35\x0e\xce\xf2\x0a\x02\x1c\x26\x9e\xdf\x62\xab\x8f\xb9\x69\x05\xb7\x00\x04\x83\xe2\x56\x52\xb1\x60\xf9\x70\x3a\x07\x87\x90\xae\x81\xe5\x5a\x97\x1b\xe3\x31\x0f\xf6\x11\x89\x0e\x0c\x80\x37\x3b\x9a\x51\x77\x6d\xa0\x4d\x4f\x1d\x37\x3e\x33\x8a\xa2\x4c\x74\x55\x7a\xd6\xf8\x9e\xf3\x18\x7b\xbe\x4b\x67\x08\x41\x6d\x4d\xbb\x23\x4e\xb0\x8e\x0b\x1b\x24\xbf\x3f\xa9\x74\x73\x32\xcb\x9c\x27\xa5\xc9\x57\xfb\x52\x49\x21\xa7\x30\x92\xc6\xfb\x70\x47\x52\xfb\xad\xf5\xe1\x26\x01\x8d\x80\x3c\xb9\x4d\x3d\xd2\xb2\x13\x19\x8f\xaa\xbe\xd9\xbd\x92\x46\xfb\xb1\xca\xf7\x1d\x02\x25\x83\x20\x1d\xc2\xd9\xe1\x75\x1d\x20\x2f\xf0\x2a\x33\xbd\x8e\xc7\xe0\x0a\x6c\xd1\xdd\x58\x8b\x93\xd0\x2b\x8e\x06\x05\x82\x63\x47\xa6\xf3\x25\xa6\x2b\x4d\x75\x8d\xdd\x87\x9c\x3e\x45\x8c\xdb\x1e\x1f\x0e\xca\xba\xa7\x4c\x81\x0e\x2e\x50\x20\xaf\x6c\x30\xbd\xbd\x95\x6f\x64\x18\xc3\x87\xca\x1e\x78\x38\x09\xf5\x9e\x8c\x19\xdf\xb7\xac\x59\x4a\x17\x5c\x77\x06\x88\xc8\xa6\x71\x5c\xe7\x6c\x5b\x8e\x26\xae\xbd\x71\xeb\xce\x63\xa9\xad\x92\x2d\x24\xeb\xc3\x54\x92\xad\x90\xca\x31\xd7\x52\x2d\x11\x79\x33\x44\xe9\x1c\x8a\x55\x83\x53\x04\x92\x01\x4f\x2a\x0d\xd4\xe1\x41\x85\xbd\x77\x20\x9a\x26\x8f\xe1\xf5\x22\x44\x86\x03\x80\xfb\xe3\x78\xcb\x67\xb4\xd8\x53\x12\xed\xdb\x8f\x61\x5f\x35\x6b\x0d\x05\x8a\x0d\x3e\x7b\x88\x51\xe0\x26\xd8\xf5\x84\x2a\xf0\x60\xda\x96\xdc\xdb\x09\xd1\x45\xe1\x64\x2f\x9f\xec\xf0\x12\x9c\x90\xbb\x40\xad\x01\x0e\x04\xcd\xed\xc2\x31\xfa\x42\x2f\x34\xd9\x9d\x3c\x91\x69\x1e\xea\x40\xb4\x59\xb9\x48\x01\x62\x54\x54\x16\x4f\xa9\xf8\xcc\x59\x52\xeb\xd0\x5b\x57\xf3\x38\x70\xf5\x6d\x99\x77\x11\xcd\x49\x11\xcd\x2e\x2e\xfb\x41\x27\x4b\xe7\xec\x7d\xef\x52\x59\x49\x9b\xf0\x72\xc7\x72\xc1\xf3\x32\x17\x5d\xae\xa9\x6f\xb8\x5b\x8b\xb5\x7b\x75\xb2\x46\x69\x02\xce\x7b\x32\x91\x24\xd3\x15\xac\xa8\xd7\xd8\x81\x3d\x1f\x0d\xb6\xe1\xa2\x05\x65\x30\x89\x2e\x38\x88\x5d\xd9\xc8\xcf\x5b\x25\x4a\x37\x1b\xa2\xf5\x50\xbd\xb0\x7a\x30\x02\x28\xe0\xca\xaa\xe7\x05\xf0\xee\xae\x81\x7c\x79\xd8\x33\x54\x9d\x81\xb0\xdb\x6e\xad\x38\x6c\xf1\x23\x6e\xa7\xa7\x8b\x31\xed\xf4\xfd\x55\x87\x2e\xea\x0d\x6c\xd2\x25\xf0\xbb\xb1\x5d\x8c\x48\xcf\xbb\x29\xa2\x90\x44\x3d\xe6\x46\xc9\x31\xb3\xc3\x5a\x0a\x6e\x47\x82\x18\xa6\x78\x5c\x67\xaf\x9c\xcb\xa1\x50\x30\x7b\x64\x4e\xa2\xa5\x82\x9a\xce\xde\x43\x8b\xbb\xdc\x5f\x31\x0f\xa0\xe9\xe2\xcc\xa5\x7d\x77\x43\x5e\xf7\x9a\xc1\x9d\x7d\x15\x09\x49\x3d\x67\xcf\xf7\xba\xb0\x4c\x8e\x0b\x2f\x36\xb2\x4e\xa5\x4a\x3b\xd4\x70\xc9\xc1\xdc\xbc\x03\xc1\x83\x1a\x9f\xfc\x53\x2e\x62\x83\x79\xbe\x17\x7d\x1b\x12\x4b\x83\xe9\x1c\x2c\xe0\xd2\x69\xec\x81\xc0\xf3\xb3\x76\x18\x8f\xc8\x10\xeb\xca\x0a\x1a\xb6\xd7\xd9\xab\xe9\xb4\xb1\xb8\x53\x9b\x92\x20\x8e\x8e\x44\xf3\x7e\x73\x2e\x4f\xc1\x5d\x1e\x44\x77\x50\xa4\x12\x6e\x3d\x83\x1e\xd2\x47\x09\xe0\x2e\xcd\xec\x5c\x76\xfd\xde\x13\xb7\x4e\x3e\x4c\x6e\x5c\x13\x1a\xc4\x14\x24\x5b\x69\x32\x5f\x09\x00\xcb\x6f\xa2\xbd\x87\xcb\x99\xa2\x7a\x68\x01\x6c\x9c\x3b\x47\x5e\xcd\x71\xdb\x0e\x53\xbc\x6c\x09\x62\xf2\xbb\x29\x39\xf3\x64\x13\x73\x73\x94\x1a\x75\xbf\x52\xe3\xbb\xfd\x79\x49\x49\xef\x86\x8e\xea\x88\xba\xf2\xca\xab\x9f\xcc\x82\x57\x4b\x12\xc0\x54\x21\xee\xf5\x1e\xf9\x9e\x6c\x07\x7b\x1a\x08\xb7\x68\x3b\x28\x74\xa4\xbb\x0d\x6c\xe8\x60
\x7c\xfd\x13\x1f\xd3\x09\xda\xa0\x0b\x10\xf3\x06\x7a\xca\x0c\x5f\x67\xb4\x29\x88\xc1\x55\xaa\xbe\xdf\x00\x72\xed\x26\x0d\xce\x8a\xa0\x3b\x1f\xb4\xe1\x3c\x8b\xe7\x3d\x9e\xcb\x8e\x78\x5c\x0a\x89\xfc\xcc\x41\x52\x9e\xcd\x41\x81\xaa\x13\xf9\x8c\x29\x1e\xb4\x47\xd8\x8a\xda\xe8\xe1\xbd\xf5\x25\xb6\x6b\x5d\x2c\xe1\x46\xd0\xc1\x99\x4f\x2f\x2f\x36\x10\xc2\xef\xc4\xe5\x66\x54\xab\xf1\x34\x2a\xca\xa8\x5a\xb9\x8d\x52\x67\xcc\x6e\x10\xd1\x41\x40\x03\x9d\xf7\xaa\xeb\x78\x6d\x6d\xe8\x6d\x48\xa7\x26\x55\xa7\x22\x21\x83\x98\x0c\x6c\x32\xc0\xdd\x9d\x82\x1f\x5a\xe1\xcc\x2d\xec\x8f\xb0\x3f\xd2\x5e\x36\x2c\xc1\x98\xa9\xd3\x93\x32\xd8\x27\x18\x81\x40\xff\x9a\x2b\x0a\x6a\xc2\x1e\x20\xe8\xed\x54\x74\x4d\xbb\x41\x5f\xb2\x31\xb9\x9e\x73\x22\xa0\xf4\xab\x9f\x99\x93\xeb\x2e\xa9\x75\xaa\xa2\xe7\x3a\x13\xed\xa0\xb8\xcf\x09\x13\xb6\xcf\x3a\x55\xc9\x07\x07\xf2\x87\xa7\xac\x2c\x7a\x17\x5b\x1a\x9d\x89\x75\x7a\x7d\x5e\x35\xb0\x4a\x3d\xa1\x89\xdc\xe6\x06\x14\x1e\xe5\xec\x1c\x22\x54\xa1\x4d\x04\x31\x22\x1f\x69\x3b\x1c\xf1\x3c\xb4\x2d\x3e\xc6\xed\xf0\xfa\x1c\xf1\xfc\x80\x35\x24\x47\x1d\x6d\xda\xb2\x64\x4a\x49\x91\xc1\xe5\x47\x65\x0f\x4e\x24\x3d\xec\x48\x4e\xd5\x25\x9d\xc1\x11\x8f\xa7\x90\xa0\x54\x4e\xaa\x6f\x77\xa9\x7e\xb9\x8a\x52\x4f\x4a\xcc\x9b\xf1\x3d\x4d\xfa\x51\xb9\xf7\xad\x5e\x87\xc4\xb5\xd6\xac\xf9\xc6\xb5\x32\x91\x1b\x5d\x0e\x60\xf9\x19\xad\x23\xb9\x92\x23\x87\x2b\x8a\x61\xd9\x7c\xa9\x03\x8c\xb1\xb5\xe5\x7b\xcf\xf9\x60\x6f\x9e\x10\xf9\x6e\xf3\x4a\xa4\xac\x99\x36\x80\xb1\xe7\xf2\x18\xb7\xf8\x1a\xb7\x4f\xe8\x6a\xa2\xe2\x5e\x4e\x8a\xfd\xd8\x4e\xca\x50\xbd\x58\xcc\x0e\x87\x10\x63\x52\xf5\xe2\x29\xca\xf6\x7a\xf6\xc6\xc5\x1b\x37\x21\x1c\xda\xb4\x93\xc7\xb0\x93\x8f\xb0\x15\x19\x4c\xbc\x08\xf8\x11\x49\x8f\x20\x24\xc9\x09\x64\xe0\x47\x74\xb5\x6e\xce\x65\x1f\x86\x3a\x6c\xe5\x7b\x8f\x68\x55\xf8\xa0\x1e\x6e\x7c\xaa\xc2\x3e\xfb\x71\xd8\x2a\x1c\xd8\xd6\x41\x1b\xb5\xd1\x3e\x26\x2d\x05\xa4\x89\x3a\xf9\xf0\x9a\xa1\xab\xa4\xae\xc4\xa6\xee\x69\xb2\x20\x8c\x81\x73\x9f\x71\xe8\x06\x78\x1e\x2c\xfc\x03\x03\xe8\x5d\x67\x0f\x78\xf1\xb3\x31\xa6\xe8\x8b\xf5\x08\x57\x9f\xe7\xc0\x45\x31\x31\xf5\x4c\x51\x8e\x0f\x5d\x07\x82\xb5\x7b\x78\xe8\x2f\xca\x77\x0c\x30\xee\x18\x62\xdc\x31\x40\x7b\xfd\x46\x90\x27\x72\xf6\x27\x7d\x53\x5f\xfa\x2f\x12\xf6\x19\x5e\x8d\x8d\xef\xe2\xc9\x25\xe4\x3a\xb9\x8a\x6c\x1a\x44\x18\x76\xee\x54\x20\xec\xa9\x1f\x9d\x81\xb8\x47\xdd\xcd\xec\x6e\xcf\xd3\x70\xd0\x0c\x15\xcf\xaa\xb8\x68\x77\xf7\x96\xd4\xe4\xd1\x49\x07\xa2\xe1\x0c\x48\x32\x40\xfe\x31\x3b\x09\x6f\x49\x17\x22\xc5\xdb\xc6\x1a\x70\x73\x2d\x8f\x6b\x3f\xaa\x1c\xb6\x75\x27\xf7\x9c\x55\x5a\x1c\x69\x80\xf1\x49\x34\x27\x2b\x2c\xba\x56\x58\xfe\xca\xa9\xd7\xb7\xcf\x23\x7b\xfb\x3b\x6a\xad\xc6\x9f\xe4\x37\xd9\xda\xfd\x56\x88\x32\x65\x76\xfb\x2d\xed\x64\xce\x7c\xc8\x76\x70\x6d\x29\x88\xa5\xd2\x59\xf4\xc6\x15\x1e\x8b\x86\x0b\x11\xcd\x89\xe0\x3c\xd8\x81\x9b\x0c\x0c\x1d\x41\x0c\x05\xa8\xdf\x31\xa0\xab\x5c\x15\x60\x4f\xf2\xb3\x3f\xb0\xcb\x4e\x3f\x67\xe9\x5e\xf3\x60\xbd\x3f\x75\x0b\x45\xe9\xb5\x98\x88\x74\x1f\x35\xb9\x4e\xc9\x9c\x26\xa8\xbe\x6b\x84\x2a\x57\x1e\x09\x4b\x18\x11\x52\xf5\xb9\xc8\xf6\xb1\xc8\x3e\x29\x51\x9a\x93\x49\x08\x1b\xbe\x8f\x0c\xde\x89\xc8\x22\x5c\x57\x10\xa5\x71\xd4\x9a\x34\x79\xc2\x00\xa3\x7e\xbc\xd2\xde\x2e\x39\x0f\x5c\xb7\xdd\x46\xb7\xb0\x19\x6e\xcd\x19\x6e\xa9\xa0\xa5\x0d\x07\x00\x84\x66\x48\x5a\xee\x80\x0a\xb4\xbe\x15\xbb\xa2\xd4\x7d\x79\x17\x19\x87\xae\xc0\xf3\xa3\x26\x5c\x1c\x9a\x56\x9a\x82\x67\x28\xa3\x02\x38\xe1\x65\x03\xc1\x4a\x1e\x38\x50\x74\x38\x50\x34\xd7\xf2\x79\xd5\xc3\x1c\x41\xfc\x93\x06\x4d\x8d\x06\x0b\x94\x66\x9e\x3a\x5f\x42\xaf\x08\xd1\xab\xd9\xb8\xa
1\xaf\xd9\x71\x5d\xed\xc7\xa0\xdb\x2e\x2e\x3b\x0f\x5c\x75\xde\xda\xe6\xc6\xe0\x9f\x29\xff\x50\x67\xa1\x9c\x85\x03\x80\x83\x64\xc7\x08\x4f\x4e\x10\xd0\x81\x32\xa6\xd2\xcd\x57\x3f\x56\xa0\x28\x9a\x2e\x36\x6b\x1c\xec\x67\xbf\xae\xc8\x39\x0a\x0a\xd7\xcc\x85\x17\x45\xcc\x98\xc9\x7b\xb9\x7d\xd1\xe1\xad\xd5\x45\x48\xd1\x45\xd7\x34\xba\xbb\x71\xc1\x9f\x17\x58\xcb\x02\x07\xa1\x8b\x04\x41\x67\x5c\x18\x80\xb0\xbd\xf2\x15\x9b\x5e\x8f\x5b\xb4\x69\xb1\xdf\x0e\x71\xdc\x0e\x84\xdf\x0c\xaf\xbf\xc1\x7b\x4d\x44\xc8\x3e\x4f\x8a\x7e\x8e\x84\x7e\xce\x24\x42\x87\xe8\x4e\x19\x37\x8b\xdc\xda\xe0\x1c\x1a\xdf\x53\xe0\x86\x2c\x79\xc0\xbc\x3f\xdf\x7d\xcb\x6c\x2c\x8d\x8c\xba\x20\x58\x68\x56\xa4\x31\x47\xd8\xf3\x57\x0c\x59\x8f\xe8\xd5\x9d\x8c\xf1\x63\x90\xa8\x95\x40\x1b\x75\xc8\xa7\x85\xcc\xb7\x81\xcc\xd1\x81\xc8\xd1\x81\x2c\x82\x70\x12\x4d\x44\xe7\x5a\x77\xec\x6a\x0c\xb8\x9d\xb7\x45\x3a\xa1\xe5\x76\x42\x8b\xe1\x40\x8d\xdc\x65\xe8\x81\xef\xfe\xb4\x90\x3b\x15\x5e\xd9\x1a\xdb\x44\x79\xd6\xaa\x95\x2e\x52\xba\x33\x40\x63\xb8\x4b\x3b\x04\x94\x3c\xc6\x00\xf9\x67\x4c\x7a\x42\xd4\xb2\x0f\x18\xdb\x40\x71\x7f\xd5\x17\xea\xe5\x4b\x78\x01\x0e\x54\xf3\xb0\x1f\xc3\xed\x2e\x51\xe8\xab\x6e\xbf\x62\x5f\xb4\x5c\x4f\xb4\xb4\xe5\x2e\x9d\x2e\x11\x38\x5b\xe7\x26\x0b\x1d\x4c\x4b\xb6\x6d\x0f\x5c\x77\x4a\x48\x64\xd1\x0c\xcd\x3a\x0f\x2c\x4c\x88\x65\xc5\xa1\x37\xdd\x89\x4e\xc9\xdb\x5d\x3a\x6e\x77\xf9\xa0\xdb\x3d\xf3\x85\x3a\x74\xc6\x4c\x75\xa0\x4d\x3d\x6f\xa7\x74\xde\xf6\x9b\xfd\x18\x6f\xb7\x6a\x2a\x40\xe8\xaa\xca\x97\xc7\x79\x3b\x6f\xc8\x8a\xc2\xe3\x2b\xf7\xa6\x9d\x9a\x86\xd5\x23\xfa\x88\xb0\x58\x2c\x49\xad\x84\xf5\xb5\x6a\xe2\x57\xfd\xd7\x7a\x13\x60\x36\x92\x14\x62\xe6\xa3\x76\xf1\x66\x23\xd7\x61\xcc\x33\xb1\xf8\x84\x2e\x93\x31\xbc\x32\x9f\x17\x4f\x31\x14\x18\x80\x5c\x04\x21\x29\xc9\x6e\x77\xe4\xfc\x49\x85\xf7\x29\x3f\x01\xef\xc8\x02\x0b\xc9\x3a\x0b\xd9\xb6\xb4\x23\x36\xaa\xb1\x23\x35\x21\xe3\x48\x80\x8f\x7e\x51\x3d\xff\x0e\x65\x8c\xf8\xbc\x80\xee\xf2\xde\xa0\x30\x17\x55\xb5\x21\x5a\xee\x9f\xf5\x81\x05\xdc\x5b\x8d\x43\xd6\x67\x27\x52\x56\x96\xdd\x67\x77\x53\xf7\x9a\x78\xfa\x90\x72\x61\x50\x5c\x7f\x07\x29\x33\xc5\x6f\x86\x4c\xba\x30\x9c\x74\x17\x9f\x05\x55\x77\xdf\x06\x28\x8e\xe0\x64\x8c\xe1\x38\x42\x43\x76\xa7\x74\x3e\xe7\x76\xde\x6c\xd2\x09\x48\x4d\x86\x7b\xd5\xd0\x3a\x7c\xc8\x34\x23\x3e\xc5\xcf\xd9\x96\xef\xeb\x7b\x9e\x3c\x29\x83\x27\xcc\x1b\x73\xb3\xdd\x4e\x8e\x1b\x57\xd9\x7a\x06\x60\xc0\x4f\xba\x03\xe7\x76\x32\x7d\xb9\x30\x92\x9a\xf7\x82\x62\xc7\xa6\x5a\x7a\xdf\x77\x03\x66\x33\xec\x37\x90\xb1\x18\x97\xeb\xd2\x9e\x61\xc0\xcf\xb9\x49\xb2\x21\x9c\x73\x90\xfb\xf3\x66\x7a\x82\x6c\x4b\x0f\x88\x01\xa5\xe1\x73\xfe\x81\x2a\xc9\xc9\x8d\x30\x63\xae\xb8\x69\x4a\x65\xfd\x59\x67\x55\xfb\x62\xe9\x52\xc7\x70\xaa\x4d\x80\xee\x7e\x73\x44\x88\x29\x98\xef\x7b\xa2\x4b\xad\x6a\x51\x03\xe9\x15\x4c\x33\x1c\x17\x57\x92\x85\xb5\x25\x8f\x5d\xc1\x6f\x73\x00\xef\xbe\x7a\x1b\xc8\x34\x99\xfe\x2e\x9c\xf1\x53\xa5\xee\xe8\x37\x5d\xf1\x23\xa7\xbc\x54\x58\x1b\x64\xc9\x44\x9f\xfb\xaa\x8b\x92\x21\xee\x53\x62\x5d\x9e\xf1\x71\x3d\x92\x4b\x1e\xb0\xee\xd8\x87\x0a\x2a\x58\x1e\x74\x59\x2e\x19\xd3\xb1\x0f\x54\x9e\x79\xf0\xb8\xa5\xec\xdb\x2b\x45\xd4\x05\x56\xe3\x2f\x4f\x4e\x92\xfb\x28\xb9\x00\x2c\xf8\xb9\x3f\x13\x98\x20\x3c\x94\x0b\x53\x33\xe9\x85\x4c\xdb\xa7\x01\x4a\xdf\x73\x30\x46\xe8\x43\xdf\x32\x2c\xa7\x38\x63\xe1\x10\x60\xf0\x92\x11\xfd\x94\x51\x29\x4f\xeb\x91\xd8\x2c\xc3\xd6\x68\x94\xdf\xac\xa1\x67\x0e\x89\xfd\xd4\x1d\x31\xab\xfc\x13\x75\x09\x31\x80\x18\x89\x21\xee\x8f\x14\x5c\x18\xbe\xfd\xcc\x11\x17\x36\x8c\x2e\xee\xc5\x61\x98\x0a\x72\x01\x15\xf5\x4b\xf6\xbb\xac\x2c\x7f\xd5\x13\x
8a\xd9\xd5\x7e\xa8\x02\xcd\x61\x56\x8b\x31\xca\x77\xd5\x31\xc6\x03\x63\x6e\xc2\x6a\x97\xd1\x6a\xf7\x4c\xfa\x29\x27\x53\xaa\x30\x56\x38\x2c\xc3\xd7\x90\x94\x9d\xee\x53\x7d\x98\xfc\x87\x7e\x2e\x5c\x68\x8d\x7a\xc5\x99\x1c\xa6\x98\xd7\xaa\xfa\xe6\x97\x12\xf3\xbc\x47\x7d\xcf\xf5\xac\xa4\x1d\xc1\x91\xba\x2f\xd5\xbe\xf3\xc7\x48\xd6\xa0\x19\x07\xc3\x5f\x98\x32\xbe\xbb\xa8\xec\x72\x3e\x79\x7a\xf1\x47\xff\xce\xc8\x62\x33\x30\x25\xbb\xe7\x2a\x9a\x36\x0c\x83\x7f\xe7\x9d\x69\x6d\x4b\xd6\x18\x8e\xd1\xef\x54\xc6\x5f\x9e\xcc\x8f\x72\xb5\x8f\xb0\xae\x19\xa6\xbe\xf0\x00\xc4\xe4\xd1\xb7\xbe\x81\x31\xf1\x61\xdf\xa1\xd2\x64\xb0\xa1\x8e\xda\xf8\x22\x97\xcc\xf9\x69\x0b\x4e\x34\x6e\xfa\xb5\xe4\x18\x59\x58\xed\xd5\xb9\x97\xec\x0f\xef\xe4\x35\x97\x2b\x4a\x94\x58\x46\xd4\x0e\x8f\x11\xd6\x6f\x7e\xcf\x9a\x38\xe8\x11\x89\x13\x26\x85\xc9\x94\xf7\xbe\x93\xed\xe8\x37\x36\x7a\xb5\x7a\xf7\x67\xde\xcd\x94\x6c\x72\x8c\x73\xd9\x6f\x10\x13\x31\xec\xe7\xec\xa3\x59\x91\x2a\xc4\xaf\xb4\xd3\x43\xb4\xe1\x18\x8a\xe4\x3c\x18\xf1\x33\xee\x60\x06\x32\x78\xfa\x76\x61\x74\x35\x6b\x1f\x51\xcf\xac\xec\xf6\x83\x6f\xa1\x16\x8f\xe9\xa5\x30\x3b\x17\x59\xfd\xee\x57\x31\xd1\x2d\x78\xda\x32\x2c\xab\xa9\xbf\xf1\x39\xc9\x45\xec\x41\x23\x73\x63\xeb\x19\x93\xb7\xb3\x20\x83\x7e\xed\x73\x97\xfb\x44\xae\xf8\xbe\x7a\x27\xc4\x84\x6c\x5f\xf5\xf1\x95\x14\x43\xb2\x30\xf9\x1f\x72\x0d\x6f\xb7\x35\x9d\xf6\xcc\x83\x61\x39\x4b\x76\x1f\x8b\xaa\xa0\xbf\x8d\xeb\x31\x69\xdb\x07\xc7\xf0\x17\x58\xe3\x13\x8b\xbd\x71\xdb\xf7\x78\xb9\x4e\x5b\x02\xdf\x33\xd1\x76\x8d\x92\x31\x5d\x68\x99\x08\xc0\x79\xc8\xe2\x0b\x47\xf6\x89\x63\x7d\x10\xb9\xfe\x70\x1d\x86\xb9\xab\x18\x78\x4d\xa7\x71\x6c\x79\x8c\x07\xbf\xc7\xab\xec\x16\x20\xc5\x54\xc4\xcc\x5c\xb4\x43\x7e\x10\x79\x3b\x10\xc5\xc0\x00\x4c\xf7\xa9\x23\x96\x83\x23\x80\xce\x27\x3a\x53\xc5\x27\x27\x9d\xba\xf0\xdc\x44\xa3\xbf\xbd\xd1\xb2\x3e\x69\xd5\xe3\x30\xc5\x35\x9d\xc5\xe3\xcd\x60\x6e\x16\x4a\x2d\xce\x70\xce\xd8\xfa\x56\xe7\xbe\xe7\x94\xd7\xb4\x03\x91\xcb\x01\xb7\x27\xc6\x70\x4c\x6d\xcb\x3c\x0a\x62\x8f\xcd\xaf\xcd\x26\x66\x8a\x3a\xad\x1b\xaa\x61\xbe\xcd\x76\x6a\x8c\xd0\x6b\x96\xe7\xb2\x78\x16\xcc\xe5\xd2\x6c\x77\x0c\x90\xd2\xbc\xe0\x98\x5d\x0a\x05\x9f\xe1\xf1\xcb\xb7\x98\xe3\xb8\xab\x78\xdf\x9f\x74\x6a\x30\x99\x1d\xc9\x3d\x1b\x72\xcc\x65\xc7\xf2\x62\xc9\xb3\x0c\xb7\xbd\x66\x61\x94\xa7\x50\x7c\xb7\x63\x2a\x0e\x99\x4a\x42\xd4\xb8\x68\x67\x8a\x40\xcc\x83\x65\x7c\x12\x19\xd3\xc5\xd0\x34\xa2\x71\x95\xa6\x67\x4f\xf6\x7b\xdd\x00\x24\xe4\x76\xbf\x93\x27\x69\x18\xfc\x93\xd2\x21\xe6\xc1\x98\xc7\x6d\x53\xb7\x02\xcc\xf3\x43\xba\x4b\xc8\x43\xb8\x5d\x8a\xcf\x9d\xb4\xa4\x65\x54\x9f\x07\x8d\x07\xe3\xb0\xce\x1e\x10\xf3\xe0\xcd\x5a\x5a\x35\xb4\x20\x19\x30\x6e\x07\x25\x31\xb9\x96\xd9\x3e\x77\x18\x37\x7e\x23\xc8\x49\xa0\x81\x27\xfd\xc4\xf8\x9a\xc1\xd8\x5b\x2e\x4e\xf8\x42\xe0\x50\xb3\xa7\x3e\xc4\x0c\x25\xfc\xe9\x63\x22\x7f\xe4\xfc\xb1\x6d\xa0\xd8\xb6\x8e\xd6\xc5\xa6\xcf\x84\x8a\x67\x8f\x4e\x3c\x6f\x80\x48\x10\x74\x62\x71\x2d\xe7\xa3\x90\xf3\x7d\x47\x7e\xd5\x51\x04\xa7\x71\x72\xeb\xf4\xb3\xc6\x63\xc6\xe6\x7b\x2f\xb2\x83\x24\x41\x27\xa7\x87\xaf\xe6\xb1\x32\x31\xe4\x02\xfc\xb7\xb0\x6a\xb5\x7b\x88\x82\x18\x76\xbe\xf5\x83\x10\xf3\xe0\xa4\xe3\xd6\xe9\x45\xc7\x93\xa0\x75\xb1\x2e\x89\xc9\xad\x5c\xf2\x89\xbf\xd7\x54\x0f\x2d\x50\xe8\x46\x81\x11\x19\x44\x31\x63\x73\x0f\x99\x0b\x1f\x86\xa1\x4a\x26\x7a\xdd\xe7\xd7\x7d\x71\xfa\xce\x8f\x26\x86\xb0\xe1\xd8\x07\x01\x10\x01\xf7\xe2\x87\xb9\x58\xae\x27\x18\xc5\x69\xd2\x9b\x0d\x69\x42\xcf\x06\xdc\xf7\x1d\xcf\x43\xa9\xc7\xa9\xeb\xce\x29\x42\x6c\x59\xbb\x28\x0c\x27\xa8\x13\x9d\x6d\x05\xc0\x01\xa0\xfa\xa4\xfc\x52\x48\xf9\xef\
xbb\x9b\xe7\xdb\x9e\xdf\xc0\x78\x16\x32\x96\x71\x36\xf8\xcc\x8d\xef\x37\x5b\xaa\xa5\x59\x2e\x39\x86\x32\x60\xa0\x81\xf4\x01\xd6\x07\x4f\x19\xe0\x7c\x7c\xce\x3c\xb7\xc1\xc0\x06\xe7\x0d\xf4\x9a\x8f\x97\xec\x3e\x66\xd0\x93\x10\x43\x7a\xa8\x3a\x32\x49\xc8\xec\x36\x5f\x19\xb3\x6e\xdc\xde\xbb\x5a\x8f\xe8\x1a\x34\xf8\x61\xcd\xeb\x84\xd6\x9a\x35\x6a\xf6\xe8\x99\xa3\xe7\x55\x61\xcb\x59\xb2\xf8\x68\x8e\x15\x7e\xae\x0b\xbd\x55\x29\x5f\xa6\xf7\xb5\x1b\xe2\x68\xd3\xc8\x2e\x26\x4d\x62\xb0\xfc\x11\x46\x10\x3a\x26\x8f\xc7\xa0\xfa\xbc\xc1\x04\x24\xb1\x7f\xc6\x24\xa3\x66\xd3\x02\x93\xf7\x81\xc2\xae\xa9\x19\x8c\x5a\x2e\xbe\x46\x43\x79\x76\xc4\x57\x0f\xdf\xe6\x46\xed\x70\x91\x93\x28\xcf\x41\xa9\x07\x65\xa0\x0d\x0d\x5b\xdc\x87\x9d\xa8\x51\xa2\xc5\x32\x17\x6f\x82\xde\x62\x91\xb8\xcf\xc9\xeb\x73\xdd\xfb\xbc\x1b\x8e\x34\xac\x61\xf6\x22\x0a\xfb\x1c\x3e\x64\xce\x8f\x4f\xc4\x3f\xf4\xfa\x0c\x84\xb6\x95\xb5\xd7\x6c\x79\xbc\x9a\xc3\xb3\xc3\x96\x68\x8c\xda\xa8\x4d\xdb\x49\x8e\x27\x36\x9e\xf0\x42\xe1\xc4\x1d\xca\xae\x3b\x16\x71\xb9\x9a\x06\x0a\xec\xa3\xe8\xa4\x36\x6b\xa0\x36\x93\xe2\x97\xdc\xfc\xfd\xfb\xb2\x97\x0e\xc1\x8d\xb8\x22\xa0\x0f\x6b\xe6\x15\x09\x3c\xc1\x7a\x44\x8f\xa8\x8d\xdf\x1b\xf6\xd7\x67\x4c\x2e\xfb\xcd\xb9\xa9\x55\x4c\xc9\xaf\x7e\xf9\x35\x73\x80\x28\xcd\x80\x28\x0d\x66\x10\x6d\xc4\xfa\x3d\x3c\x9e\xcf\x83\x0b\x71\xdd\x7e\x4c\xb7\xf3\xd6\xc8\xb6\xbb\x68\xb6\x3b\x28\xf6\x83\x32\xb4\xb3\x90\xa0\xd1\xd6\xf8\x10\x30\x16\x06\x84\xb4\xa2\x40\xe9\x22\x80\x69\xd4\x21\x50\x93\x28\x89\xb6\x51\x86\x60\xc4\x87\x65\x0d\x54\x8d\x10\xe2\x59\x5d\x35\x19\xfd\x61\xbf\xc9\xd8\x91\x0a\x51\xe3\xba\xc1\x59\x5e\x70\x18\xa9\xd6\x21\xa2\x9d\xe9\xa6\x1d\xe9\xa4\x1c\xf3\xa4\x38\x52\x6d\x34\xc7\xc5\xdc\x48\x65\x18\xe3\x76\x9d\x56\x62\x25\x57\xf2\x35\x5c\xe4\x68\x43\xfa\x57\x74\x57\x7b\x5a\x78\x44\x7e\xd4\x46\x48\xd6\x44\x88\xd7\x44\x6d\xd2\xd3\xc1\x24\x90\x47\xca\x97\x98\x9e\x75\x1e\x9c\xa1\x19\xba\x90\x39\x84\xb4\x27\x84\x01\xd3\x09\x54\xeb\x16\xa0\x28\x43\x13\x4f\x14\xd3\xf5\xcf\xde\x8f\x91\x2f\x9d\x4d\x1d\x00\x05\x6b\x77\x57\xd0\xd8\xa0\xcf\x84\x3e\x9d\x94\x93\x2b\x67\xb5\xc6\x7a\xe5\xb2\xdf\xec\x4a\x41\x9d\x68\xd2\xb2\x4d\xac\x87\x21\xf1\x61\x3a\x57\x1a\xc2\x6f\x75\x95\x2a\xe6\xd6\x3d\xda\x7e\x56\xb8\x67\xaf\x70\xaf\x79\x7f\x77\x8c\xfd\x0e\x18\x70\xbe\x6d\x04\x00\x2f\xb1\xdf\x2c\xb1\x8f\x2e\x24\xf0\x94\x68\xc3\x5b\xc9\x68\x0b\x34\x8d\x46\x25\xa4\xbc\x01\x97\x4f\xbd\xf0\x4f\x56\xec\xb1\x69\xf1\x46\x7c\x77\xbd\x66\x88\x5d\x81\x75\xbd\xa3\xe3\xf4\xea\x4a\x91\x28\x8d\x66\x45\x42\x54\x73\x7c\xdd\x1f\x85\xa1\xa4\x45\x50\x9f\x79\x62\x43\x83\x6a\x43\x83\xe2\xe2\xba\xc3\x8c\xcb\xbe\x2e\x67\x8a\x68\x11\x05\x16\x24\x41\xf7\x90\x6e\x33\x77\xef\x2a\x1d\x92\x68\x94\xe4\x67\x2e\x7d\x32\xd6\xaa\x2b\xb8\x3f\x02\xa9\x27\x44\xb1\xd7\xcc\x44\xe2\xed\x56\x38\x17\xb5\x98\x27\x68\x7e\xef\x49\x9d\x7a\x42\x1a\x4e\x11\x45\x87\xe3\xc8\xcc\x9d\xfb\xc3\x7d\xb6\x2e\xe4\xe1\xbe\xdf\x0c\xb6\x3f\x28\xf6\xc8\x16\x35\x05\xb7\x31\x52\xcd\x7a\x57\x00\xc5\x25\x13\xa3\x83\xf7\x37\x84\x5c\xda\x61\x59\xd1\x5b\x1d\xb3\x3c\xc1\x7c\xce\x9e\x97\x9a\x0a\x56\xf0\x54\xc9\x33\x84\xd8\xaa\xc4\x92\x12\x57\x5b\x52\x7e\xcc\xbe\x2a\x1c\xc8\xad\xee\x7b\x65\xae\x9a\x7d\xa5\x11\xed\xee\x42\xf0\x92\x78\x0b\x69\x21\xb3\x03\x1f\xed\xc7\x3e\x3b\x9e\xbb\x6a\x37\xdd\x2b\xe0\x8f\x90\x3e\x8f\x73\x72\xb5\x90\x65\x8c\x1f\x74\x4a\x26\x7a\x34\xdd\x04\x6f\x0d\x82\xc9\x5b\x48\x8c\x0a\xb5\xe9\x02\x96\x60\x7a\x17\x19\xf3\xdd\xe9\x70\xc5\x0b\x82\x11\x99\xc6\x27\x10\x76\xe4\xe0\x8d\x65\x70\xe2\x04\x1c\x6e\xf7\x27\xaa\x41\xc5\x8e\x23\x28\x81\x50\xf7\x3e\x16\x3d\xd8\x09\x3c\x8d\x4c\xcf\x1b\xcf\xcc\x70\x74\xf1
\x44\x8b\xa0\xd3\xd9\x79\xf6\xc6\x73\x22\x18\x09\x14\x21\xf4\xac\x9e\x7d\xeb\x0c\x67\x4f\xae\x4f\xa5\x27\x25\x28\x81\x1b\x1a\x44\x9d\xde\x57\x36\x1e\x44\x89\x4e\xfe\x56\xe7\xca\x5c\x85\x35\x9a\x98\xa7\x11\x93\x6b\x41\x86\xf5\x05\xad\xdf\xbe\x6b\x80\x8b\x6b\x4b\x41\x95\xe6\xa6\xe7\xed\x34\x3c\x2d\x71\x3b\x67\x20\x0e\x7f\x44\x66\x44\xe6\xec\xc7\x70\x50\x79\xe3\x35\xbe\xd7\x0e\xd6\x78\x93\x47\xf3\xe5\x4c\x33\x99\x6d\xe7\x05\x08\xe3\xab\xc9\xb7\x59\x71\xdb\x5a\x27\x9e\x47\x8f\x94\xeb\x32\xdd\x3e\x6b\x6b\xe9\x8b\x01\x79\x47\x45\x07\x7f\xba\x1b\x45\x48\xdc\xcd\x18\xbd\x28\x50\x9b\x03\x51\x6b\xa9\x94\x90\xb4\xb5\x65\xbd\xa1\x4c\x4a\xb7\x24\x80\xd2\xf9\x12\xd9\x33\x49\xcb\x93\x07\xa9\xaf\x93\x37\x51\x9b\x71\x96\x1b\x11\xc8\x0a\xc9\x84\xd3\x25\x35\xd0\xeb\x31\x4e\xd3\x95\x0c\x42\x35\x18\x07\x65\x48\xa6\x3b\xbd\xac\xbe\xda\x74\x37\xa8\x0c\xb8\xef\xfd\xb5\x58\x1d\x9a\x80\xd2\x00\x99\xa8\x68\x6d\xe5\x13\x0c\xd0\xf1\xa4\x78\x91\x3b\xd1\x41\x4f\xbb\x7b\xe8\x0b\x6b\x02\xe7\x33\x90\xe9\x20\xa8\xf7\xe9\xed\xde\x13\x1a\x24\x1d\x55\x13\xb5\xd1\x1c\xa8\x14\xa0\x1e\x40\xe0\x35\x4f\xe4\xe6\x08\x3b\x70\xbb\x5b\xed\x32\xc4\x77\x28\xb5\x57\x97\x80\xfd\x0e\x05\x8b\x22\xf5\x55\xfc\x6e\x34\x8c\xf8\x51\xcb\x18\x06\xca\x02\xbf\xeb\x9a\x65\xdf\xe7\x1a\x7c\x72\x13\x04\x3c\x46\x8f\xc0\xc3\x19\x1d\x3c\x45\xa7\x22\xf0\xac\x6d\x59\x0b\x82\xee\xdc\x9e\x67\x7d\x55\x77\x57\x42\xcb\x47\x14\xc7\xeb\x78\xa5\x9d\xd4\x15\xac\xc0\xae\x4b\x2a\x59\xe0\x52\x17\x9d\x81\xc2\xdb\x23\xc3\xc4\x7c\x71\xce\xf3\x70\x2f\x38\xa2\x6d\xba\x70\xb9\xa4\xdf\x6a\xb3\xe9\x6f\xba\xd0\xb4\x79\x91\xc3\x84\xbd\x6b\xc8\xcd\xf7\x84\x3c\x5b\x74\xed\x8a\x68\x75\x38\xd4\xb3\x82\x72\x24\x9a\xe6\x7a\x77\x56\x07\x2d\x20\x1d\x04\x3d\x50\xa6\x19\xa0\x4c\x73\xae\xbc\x35\xfb\x6a\xfc\x94\xf0\x4c\x73\xa7\x39\x61\x30\x9d\x5f\x0d\x1f\x40\x97\xc4\x55\x7d\x42\x64\x43\xe3\x0e\x61\x18\xd5\xca\x47\x7a\xfa\x66\xd5\x7c\xce\x1a\xa2\x08\xd3\x45\x00\xe2\x53\x0e\x5e\xce\xc8\x3a\xdd\xca\x40\x49\xb9\x71\x91\xe8\xca\xde\x35\xb8\x79\xe5\xf6\x2d\x59\x0c\x98\x25\xf1\xfa\x58\xb8\x8b\x15\x4f\x7e\xe4\x45\xb9\x07\x13\x74\x7c\xad\xb0\xc2\xb9\xc7\x9b\x0b\x6b\x36\x15\x01\x60\x3e\x27\xc2\x7e\x4b\xe7\xb3\x75\x8f\x02\x4c\x8f\xae\x1f\xf5\x7b\x6b\xb7\x17\xd5\xf8\xa8\xe1\x12\x57\x4e\x42\x32\x4d\x28\x49\xb7\x54\x27\xdb\xa6\x30\xd5\x58\xf9\x44\xc5\xd3\x71\xc2\x39\x50\x7d\x53\x25\xed\x13\xa2\xb7\x18\xd8\x56\x43\x2b\x53\x81\xf5\x2b\x81\xe1\xca\xe9\x48\x48\x10\x6e\xa6\x1a\xd3\x85\x61\x9f\xb2\x7d\xa5\xc9\x4d\xbc\x57\x10\x51\x68\xce\x9d\x32\xab\x30\x68\x87\x91\x18\xd3\xa2\xc0\x1d\xc3\x60\xb9\xe3\xd3\x7f\x38\x25\x6e\xa3\x0e\xa7\x7d\x10\xc8\x51\xf6\x69\xdf\xf8\xd8\x7e\xac\x4a\x42\xb7\x87\x6e\xf7\x63\xdd\xb6\xb1\x16\x82\x22\x05\x10\x3b\x86\xa5\xba\x88\xc6\xa4\x11\x58\x7c\x2a\x5f\xad\x4b\xb4\x6c\x5b\xd7\x55\xcf\x72\x88\x6f\x56\xf1\xea\x11\xb7\x0d\x14\x5a\x0f\xce\xd1\x52\x0d\x34\xe5\x18\x08\xe2\x76\x11\x01\xd5\x34\xfa\xb0\xf9\xd6\x37\x97\xa2\x02\x06\x00\x95\x77\x1d\x02\x0f\x89\xe5\x2f\xf3\xab\xdd\xd2\x96\x6d\xf3\x03\x00\x14\xbb\xdb\xc3\x05\x6e\x77\x89\x5c\x11\x43\x68\x2d\x24\xed\x26\x8b\xf5\x73\xaf\x1b\x9a\x19\x49\xee\x97\xf0\x3e\x15\xbc\xdc\xe5\x30\xe4\x15\x41\x5e\x14\xb3\x60\xf0\xda\xf8\xf2\xf9\x33\x5a\xd6\xa8\xc7\xcc\x72\xfe\x96\x83\x78\x0e\x29\x0c\x04\xed\x72\x43\xb4\xf2\x65\x1c\xd4\x9a\x0d\x8a\xb4\x68\x96\x8b\xc5\x98\x0e\x7e\xd1\xf2\x09\x2c\xb2\x90\x4a\x0d\x91\x3f\x7b\xca\xad\x20\xf3\x28\xaf\x25\x48\x22\x3e\x42\x2d\xca\xb1\x36\x42\x82\xc1\xda\x9d\x04\xa0\x79\xdd\x5e\x7d\x7c\xdd\x24\x21\x3c\x6d\x6b\xa0\xee\xbd\x64\xe9\x17\xf7\x72\xfb\xf4\xd1\x03\x6f\xcf\x2c\x06\x28\x60\x18\x4c\xf9\xd5\x54\xd5\x48\xa2\x1d\x70\x3
6\x0f\xf7\x88\x8a\x74\x60\x9b\x88\x20\xe8\x70\x38\x12\x6d\xd7\x2b\xcb\x5a\x3f\xb7\xa9\x6b\xe1\xc4\xb1\x43\x67\x2a\x18\xd1\x8e\x24\x7d\xeb\x8c\xfe\x15\xcf\x55\x3a\x35\x8b\x30\x2d\x21\x46\x03\x52\x3d\x9f\x8f\x67\xc3\xd9\xdc\xe5\x5b\x6a\x97\x06\x15\x00\x41\x3a\x07\x81\x18\x55\x71\x4b\xe9\x06\xcd\x71\x1b\x71\xdb\xd0\x21\x36\x1f\x37\x7e\x06\xe4\xc0\x21\xb2\x03\x8f\xd1\x62\x69\x13\x1d\x32\x9b\xc7\xd8\xf8\x13\xcd\x67\xf9\xe6\x93\xcb\x75\xf2\x12\x87\x4f\xc0\x91\xd4\x07\x82\x8a\xbd\x40\xae\xc3\x76\xe7\x1e\x83\x7e\x9f\x08\x0e\x07\x49\x65\x27\xd6\x52\xe3\x08\x22\xba\x7e\x9b\x85\x2f\x37\x07\xa2\xc1\x74\x03\x01\x1c\xbc\x0b\xfd\x8c\xc9\x36\x1c\xfb\xa2\x98\x4c\x49\xd1\x67\xfc\xd0\x5a\x3b\xf6\xaa\x21\x1b\x99\x9d\x82\x79\xae\xde\xa5\x7a\x44\x79\x4e\xd1\x08\xa8\x9e\x54\x5b\x0b\xc6\xf5\x89\x20\xd8\xbc\x6d\xb4\xea\xc3\x35\xae\xdb\xce\x03\x47\x42\x9a\xa6\x2a\x8d\xd2\x7b\xd5\x34\x9d\x8f\xbd\x00\xcb\x0b\x22\x48\x62\xd9\x75\x9b\x2c\xf7\xaa\x40\x7e\x09\xf9\x67\xb4\xf9\x13\x3e\x2b\xc5\xad\x9e\x49\x02\xd8\xda\xbc\x28\x68\x61\x5c\x32\x49\x69\x63\xbf\x1b\x4e\xf8\x89\x92\x48\x3a\xda\xd2\x69\x47\x2a\x22\x9e\x15\x46\x6b\xf0\x1d\x1b\x6c\x68\xec\xd4\xe6\xe5\x3b\x2d\x3b\x5e\xb8\xcf\x58\x63\x4c\xbe\x82\x80\x79\x03\x19\xd3\xbd\x2a\x75\x48\xe8\xe8\x46\x32\x85\xd5\x0d\x44\x7a\xa8\xd2\xba\x23\xc0\x8b\xd7\x60\x97\x87\x0b\x02\x9b\x3e\x28\x63\x8f\x57\x1b\x28\x0b\x23\x1d\xd8\xdc\xc3\x85\xb4\x7b\x4c\x53\xcb\x30\x39\x63\xda\x5b\xd0\xb2\xbc\xfa\x72\xac\x74\xd5\x96\xcd\x2f\xcc\xf6\x8d\x0e\x7f\xbc\xdc\xbb\x00\x37\xd2\x80\x6a\x5b\xbe\x48\xbb\x92\x76\x32\x7f\x02\x90\x61\x87\x6d\xd5\x35\xd6\x83\xb3\xe4\x84\x3c\x71\xef\x81\x2e\x57\xaf\xaf\x4a\x7c\x59\x51\xb5\x42\x4e\x7f\x22\x7a\xe5\x00\xc0\x8d\xe8\x3a\xb4\x41\xd2\x4e\x56\xaa\xab\x54\x4b\xb5\x02\x50\x6a\x0b\xd6\x3d\xa2\x45\x1e\x66\xd2\xb7\x6f\xfe\xce\x28\x33\xbd\x2d\x06\x08\x36\xd3\xd3\xc2\x6d\x01\xae\xcc\x02\xa7\x69\x9a\xb8\xc3\xc0\x1a\x09\xfb\x25\x35\x3a\xd9\xed\x34\x6a\xe4\x2e\x43\x3c\xf9\x49\x12\x04\xb9\xd7\x0c\x89\xc9\x55\x42\x38\x65\x09\x98\x17\x63\xd4\xc7\x42\x75\x09\x27\xf9\xbc\xdd\x31\x5a\x49\x73\xec\x60\x0f\x96\x47\xb5\x07\x44\x01\xb6\xf5\xde\xf7\x32\xcc\x40\x80\xbd\x1f\xbc\x9c\xda\x52\xaa\x07\x11\xa9\x02\x4d\x1f\xf4\x3d\xce\xf6\xba\xbe\x75\xfa\x39\x57\x52\x41\x72\x95\x15\x23\x34\xd8\xf0\xd1\xd5\xb9\x48\x4f\x0a\xf4\x84\x2a\x34\xb9\xf9\x12\x3a\x4c\x28\x34\xb1\x4b\x83\x09\x7c\x0f\x01\x43\x81\xc9\xdb\x7e\xb3\x2f\x08\x00\xad\xc7\x8d\xee\x82\x8a\x4f\x01\xa6\x94\xde\xa5\xe2\x1e\x10\x1c\x78\x02\x3c\xc4\x2f\x5a\x67\x3e\x78\x19\xbd\x03\x57\x7c\x6e\x2c\x4f\xcb\xbd\x2d\x29\xe9\x6d\x81\xf5\x99\x14\xe8\xe9\xda\x8f\xca\x5d\x14\xd1\x19\x52\x4e\xdd\xf1\xca\x4a\x98\xe8\x14\x04\x75\x1e\xc1\xca\xe0\xb4\xec\x7b\x8b\x18\x17\x47\xb4\x90\xcc\x61\x46\xee\x0a\x80\xc4\xbb\x3c\xbc\x70\x97\x08\x9d\xec\x1a\x72\x7a\xd6\xde\xa4\x82\x15\x30\x2c\xdb\x3a\x5c\xa2\x8b\xa7\x55\x66\x2a\xf2\xfc\x09\x85\x62\x85\xeb\x0a\x7a\x0a\x11\x2f\xd3\xc4\x26\x88\x8f\x5d\x72\xe2\xd0\x67\x34\x76\xd2\x8a\xa2\xc8\x5d\x58\xc8\x87\xae\x6c\x1e\xb5\x4d\x83\xfa\xf0\xea\xe5\xcf\x37\x3a\xc6\x47\xcf\x79\x61\xa2\x60\x04\xd1\xc1\xcb\xf2\xcc\x19\xb3\xba\xc9\x8f\x3c\xd3\x34\x0d\xde\x41\xd1\xb9\xd5\xf3\x72\xa4\xdd\x25\x05\x01\xb7\xba\x54\x92\xe0\x9f\x02\x4c\x6d\x46\xe7\xdc\x4f\x2a\x4e\xce\xc4\xe1\xda\xeb\x00\xe7\xc3\xe2\x03\x74\xbc\xb6\xf6\x8e\x0d\x5e\xa8\xca\x31\x9a\xad\x78\x6d\x55\xfc\x0d\x45\x08\xe8\x5d\x1e\xae\xac\x15\x63\x13\xd7\x6d\x20\x3c\x9f\xba\x73\x48\xa2\x11\xc8\x38\x5d\x4d\x62\x97\x73\xab\xdb\xc8\x5e\xde\xb1\xfd\x2a\x56\x10\x31\x1c\xe1\x19\x5c\x0d\x03\x1c\xe4\x07\xd5\x5c\x94\xa7\xdd\xb9\x63\xb3\x6e\x00\xea\x47\xee\x1a\x28\x02\x29\xb7\x89\x
06\x9f\x56\x16\xb7\xec\xe5\xa2\x80\x9f\x31\xc4\x55\x7d\x2e\xf2\x24\x7e\xda\xc6\xe5\x48\x96\x03\xdb\x3a\xb5\xe1\xc6\xf0\xee\x2a\x76\x3b\xdc\xc9\x03\x92\x8c\x0a\x73\x76\x26\x8b\xa2\xdb\x30\x54\xa2\x01\x0a\x52\x26\x9a\xd0\x64\x96\xd2\x6b\x66\x59\x9a\x65\x1b\xe3\xb2\xf7\xc8\xc3\x16\x7d\x80\x22\x02\x06\x7b\xd1\x90\x8b\xcf\xf8\x49\x37\x81\xdc\x14\x02\xa0\x5f\xb3\x64\x63\xc3\x77\x34\xeb\x5f\xf5\x75\x8d\x9c\x9e\xb3\x91\xa8\xc5\xfb\x9b\x61\x28\xbc\x17\xb7\x77\x61\x71\xa4\xf8\x1e\x81\xd4\xce\xe9\xaf\x49\x10\x92\x7a\xae\xf7\x24\x29\x3d\xce\xf2\x35\x82\x33\xb6\x6b\xb1\x9e\x32\x2e\x19\x8d\x5b\x81\xba\x33\x88\x23\xb5\xf2\x91\xa1\xe4\x32\x69\x19\xc7\xf4\x8f\xf0\x63\x26\xd5\x2d\x84\x5a\xac\x61\x77\x69\xdf\x4f\x04\x0a\x09\xa1\x44\x3b\x99\xa0\xb4\xcb\x4d\x05\x0b\x20\x07\x68\x6f\x84\x7d\x08\x5b\xfb\x88\x97\xa2\x97\x5c\x4f\xb3\x7b\x52\x93\x59\x62\xf1\x14\x10\xaf\xbc\xce\x81\xea\xd9\x70\xe8\xfd\xd4\x5a\xc2\x46\xe0\xc5\x61\x72\x8e\x12\xd6\x9b\xf0\x5e\x2f\x18\x9c\x37\x77\x3d\xe9\xc0\xe7\x45\x68\x62\xb8\x7e\x86\x06\xb6\xf8\x67\x78\x0a\x98\x30\xa1\xd3\x51\x23\x08\x41\x57\x0e\x9e\xca\x21\x7c\xdd\x0a\x14\x00\x70\xc5\x79\x20\x8d\x50\x6a\xf2\xb8\x14\x45\x91\x21\xb4\x52\xcf\xd2\x09\xdc\xe2\x44\x3f\xa1\x9c\xa7\x75\x56\xfa\xac\x47\x9c\x09\xa7\x44\xb6\x22\x51\x1b\x55\xe6\x2c\x71\x37\x1a\x8c\xf0\xc5\x87\xdd\x31\xab\x28\x71\x19\x3d\xb5\x19\xf7\x64\xc5\x42\x20\x90\x10\x92\x9c\x9e\x77\xed\xcc\x45\xa3\x34\xa1\xcd\x23\x41\x4a\x32\x92\x10\x51\x5c\x83\x5f\x80\x4c\x74\x20\x48\xbf\xdc\x4f\xc6\xe7\xf8\xef\x7e\x0c\x4c\x31\x14\x5c\xf7\x31\x11\xb8\x67\xa9\xdd\x13\xe4\x99\xa3\x28\x81\x9a\x14\xc6\x5f\xc2\x44\x3b\x9e\xdb\x78\x40\x15\x8f\xa7\x48\x98\xc0\x30\x9c\xbb\x0d\xad\x8b\x12\xcb\x38\x17\xd0\x78\x9a\x4f\x70\xa4\xf3\x8b\x02\x77\x3c\x84\xfa\xda\x3d\x44\x8c\x1b\x1b\x4a\xf2\xb7\x1d\xa7\xc9\xc5\xb6\xcc\x75\x64\xc5\xe7\xb4\xb1\x1d\x56\x45\x46\xa2\xe5\xba\xbb\x40\x09\xec\xe0\xc9\xcb\x96\x6f\xfa\x5d\xda\x21\x7f\x13\x76\x65\x9e\xd4\x69\x02\x57\x7d\xeb\x4d\x4d\x50\xec\x07\x00\xd0\x38\x82\x09\xdc\xab\x27\x74\x16\x94\x44\x2e\xf5\xe5\xf2\x7d\x37\x7b\xbb\x37\x00\xb6\x3a\x12\xa1\xd4\x1c\xeb\x1f\x34\x45\x91\x97\xfd\x62\xb6\xd2\x50\xf5\x4b\xd6\x8b\xe4\xb4\x05\xb1\x74\x3c\x04\x0b\xf0\x1f\x83\x4c\xed\x00\x82\x42\x61\x5a\xba\xb0\x15\xae\x57\x91\xe6\xcd\xde\x4f\x75\x81\xb3\xa1\x65\x52\x76\x9d\xed\x2f\x97\x4f\x9d\x97\x14\xdc\xe6\x40\x61\x86\xa6\xab\xfa\xea\xd2\x66\x81\xb8\x33\x75\x58\x03\x37\xe7\xb2\x6b\x3b\x7a\x9e\xf0\xa8\x99\x85\x02\xe2\xad\xc4\xdd\x6e\x3c\x4d\x83\xa2\xeb\xca\x2c\x91\x0c\x70\x0c\x14\x9a\x02\x2b\xcf\x52\x40\x48\x14\x0f\x1f\x75\xd8\x30\x29\xf7\x6d\x9c\xe3\x25\x2c\xe5\xd9\x6c\xb7\x28\xf5\x9a\x01\xc1\x4a\x5e\xf9\x7c\x8d\x2a\x9f\xf3\x5a\x7c\x39\x21\x0c\x42\xa1\xbe\x40\x57\x74\x4b\xbc\x4e\x4b\x45\x11\x92\xec\x7a\xf4\x88\x95\x58\x32\xca\x4f\x4e\xb7\xf7\x6e\xbc\x74\xbc\x2d\x44\xfb\x5f\xe1\xc5\xd0\xf4\x2e\xd9\x17\x74\xb9\xc6\xde\xf5\x8e\xf5\x07\xda\x19\xa8\x36\xf9\xb1\x88\x5d\x79\x24\x1f\xbc\xa6\x30\xba\x93\xb8\x19\x0d\x01\x27\x2b\x11\xf7\xb3\xcc\x00\x2c\x7f\xe5\x9f\x01\x0f\x16\x45\xd6\x6b\xec\xd8\x70\xdd\xbd\x83\xae\xd7\x0e\xa2\x9a\x8b\xc4\xdc\x9f\xdf\xf6\x23\x8c\x4a\x93\x39\x40\x03\x77\x2a\xbf\xc3\x7a\x95\x90\xe4\xe9\xc4\xbc\x79\x70\x90\xb8\x4d\xf7\x65\xc5\x81\xeb\x14\xfa\x42\x45\xf8\xcd\x40\x20\x00\xd6\xdd\xb1\xcb\x16\xd9\x6e\xc3\xce\xa8\xb2\x45\xd3\xcd\x89\x13\xb2\xe0\xac\x9e\x7a\x72\x65\xc9\x7f\xcc\xd3\x3c\xb3\x91\x03\x99\x0f\x63\xdc\xe2\x6e\x5c\xe4\xbe\x82\x87\xbe\xc0\xa6\x3b\xa7\x48\xf5\xed\xae\xed\x04\xbc\x43\x15\xda\x3b\xcf\x54\x83\xe8\x0c\xa1\xbd\x54\x98\x77\x5c\xb5\xc7\xf9\x2e\x6d\x2a\x9c\x0f\x90\x0b\xa4\xc6\xd5\x3f\x91\xa8\x14\x58\xbe\
x5f\xd4\xf7\xfa\xc9\x00\x9a\xcf\x96\x00\xd5\x6b\x81\x65\xbb\xab\xaa\x69\xcf\x60\x99\x33\x4b\x1f\xc8\xb7\x9d\x20\x3a\x90\xf9\xb6\xa0\xd9\x33\x15\xd9\x3a\x9c\xe5\x7a\xce\xea\x76\x18\x9e\xeb\x80\x1a\x57\x25\x68\x69\x7d\x84\x90\x40\x01\x8c\x9a\xe7\xc2\xb7\x9d\xd5\x75\xe7\x8a\x8b\x4c\x58\xdd\x10\xe7\xdd\x80\x64\x53\x22\x47\xcc\xa4\x74\x95\xe9\x0b\x82\xbd\x06\xa2\x58\xe3\x4a\x6b\x82\x41\x92\x6c\xd7\xc6\x29\xc4\x9d\x72\x05\x22\xde\x01\x6d\x48\x10\x70\x8b\x75\xb0\x27\x01\x2a\x11\xd9\x32\xbd\xee\x2b\xec\x2b\x4f\xc7\x1b\x37\x87\x88\xf6\x5d\x5f\xa0\xb5\x81\xe2\x3b\x94\x37\x10\x96\x8b\xd5\x2d\x8e\xc3\x96\xad\xde\xf6\x64\xb2\xfd\x1a\x3b\x20\xd8\x70\x20\x5a\x3d\x29\xf0\xc5\x3d\x20\x8e\xb5\x3b\xb6\x08\xfa\x1a\xcb\x50\x10\x1f\x81\x6e\x88\x5d\x6f\x35\xdd\x5c\xdf\xa6\x05\x87\x28\x84\x3a\xed\x6b\xf9\x99\x03\x8d\xa3\x9f\x89\x6d\xc2\x66\x4f\xad\xcb\x63\xf1\xc6\x15\x49\x12\x58\x1d\x9f\x48\xde\xa1\xcf\xc6\x4e\x26\xb3\xd0\x85\xb4\x28\x10\x3c\x05\xb2\xf8\x65\xc2\x1c\x1d\xda\xc2\x1f\xb4\xb0\x95\xeb\xe2\x21\xb0\xb6\xdf\xc8\xb9\xeb\x09\xaf\x97\xd1\xd5\x7a\x40\xa2\x33\x0c\xd5\x8c\x2e\x53\x94\x8b\xec\x23\xe3\x53\x61\x5b\x08\x95\x2b\x47\x4f\x15\xca\xc2\x11\x8a\x00\xa2\x35\x3b\xd3\x83\x6d\x45\xd5\xee\x5a\xf5\xb1\x7e\x2f\x89\xe0\x38\x83\xa9\x81\x06\xc5\x19\x30\xb0\x80\x71\x3c\x77\xbd\xc8\x0a\x9a\x3d\x0d\x44\x13\x65\x9b\x3a\x6c\xe5\x75\xf3\x83\xd3\x3e\x00\x70\x9c\xef\xfd\x40\xac\x23\x4c\xe0\xd1\x99\x34\x57\xee\x5a\x8e\x13\x9d\xa9\xc2\xc7\xec\x5a\x6c\x0a\x89\x82\xa0\x31\x24\x21\x62\x85\x51\xfe\x9a\x59\xd3\xeb\x33\x6f\x0e\x6b\xc8\xaf\x75\xbf\x6d\x64\xea\x23\x00\xe5\x7a\x9a\x83\x81\xba\x83\xaa\x5e\x9c\x78\x0d\x9e\xe7\xfe\x78\x4c\xde\x04\x03\xc9\x03\x2f\x23\x76\xa7\x7b\x52\xef\x48\x72\x3d\x81\x0b\xb2\x65\x71\xae\xde\x57\x1b\x06\x29\xb3\xe3\xa9\x68\xe6\x1f\xf8\xd5\xea\x2c\xaa\xa8\x4c\xac\x40\xc9\x6c\x26\x75\x9e\xef\x29\x3a\x37\x6a\xa9\x0a\xe7\xce\x19\x8e\x79\x52\x5e\x3e\x6f\xdc\x6b\x04\x01\xc1\x9d\xf0\xb3\xd8\xaf\xa9\xf1\xd6\x39\xcd\x51\xf9\x68\xa2\x75\xb7\x76\x87\x38\xe1\xdb\x77\x09\x37\xb6\xc8\xbb\x81\x2c\x40\xbf\x00\x89\x40\x82\xae\xd1\x89\xb2\x29\x08\x9e\x6d\xe6\x54\xd9\x43\x4d\xbc\x78\x2a\x36\x94\xec\xb6\x69\x4a\xf2\xb8\xc5\x27\x8a\x2a\x46\x3c\x0f\x1e\x89\xd6\x9e\xe1\x76\xf5\x54\xbf\x1b\x62\x93\xcf\xc6\x7d\x5f\x36\x6f\x29\xdc\x6a\xb1\x51\xe4\x80\x13\x6d\x84\xd3\xd9\x56\xb1\x69\x5c\x87\xd9\xef\xd0\xa9\x81\xc9\x6c\x05\xf2\x4e\x3e\x21\x20\x5c\xff\x3f\xaa\xae\x63\xcd\x55\xa4\x59\x3e\x10\x8b\xc2\x43\x2d\x25\x21\xe1\x8d\xf0\xb0\xc3\x7b\xef\x79\xfa\xfb\xf5\xf9\xe7\xf4\xcc\x5d\xb7\x3e\x75\x43\x55\x66\x46\x64\x46\x46\xcf\x58\x36\x7e\xaa\x33\x5c\x77\xbd\xe7\x3b\x9c\xa6\x65\x5b\xfe\xf6\x88\x34\xaf\xee\x08\xe0\xae\xe5\xfa\x88\xe9\x48\x90\xe8\xdc\x01\xc0\xb9\x66\xc8\xbb\xb9\x40\x41\x4a\xdf\x81\xe8\xaf\xcb\xca\xff\xd1\x17\x73\x0f\x01\x32\xd1\xe6\xad\x64\xf8\xb6\xcc\x21\x43\x00\x03\xe9\x77\xc0\x5d\x1f\xdc\x7b\x1c\x1f\x29\x71\x08\xa2\xf7\xfb\xef\xe2\x2b\xe5\x91\x08\x7a\xbd\x33\x20\x5f\x69\x6c\xe7\x4f\x7c\x9d\x23\x0c\xd2\x6c\xe2\xd3\x48\x3d\x71\xa2\x25\xf6\x12\x9a\xb5\x4e\xe1\x56\x03\x21\x5f\x0b\x0b\x3c\xcc\x6a\xac\xcf\x35\x54\xe2\x65\xc0\x7a\x5d\xb7\x17\xe4\xa4\x6d\x56\xde\xa7\x6a\xcb\x6e\xb4\xe1\x44\x8c\xae\x72\x3d\x4d\x63\xe4\x62\xe7\x26\x1c\x64\xb0\x32\x7b\xca\x20\x60\xa7\x6a\x86\x61\x18\x74\x58\x76\xb8\xc7\xb8\x54\x39\xbd\x74\x7d\x9b\xa7\x5e\xa5\xdf\x08\xf8\xeb\x35\x9d\x65\x7a\xd7\xac\x57\xa0\x7c\xe1\xe4\xef\x63\xfe\x33\xf6\xf8\xdf\x99\x3c\xf6\x1f\x0c\x94\x22\x4c\x34\xb0\xf5\xf7\x63\xfa\xed\x84\xa5\xee\xb3\x4f\x19\x14\x2a\x76\xcc\xf4\x18\xb6\xdf\x6c\xd1\x1b\xfd\xbd\xb6\x8b\x36\x22\x20\xef\xe0\xce\x90\x02\x44\x88\x7c\x27\x26\xcc\x9b\x8e
\x41\x7e\x15\xca\xb6\xf0\xd1\xea\x76\x9b\xac\xda\x0e\xaa\x0d\xf1\x0b\x52\xa3\x3c\xfa\x33\x39\x64\x84\xf9\xf5\x51\x7e\x7d\x91\xec\x2e\xcb\x15\xb9\xc7\x71\x4c\x21\x49\xaf\xdc\x5f\x42\xbf\x6f\x8c\xc8\x18\xc2\x42\x7d\x65\xbe\x11\xbc\x5b\xb1\x10\xd9\xd5\xb0\xd7\xf8\xb0\x89\x1f\xab\x55\x40\x23\xc3\x5e\x16\xbb\x37\x94\x64\xb3\xdf\x83\xd5\xb9\x08\x4b\x16\xf7\xaa\xd1\x9c\xe3\x4b\xf4\xf9\x3b\x4f\x62\xc5\x15\x22\x07\x40\x61\xee\x2b\x06\x4e\x91\x9b\x4e\x97\xcc\x17\x0c\x30\x1f\xbf\x91\x20\x6d\xf1\x46\xa6\x29\xbc\x33\x0c\xe6\x3e\x35\x42\x8e\xa8\x92\x5e\x42\xd9\xc4\x00\x42\xd7\xe1\x60\x48\x95\xa9\x1a\x26\xe5\x71\xcd\x88\xd5\x8c\x4a\x84\xaf\xdb\xea\x76\xfb\x57\x21\x8d\xf6\x7f\xfb\x18\xc7\xf0\x79\x46\x2c\xd1\x3d\x83\x85\x88\xd7\x63\x70\x14\x9b\x42\x59\xdd\x46\x21\x13\xb0\xe5\x0f\x97\x20\x56\xfa\xa6\xa0\x25\x35\x57\x37\x4e\x59\x3f\xd2\x5f\xc1\x11\x24\x57\x6e\xc3\xcb\xf1\x84\xbd\x9b\x8d\x93\xdb\x20\xb6\x0d\x90\x0f\xf8\xe0\x91\xcb\xc5\xb3\xfc\xb7\x5f\x83\x1c\x38\x82\xef\xfb\x4c\x71\x4d\x65\x53\x25\xc3\x94\xc8\xb6\xc9\xd5\x30\xbe\x46\xe8\xd1\xac\xc1\x61\x3c\x8c\x62\x5b\x8e\x6c\x09\x65\xe8\x28\x28\x47\x04\x58\x12\x79\xa5\x38\x93\x0e\xa1\x70\x22\x79\x4f\xb1\xf9\x57\xc5\x24\xdb\xe9\xc6\x69\x22\x25\xa6\x70\x0b\x55\xaf\x08\x83\x78\x02\xf6\x89\x63\xf9\xe1\x8e\x74\xd6\xc5\x5a\x87\x6c\x7f\x94\x7f\xe5\x7c\xa1\xe7\x93\xfb\xa2\x1a\x93\x6e\xf8\x36\xd3\x5b\xae\x11\x10\xf1\x3f\x67\x32\x08\x36\x1f\x72\xb2\x56\x8b\x0c\x3d\x5f\x98\xaf\x72\x03\xbd\xf5\x4a\x5b\x4d\x0f\x96\xfc\xa6\xcf\x5f\x6d\x81\xfe\x64\x4e\x92\xce\x37\xa1\x3e\x9b\xfd\xae\x7f\x80\xeb\xff\x6a\xb0\x1b\x09\xde\xa0\x29\x57\x33\x5e\xc9\x6e\xe8\xc4\x3d\x2c\x06\x8b\x34\xcc\x53\xc3\x53\x92\x42\xde\x8c\x4b\xa7\xc6\x47\xb2\xdc\x6e\xb9\x9b\x02\x2f\xc5\x1f\xa8\x76\xaa\xa9\x90\x1b\x27\xf7\x76\x08\x50\x9f\x03\x5a\x66\x86\x79\x87\xbc\xdd\xd8\x76\x9e\x13\xda\x6c\xe1\x2b\x91\x32\x31\x34\x91\x7e\x64\x52\xe6\xc6\x91\xb9\x6c\x73\x5d\x78\x95\x81\x70\xbd\x1c\x93\xcf\xfa\xd1\x4d\x63\x9f\xed\x51\x4c\xd7\xcf\x73\x8c\xa2\x4e\x2a\xfc\x96\xd6\x51\x31\xf9\x8b\xcf\x0e\x35\x69\x2c\x7f\x1e\x5b\x37\xed\xc3\x2e\x6e\x04\x30\xe3\x38\x4e\x49\xb6\xd3\x48\x77\x1c\xf7\xd4\x16\x77\x14\x92\xe4\xb8\x00\x1e\x33\xad\xaa\x5c\x41\x23\x8b\xa7\x0b\x17\xe2\x4d\x93\x5c\x88\x95\xae\x79\x6a\xad\x58\x13\x3d\xcd\x86\x49\x8e\xf2\x2b\x81\x44\x77\x67\xd9\x9a\xa5\x62\x5e\x3d\x9d\x4f\x19\x74\x9f\x68\x26\xe2\xe7\xf9\x45\x0c\xaa\x5c\x99\xcc\xdf\x11\x8d\xdc\x5d\x9a\x3f\x97\x08\x22\x77\xd6\x50\x9e\xdb\x85\x9e\x2b\xf9\x54\xcc\x3d\x22\x0e\x47\x88\x91\xd6\x2f\x27\xc7\xeb\xb8\xe5\x09\x73\x18\x3c\xf6\x71\x29\xc5\xef\x9c\x49\xb8\x96\x29\x6f\x5f\x69\x80\xbe\x4a\x6c\x68\xa4\x2b\x4f\xb7\xce\x55\x64\x03\x79\xe5\x28\xa5\xd7\x75\x9f\xe5\xfa\x9d\x06\x04\x99\x9a\xa1\xdc\x2c\x78\xae\x0b\x35\x76\x85\x3a\x5f\x99\x99\x98\x1b\xe6\x3b\x13\x36\x0a\xd5\xeb\x8d\xd9\x06\xe9\x3a\x3b\x10\x2e\x55\x83\x69\xb6\xdb\x7c\x98\x33\x9a\x5d\x8a\x00\xbe\xd7\xdb\x14\x19\xf1\x66\x7f\xd2\xe8\x03\xef\x70\x62\xc7\x89\x55\x28\x9b\xcc\x53\x5c\x76\x03\xb3\x36\xa1\x98\x64\x4f\xf4\x38\x13\x3c\x4d\xdc\xba\x16\xcd\x5a\x24\xb3\xb3\x22\x5e\xef\x3a\x7b\x3e\x3f\x45\x7a\x4a\xff\xe8\x19\xb8\x63\x8e\xe6\x29\x6a\x7c\x9e\xf0\x39\x8e\x0b\x4f\x24\x83\xb9\x37\xc7\x79\x92\x4d\x8b\xfa\xc0\x70\x36\x33\xf8\xd3\x26\xee\x6b\x34\xd3\xfc\xa3\xb0\x7d\x84\x45\x3b\xba\xed\x5c\x81\x23\xdb\x4f\xe2\xbb\x9a\x51\xf9\x12\xf7\xd3\x44\x43\x3e\xdf\x59\x01\x9c\xa2\xfb\xb1\x7c\xce\xe3\xd6\x2c\x07\xa8\xcd\xdd\x90\x5d\x62\xd7\x65\x25\x32\xe0\xc4\x00\x9b\xae\xbd\x15\x60\x3e\xc3\x6f\x0c\x93\x9c\x15\xad\xbf\xba\x85\xc7\x33\x5f\xda\x62\x69\x2f\x1a\xbb\x6c\xb8\x79\xed\x3c\x81\x1f\x58\x28\xc0\xc0\xd3\x0a\xa2\xdf\xc
b\x6e\xac\x0e\xe1\xfb\xa7\xb5\x7e\x0f\x06\xc1\x90\x1c\x01\x08\x48\x4e\xc9\xe8\x1b\x98\x58\x89\x95\xdb\xba\xde\x73\x85\xef\x99\x01\x25\xb2\x3e\x58\x2f\x73\x38\x67\x35\x2c\xb0\x07\x0c\x41\x85\xfd\xac\xd2\xfa\xdd\xb0\x00\x4c\x1d\x01\x91\xc3\xdc\x5f\xc9\xec\xd2\x97\x58\x8b\xd7\x37\x87\xc7\x1d\x22\x0b\x2b\x14\x70\x7e\x0f\xfc\x07\xbf\xfe\xb9\x87\x52\xae\xf7\x32\x6e\x03\xf8\x39\x02\xef\xc8\xf2\x13\xe1\x8e\x9c\x7b\xaf\x14\xfd\x8d\xe3\x12\x66\x20\x45\x90\x59\x10\xca\x23\x12\x4c\x9d\x91\xa6\x65\x72\x9f\x83\x5a\x76\x7c\x40\xe9\x76\x37\x8e\x4a\x15\x60\x3e\x13\x22\x05\x1a\xbb\x13\x96\x49\x09\x0c\xb8\x21\xaf\x2f\x1f\x90\xcc\x02\x11\xb6\xd5\x4d\x9e\x56\x7c\x1a\x66\x8b\x5f\x11\x60\xe3\xa2\x78\xce\xdb\xd7\xeb\x58\xac\x22\x70\xff\xe9\x69\x72\x85\x85\xe7\x33\xbd\x39\xb9\xc6\x35\xd4\x14\xbf\x8e\x42\x8f\x40\x88\x74\x57\x68\x2e\x78\x3c\x9a\xeb\xa6\xc5\x1e\xd6\xb0\x10\xb6\xa5\x52\x31\x15\xd5\xa9\x9f\xc2\x53\xa8\x25\xb2\x9f\x1a\x1b\x0f\x58\x04\xca\x07\x6d\xe0\x08\x1c\xb0\xfd\x12\x9a\x0b\x11\x0c\xc3\xd8\x17\xc2\x26\x4e\x85\xd2\x90\xe4\xe7\xb3\x93\x4b\xef\x2e\xf2\x58\x49\xfe\x48\xa7\x93\x7f\x96\xfe\xdf\xfc\xf1\xb8\x89\x14\x85\x03\xad\xd7\xec\x80\x30\xe1\xea\x4c\xcb\x22\x1a\x99\x71\xda\x35\xbb\x50\xc4\x9f\xf1\x7d\x0e\xe8\x59\xeb\xa3\x85\x15\x71\xea\xe5\xca\x54\x38\x6b\xd2\x5c\x74\x9f\xcb\x96\xa3\xa8\x29\x55\xa2\xce\xaf\x00\x1a\x3e\x61\xb7\x0b\x76\xd5\x7d\x4d\xa2\xf2\x7b\xcf\x73\x5e\xd1\xc6\x55\xbb\xc5\xf8\xa7\x0e\xb4\x16\x95\x8b\x06\x4e\x29\x11\xb2\xa3\x90\xfb\x2a\xef\x82\xff\x6d\x4a\x92\x9c\xd6\xd3\xa7\x1d\xe8\x66\x91\x0a\x2e\xe6\xb9\xcd\x7c\x1b\xd7\x83\x50\x70\x90\xa6\xd9\xde\x08\xda\xa7\x5b\x08\x62\x95\x31\x96\x4a\x73\xb9\xe9\x1e\xb8\x54\x0f\xe4\xa6\x5c\xac\x52\x2f\xdd\xea\x2b\xcd\x15\x29\x12\x22\x10\x73\xd3\x38\x69\x23\xcc\x3c\x32\xef\xb4\xd2\x83\x9d\x2d\x35\x67\xa2\xeb\x31\x1c\x52\x41\x14\x72\x8c\x32\x24\xea\xa6\xce\x35\xa8\xbf\x45\xf1\x9b\xff\x35\x70\x64\x33\xaa\xd5\x0e\x71\xd5\xd8\x46\x25\x85\x1b\x92\x4e\x46\x33\x79\xc0\x84\x4f\xc6\x24\xfd\x07\xcb\x06\xd8\xee\x8e\xf8\xf6\x9e\xd5\x72\xc2\x92\xcf\x81\xa9\x34\xf4\x67\x88\x56\x9e\xe2\xa5\x64\xde\x8e\x9d\xbb\x7a\xd5\x49\x48\xd8\x18\x77\x0c\xc2\xb6\x10\x58\xc9\x99\x52\xdc\xfb\x54\x23\xaf\x7d\xf5\x27\x9e\x49\x14\x2b\x3c\xf8\xcf\xef\x73\x3f\xb7\xb4\x77\x31\xdb\xc1\xd4\x8b\xa3\x67\x3b\x4a\x1a\xd9\x60\x19\xf9\x1a\xbd\x73\x89\xdf\x17\x87\x8c\xa8\x3e\x03\xec\xf6\x80\x87\xc1\x44\xc1\x25\xae\x19\x65\x9b\x05\xca\xf9\xe8\x61\xfc\xa9\xce\x60\x4a\x26\x0a\xfd\x6a\xc5\xc2\x9c\xef\xb9\xbd\x1d\x42\x23\x00\x76\x61\xf0\xd4\x9f\xc7\x4b\xeb\x6c\xf7\x2a\xfd\x8b\x83\xb3\x89\x5b\xfd\xb2\xd8\xc6\xa3\xa4\xfe\xf6\x65\x9f\x2c\x49\x5c\xdc\xa3\x5f\x78\x8b\x9c\x14\xf1\x12\xab\xc8\xa0\x04\xb4\x9b\x30\xf9\x5a\x5c\x9c\x7b\x43\xc3\x26\xe0\x1d\x41\xb8\xc7\xfa\xed\xb0\xaf\xec\xa9\xf2\xbd\xec\x1f\x45\xfa\xf0\x66\x97\x86\x66\xa8\x9d\x37\xcc\xfd\x1c\x49\xd7\xdd\xa2\xc4\xc6\x6b\x0c\x0b\x5d\xff\x30\x88\x46\x68\x2d\x6b\x54\xa3\x2e\x6c\x83\xfa\x51\xf8\xbf\xfc\x4b\xa3\xa6\x48\x61\x0b\x20\x7f\xfa\xd2\x47\x6a\x03\x9d\x3c\xb9\xe6\x96\x53\xcd\x79\x6c\x26\x4e\x2c\x17\xce\x64\x15\x83\xa5\xe3\x40\x1e\x89\x87\x4a\x3c\x7a\xed\x5a\x4e\xd5\x6a\x06\x6b\xb6\x26\x86\xb0\x28\x51\xca\x42\x32\xc1\xe9\x4c\x9d\xd9\x2a\x31\xee\x60\x31\x01\x82\x10\x88\x98\x3d\xb5\x60\x58\x9e\xb6\x5e\x6e\xae\xce\x2f\x4a\xf7\xfc\xbb\x3b\xfb\x7a\x70\xc7\xa1\x72\x9f\xb1\xcd\xd1\x32\x0d\x1a\xe9\x95\xf6\x11\x1e\xc9\x28\xe9\x12\x50\xef\x79\x49\x6e\x85\xf5\x8a\x5c\xb8\x33\x90\x65\xd9\xc0\x16\xc8\xf3\x33\xa5\xf5\x37\xba\x13\x59\xae\x16\x0e\x9a\xed\x52\x11\x45\xfa\x2a\x48\x9d\x7b\x0e\xda\xb3\x17\x06\x47\xf7\x05\x66\x69\xd3\x3c\xcb\x7c\xd7\xc5\x48\x33\x0e\x
c4\xe9\x5a\x56\x54\x1f\x09\xb5\x60\x42\x22\x55\xca\x44\xf9\x81\xac\x26\xf7\xdb\x43\x30\xfa\x57\x39\x3a\x3e\xf2\xfd\xfa\xea\xab\x61\x13\x03\xe7\x4f\x4f\xb5\x46\xe9\x52\xeb\x2f\xcb\x0b\x2d\x8c\x7d\x2c\x07\xcd\x79\xb3\x5e\x1c\x90\x2f\x48\x24\x3a\x14\x46\xe8\x79\x9d\xe4\xbc\x1c\xaa\x4f\xe8\x3e\xdf\x87\x7b\x75\xa7\x22\x7f\x35\xbd\x84\xe6\x4f\xd4\x07\xd7\x70\xb2\x44\xcf\xb1\x3b\x3e\x8b\x63\xeb\x13\xe1\xa6\x11\x5b\x45\xe5\x51\xc6\xa7\xef\xef\x7f\x38\x7b\xf1\x52\xa8\x4c\x69\xaf\x8e\x56\x01\xba\x73\x5f\xd4\xc8\x10\x04\x12\xea\xfd\x38\x4f\x6b\x95\xa6\xd5\xea\xae\xfe\x66\xd8\xbc\x42\x00\xa2\xc9\x4f\x90\x6f\x9f\x6e\x8a\xde\xa5\x68\xa1\xab\x66\x3e\xc0\x8b\x9c\xfd\xfa\x99\x83\x81\x78\xb8\x20\xae\x05\x40\x88\x8c\x1f\x28\xae\x2b\xf3\xc3\x8a\x3c\x94\x31\x9f\x71\x02\x30\x4c\xd2\x4b\x56\xf4\xa1\x9d\xf6\x93\x49\x3b\x52\x28\xce\xc4\xca\xdf\xd7\xfd\xab\xe1\x93\xaa\xb9\x0b\xbb\x90\x58\x49\xa4\xfb\xe6\xda\x95\xd0\x7a\x3d\x90\xde\x54\x80\x3c\x03\x12\xe3\x4e\x0b\xa6\x57\x21\x3f\xc9\x53\x35\x9c\xde\x46\x61\x00\x50\xdf\x94\x2b\x50\xed\x05\x1d\x0b\xf5\xd3\xd2\x9f\x5c\xeb\x58\x06\x36\xd3\x73\x85\x9e\x68\xf8\x39\x51\xeb\x26\x35\xb8\x47\x02\x77\x3c\x21\x00\x7b\xfb\x80\x57\x42\x48\x4d\x0e\x02\xe0\xec\x8b\x80\x66\x8f\x5f\xb9\x9c\x7a\xba\x26\x9e\xf5\x40\x4d\xd5\x8a\x0a\x5c\x63\xdf\x55\xf5\xf6\x5d\xcc\xfb\x94\x81\xf9\x36\xdf\x8e\xe8\x86\xc3\xe9\x20\x02\xef\x34\x94\x6e\x03\x50\x37\x0d\xaa\x60\xa3\xfc\x6d\x58\x43\x39\xab\x47\xc9\xed\x8d\x64\xf4\xfb\x29\x92\x88\x51\x13\x80\x05\xe0\x3c\xbc\x4f\x17\xa9\xda\x63\x76\xa7\x77\xf1\xec\x67\x48\xb3\x2a\xf7\x24\xe2\x15\xaf\x52\xd5\x5e\xdd\x31\x75\xda\x63\x61\xe9\x67\x4d\x22\xda\x4b\xb0\x83\x4f\xae\x1f\xc4\xeb\xf5\x4f\xeb\x55\x3d\x4d\x0f\xdb\x98\xfb\xa5\x4f\xae\x3c\x59\x64\xe1\x57\xa9\xfc\x22\x56\x90\x01\xb0\x1b\x3e\x03\x9d\x0e\xdb\xbf\xa2\xf3\x86\x9a\x9d\xe0\x3a\x46\x64\x22\x4a\xc9\x24\xae\x55\x2a\x34\xec\x07\x52\x91\xb3\x3f\x3f\x1f\x47\x64\x34\x06\x72\x0b\x06\xb8\xcb\x1e\x4a\x06\x99\x46\xcc\xfb\xe1\x43\xa8\x26\x80\x37\xd9\xfc\x43\x27\xeb\xe8\x8c\xac\x54\xd9\xc0\x3b\xd4\xe3\xf5\xaf\xb6\x96\x8a\xba\x7d\x6f\x84\xfe\xcd\xe8\xd7\x32\xc9\xc5\x90\xd8\xf3\x51\x20\x00\x54\xac\x51\x6e\xc6\x0d\x70\x04\xc3\x07\x2f\x14\x1a\x61\x44\xd8\xd4\xb4\x7f\x6a\x07\x39\x5b\xed\xaa\x7c\xc9\x45\x61\x14\x4a\x03\x20\x3b\x7c\xa1\x3e\x89\x78\x25\xf2\xbc\x3f\xa3\xc0\x8e\x1f\x9f\x98\xd1\x25\x47\x8c\xdd\x68\x27\xcc\x1c\x9c\xdf\x5e\x89\x1b\x8a\xff\xc6\xb5\xaf\xec\x90\x07\x33\xfd\x28\xb8\xf2\x77\x6e\xfb\x0a\xde\x74\x6e\xeb\x76\x41\xca\x86\x5e\xb0\xe4\x20\x9f\x59\xff\x28\x72\x82\x99\xe8\x6d\x22\xf6\x2d\x1f\x6a\x3c\x8a\xd1\x49\xcd\xaa\xa0\x97\x08\x70\x62\x1f\xa3\x5a\x65\x6f\x58\xb8\xd7\x63\xff\x89\xc1\x0e\xdb\xe2\x81\x2c\xce\xb8\x4e\x99\x12\x00\xf6\x9d\x83\x9c\xfc\x68\x55\xec\x17\xd2\x95\xf5\x4f\xb9\x6d\x00\x43\xc1\x35\xf6\xb0\xa2\xb4\x6a\x67\x6a\xe4\xd7\xb1\x36\xc2\x6c\xcd\x13\xb5\x11\x96\x58\x19\x7f\xf3\xb4\xfa\x4a\x7e\xe8\x9d\x25\xa9\x0d\x1b\xc9\xef\x83\xcc\x62\xf5\x07\xad\xe4\x20\xec\x17\x0f\x1a\x29\xc9\x92\x0b\x62\x9c\x99\x31\x33\xc8\xe1\x8c\xe1\x02\x36\xe3\x8b\x6a\x5c\x20\x3a\x0d\x55\x05\xfc\x73\x9d\x5f\xa3\xbd\x2f\xeb\xfd\x47\x13\x24\xa3\xc5\x79\xdf\x80\xf0\xdf\xd0\xa8\xa9\x42\x31\xaa\x63\x99\xe5\x54\x18\xde\x37\x2e\xdd\x80\x5e\xd2\x7c\xfe\x44\x56\x20\xae\xfd\xcd\xc6\xfc\xa9\x86\x4b\x80\x83\xee\x29\x5f\xbf\x71\xac\x5a\x71\xbe\xd0\x6e\xa9\xda\xef\xf3\x72\x46\xb1\x68\x01\x78\xb1\x17\xab\x72\x8f\x8b\x05\x28\x42\xeb\xf5\xfe\xe9\x42\x1c\x21\x5a\xfa\x87\xa7\x8b\xaa\x4b\xef\x90\xee\x72\xf0\xb1\x12\xaa\xfc\x7a\x9f\x87\x2d\xa7\x54\xcf\x5e\x59\x2c\x74\x61\x15\x20\x06\xb1\xe7\x3b\xe8\x53\xf2\xb9\x25\x96\x54\xa0\x31\xf9\xf4\xb0\
1\x6a\x2f\x08\x83\x48\xc0\xe8\x46\xf3\x3f\xeb\xba\x28\xa6\x15\x2f\x45\x57\xf7\xcf\x15\x7f\x86\x7d\x17\xfb\xd0\x05\x65\x02\x44\x01\x0a\x04\xae\x03\x8a\x13\x5a\x50\x16\x71\x36\x59\x4d\x75\xad\x52\x50\x0e\x31\xd1\xd9\x4c\x87\x32\x71\xf8\xac\xeb\x5a\x60\xa0\x59\xf2\x63\xe9\x2f\xb5\x8e\xc7\xba\x09\x16\x62\x7b\xe0\xc0\x42\x96\xe7\x9d\xde\x37\x1e\x83\xb7\xfc\xeb\x5c\x59\xc3\x8b\xfd\xf1\xab\xc4\x41\x68\xfa\x65\x2c\xc7\x95\x31\x22\x52\x84\xfd\x5f\xf1\xf7\x3b\x4e\x46\xd1\x27\xeb\x90\x2c\xd2\x4c\xff\x9e\xc3\xbc\x5b\x4b\xad\xf8\xa7\x1a\xa5\xe3\x92\x67\x50\xc6\x40\x31\xd0\x86\x20\x93\x28\x6a\xf8\xdd\xdf\x9c\xcf\xf2\x37\x92\x50\x04\xb8\x91\x93\x82\xc7\x9d\x1e\x00\xba\xbc\x8d\x96\xb2\x97\x92\xc3\x76\x22\x4c\xc3\xa4\xd3\xfb\x34\x6e\xb1\x54\x1e\x50\x15\x8d\x98\xc7\x4d\x4a\xef\x0f\x76\xac\x69\x9c\x0b\xf9\x22\x11\x3f\x4c\x0d\x64\x1e\xb4\xae\x1b\x51\xf3\x64\x6d\x01\x77\x03\xc7\x01\x3d\x06\xd7\x28\xcc\x93\x80\xda\xb4\x0e\xf6\x33\xf1\x1b\x70\xdf\x87\x40\x5b\x60\x6d\x29\xd5\x17\xc4\x45\x6b\xa1\xad\xd4\x37\xb2\xa4\x87\x47\x4f\x15\x42\x57\x14\x87\x28\x14\x7f\xfd\x1b\x86\x66\xf8\xfb\xe5\xc2\x79\x0f\x66\xf8\x32\x02\x6f\x68\x42\xaf\x6b\x46\xd8\xdd\xee\x32\x55\x54\xe9\x9a\x8d\xfd\x3a\x04\x93\x59\xf6\x0d\xf7\x62\x86\xf2\x47\x3f\x76\x3d\x54\x0a\x77\x37\xb3\x94\x66\x2c\x5d\x1b\x1d\xd3\x31\xa0\x36\x08\xef\xcf\xaf\x67\xea\xa1\x61\x32\x96\x01\xb2\xa6\x1d\x00\x86\x87\xdb\xce\xe3\xad\xf1\x0f\x39\x3e\xae\xda\xef\x41\xe7\xa5\xaa\xd8\xaf\x0f\x08\xf6\xe0\x86\x50\x9d\x36\x77\xab\xa6\x1b\xc7\x4b\xe6\x80\xee\x2f\x4a\x1f\xa5\xc9\x3b\x3a\xd7\xa4\xb8\xc9\xb8\x02\xc7\x27\x01\x4c\x7d\xf8\xb8\x0e\xb7\xb4\x0b\x94\x09\x7f\xfd\x2b\x4e\x69\x69\x79\x1a\x3c\xb7\xfc\xc8\xfa\xde\xbd\xfa\x3e\x24\xc0\x1d\x16\x21\x35\xc9\x7d\x82\xb8\xbf\x1b\xbd\xe3\xa3\x36\xfc\x29\xfc\x63\x9c\xa5\x37\xd9\xd1\xbe\x2d\x39\x64\xec\xd6\x23\x12\xb6\xba\x01\x66\x2d\x8d\x94\x8b\xa1\x52\xbc\xbb\x86\x7d\x7f\x38\x6a\x59\xc7\x18\xa9\xca\xb5\xf1\x48\x39\xe6\x8f\x88\x03\x89\x40\xb5\x86\xd5\x3e\xb6\xab\xc9\x11\xd3\xfd\xcf\xdd\xdc\x74\xf2\x8c\x29\xb1\x15\xf1\x16\x8f\x20\xf9\xda\xaf\x33\xf5\x5f\x97\xbd\xe6\xdb\x1b\x9d\xbf\xf9\x5e\xae\x31\xd0\x48\x4b\x47\xb5\xeb\xf9\xbd\xf1\x85\x08\xba\x3b\x0a\x1c\xeb\xca\x3d\x6a\xf9\xe1\x94\x9d\x15\x7e\xf0\x40\x25\xb8\x8d\x17\xf1\xbd\x35\x9a\x19\x50\x6b\x1f\x5d\xe9\xbd\xd2\x6b\x23\xb3\xa3\x3e\xe0\x63\xf6\x09\xc8\x87\xff\x7c\x14\xd1\xf1\x32\x0e\xe4\x86\x5d\xee\xa8\xd5\x3c\xa9\x4f\x4d\xfe\x92\xcf\x47\x0d\x98\x2a\x61\xff\xd6\xae\x69\xcc\x75\xef\xe5\x62\xcf\x38\xd5\x17\x54\x34\x10\xca\x4c\x9a\xb4\xcc\xa4\x19\x6f\x44\x44\x7c\x7d\xfc\x7f\xf7\x57\x7d\xd1\xa2\x50\xb9\xff\xd4\x58\xe5\x66\xc5\xec\x33\xf2\x63\xfa\x29\x6b\x37\xa4\xff\xc3\x77\xc1\xfa\xcc\x6b\x5d\xfb\x5b\x54\xae\x8d\xa1\x34\x06\x5d\xec\x2b\x01\x5c\xcd\xbe\xe3\x4f\x2f\x58\xae\x63\x9e\xa4\x5f\xba\x99\xa7\xd6\x6c\x91\x84\x64\xa6\x58\x77\xc3\x2d\x5b\x09\xbb\xc0\xff\x50\x7e\x0b\x37\xdc\xf8\x13\xf3\x71\x23\xdb\xf5\x67\xb2\x6a\x0c\x4f\x7c\x5e\x43\xd4\xd4\x50\x6c\xd2\x73\x34\x98\x7f\x77\xd2\x99\x94\xb3\xe7\x87\x9f\xed\x87\x64\x44\x95\xb3\xe7\x9b\xd3\x99\x1e\xe8\x1b\xe1\xdf\x38\x54\x1b\xd9\xf7\x63\xb9\x9a\x1c\x32\x53\xeb\xa6\xc7\xb0\xa8\xda\xbc\x1a\xe0\x69\xcc\x9c\x44\x64\x57\x77\x27\xe5\xdf\x9e\x71\x95\xfa\xd2\x5c\xa3\x58\x03\xa5\x1b\xce\xb7\xc7\x9e\x4e\x18\xd3\x29\xcd\xf9\x54\x97\x3d\x06\xf8\xc1\x70\xf4\x5f\x46\x3b\x59\x98\xaf\x2e\x5f\xef\x20\xce\x5c\xa3\x9d\x60\xe7\x7e\x91\x0b\xb1\x5e\xe8\x75\x6e\xa8\xb1\x57\x1c\xfd\x63\x9b\x29\x3f\x40\xfc\x0f\x61\x30\xa6\x9d\xd9\x1d\x26\x1c\x38\xf7\x31\xa0\x9d\x3b\x02\xbc\x61\xff\x62\x65\x8a\x08\xfa\x10\x72\x79\x2e\x23\xce\x24\x4b\xc5\x29\xa7\x61\x41\x22\x72\xa3\xa4\x
2e\x0d\xfb\xbf\xeb\x51\xb4\x36\xd4\x16\x51\x0e\x19\x70\x6e\x07\x37\xed\x95\x7c\xf0\x1a\xc1\x6c\x40\x7e\xf8\xa7\x2f\x71\x29\xf2\x58\xc6\x11\xe7\x71\x13\xf6\x59\x35\x58\xbe\x58\x03\x86\x6f\x0b\xb5\x23\x97\x9b\xa9\x7f\xe6\xe5\x28\xaa\x71\x20\x31\xc9\xc3\x4c\x0c\xc7\x91\xef\xd4\x06\x7c\x1b\xcd\x39\x6a\x98\x48\x87\x94\x68\x78\x37\xbc\x69\xed\x4f\x1c\xab\xdc\xd7\x7b\xb3\xd7\xca\xe2\xdd\x66\xb0\xc1\x72\x37\x15\x23\x69\xbd\x2b\xff\xc5\x72\x1e\xe6\x8b\x5b\xcc\xc2\xcd\xe4\xa8\x19\xa2\x1b\x0e\x9e\xa7\x5a\x33\x23\x32\x67\xf4\xd3\xcf\x7e\x97\x39\xd7\x53\x7d\x64\xef\x1a\x85\x9a\xa4\x91\x21\x22\xb9\x66\xdf\xee\xa7\x16\x86\xbb\x8a\x09\x87\xe5\xd9\xe5\x15\xa3\x85\x66\xbb\x10\x1d\x4d\xfd\x9a\x41\x53\x29\xff\x2e\x6e\xb2\x17\x44\xda\x3b\x1f\xb8\xad\x60\x34\x9a\xd9\x22\x90\x22\x14\x6c\xaa\xdb\x69\xc2\xb3\x82\xec\x31\xcf\xcf\x1d\x59\x43\x11\x53\xcc\x08\x18\x27\x6c\x95\x9b\x9e\xac\x81\xd3\xd2\xa1\x1f\x15\x3e\xff\xe1\x3a\xba\x76\x48\xe2\x7c\xdc\x61\x94\x79\x8f\x08\xb8\x17\x19\xb5\xb5\x5e\x3a\x51\x07\x66\x8c\xe4\xfd\x8f\x37\x70\xfe\xf2\x65\x3f\x98\xd3\xc2\x8e\x8d\x41\x12\x32\xee\x09\x6f\x72\xf6\x8c\x71\x5c\xaa\xd9\x0e\xf2\xfe\xf0\x15\xb0\xce\x2a\x39\xc3\x50\x2e\x17\xa9\x4c\x10\xf2\x06\x8e\xd7\x3e\xba\xb6\x6b\xcc\x02\x3b\x36\x99\xd8\x17\xc9\xfe\xd4\xbe\xb3\x99\xb2\x6d\xca\x2c\xfe\x92\x8f\xb7\x59\x72\x4e\x94\x95\xc0\xfb\xfc\xe7\xd3\x40\xcd\x4f\x5f\x71\x83\xe1\xd8\xc4\x68\xbb\x99\xe3\x30\x69\x74\x94\x1e\xd3\xcc\x07\x76\x38\x7d\x3d\x21\x44\x11\xff\x71\x21\x33\x7b\xc0\x35\xc5\xd2\x34\x33\x67\xc0\x52\x7d\xc7\x6d\x61\x1a\x1c\x1b\xee\xbb\xd3\x5d\x91\xc0\xd6\x3f\x31\x48\x93\x34\x54\xc0\xb2\xd1\x0a\x47\x03\x9c\x87\x98\xa9\x6e\xdc\x0c\x73\x3a\xeb\x97\xbe\x52\x2b\x99\xcb\xfc\x89\x2b\x98\xdd\x57\xbf\x2a\xa3\x94\xbc\xdc\xa0\x03\xdb\x79\x54\x84\x0a\x8f\x78\xfd\xeb\x59\xac\x0b\x09\x2c\x19\xef\xfd\xf0\x49\xd8\x24\x38\xf0\xf4\xac\x2b\xf6\x51\x94\x5a\x1d\x0b\x5e\x2e\x63\xaf\x18\x87\xfd\xeb\x77\x7f\xb0\xc5\x1d\x04\xdd\x8c\x8e\x18\xa0\x5a\x7c\xf8\x55\x02\x87\x6f\x0b\xc5\xe9\x1c\xf1\x1f\xef\x5a\xeb\xf1\x5f\x47\x13\x9c\xab\x99\x29\xb3\x7c\x08\x87\x91\x7a\x08\x18\x30\x5b\xc0\x4a\x85\xfb\xb7\x7f\xce\xec\x7c\x70\xcb\x98\x79\x41\x4d\x8a\xac\x92\xe0\x4f\x31\xf7\x42\xd3\x7b\xad\xd0\xa8\x7f\x71\x5d\x83\xe5\x5c\xe3\x23\x07\x4e\xf7\xe8\xdc\xa9\x39\x2d\x2a\xdc\x1a\xa4\x8b\xcc\x8a\x86\x9c\xb0\xff\xc8\x8b\xa0\x38\xe5\xca\x34\x9e\xc4\x8b\x53\x0b\x67\x28\xc1\x5c\xa7\x16\x68\xc9\xbf\x1e\xda\x51\xd3\x80\xfa\xfc\xf0\xca\x0d\xc5\x12\x39\x61\x01\xb7\x95\x81\xe2\xbd\x3f\xba\xe9\x5d\xb4\x25\xfc\x15\x13\x37\xa4\x32\xe1\x30\x15\xdb\x8c\xba\x7f\x4c\x56\x63\xd8\x73\x0d\xd4\xe4\x27\x16\x50\x6f\xd1\xf3\x17\x47\xe1\xf3\x2e\x24\xe5\x34\x8f\x33\x8d\x11\x13\xf3\x7f\x31\xdd\x77\xd5\x6e\x49\x08\x19\xb0\x73\xd5\xe4\x0b\x60\x98\xd4\x31\x0d\xa6\x29\x77\x65\x04\x4c\xa4\xfc\xf8\x7e\x38\x77\x45\xaa\x4b\xcb\x4c\xd6\xad\x5a\xdf\x7b\xa5\xcb\xa4\x60\x18\x3a\x82\x39\xec\x66\x8f\x0b\x7c\xf7\x8f\x07\xb3\x68\x33\x93\x0a\x06\x5c\x64\xf8\x74\x05\x1b\xc8\xcf\xcd\xac\x4b\x68\xa7\x53\x22\x5e\x22\x76\x47\x68\x8b\x3f\xb9\x1d\xde\xea\xf8\xc1\x11\x6c\x0f\x31\x9b\x28\xf7\x33\x43\xd1\x0c\x9e\x73\x22\xe3\xe4\x94\xbf\x38\x28\xe7\x18\x88\x62\x89\x8c\x4f\xde\x10\xf4\xf8\xe5\x2b\x39\x8c\xec\x8c\xcc\xd1\xfd\xc4\x51\x96\x67\x98\x9b\x32\x73\x8a\x1a\x17\xd0\xe5\x23\xe0\xed\x72\xfc\xd3\x09\xc8\xff\x7b\xfe\xca\x87\x19\x01\x17\x86\x30\x0c\xc7\x68\x4d\x6c\xec\x54\x10\x96\xe9\xa0\x02\xb7\x0c\xe7\x87\xbf\x80\x63\xd0\x5f\xbe\xc5\x10\x2c\x2e\x0b\x78\x2a\xde\x2d\x63\x92\x9b\x4b\xdf\xb9\xbf\x3f\xff\x7e\x7e\xf0\xdf\x77\x53\xf6\x9a\x39\x7d\x41\x46\x25\xe8\x3d\x3f\xc9\xcd\xd7\x72\x40\x7a\x63\xc1\x49\xfa\x37\xb9\xca\
xe4\x46\x83\x05\xac\x08\x91\x3e\x1f\xa6\xb8\x65\x36\x64\xc0\x2e\xff\x9e\xdf\x19\xa7\x91\x07\x2d\x66\x63\x30\x8f\xa9\x4d\x8a\xee\x11\x17\x0a\x98\x78\xd0\x54\xd5\x20\xc2\x84\xfd\x27\xdb\xfa\xa0\xf6\x0a\xf5\xc9\xdf\xa7\x80\xe0\x37\x21\x15\x90\x22\xf2\xb5\xf2\xb9\x6c\x61\x20\x97\xff\xa7\x2b\x21\x96\x05\xf5\x49\x91\x28\x58\xcc\xbf\x65\xba\x05\x9e\xd2\xc9\x5f\xdc\xb3\x59\xaa\x91\xdb\x42\xea\xf9\xbf\x73\x3b\x05\xc7\x7e\xcd\x61\xd9\x46\x05\xb1\x35\xc5\x07\x2a\x1b\xd1\x1a\x57\xf8\xff\x63\x69\xf1\x0b\x92\x77\xc9\x2f\x25\x6d\x8b\xd3\x0e\x0f\x25\x0c\x1d\xe4\xcb\x44\x85\x31\x32\x16\x99\xf7\x8d\xf2\x73\x2f\x89\x3d\x85\xd3\xff\x3c\x0a\xd5\x7b\xf5\x04\xed\x14\x3e\xee\x50\x68\x23\x85\xe6\x76\x9c\xab\x2e\x3e\x3a\x16\x24\x3f\xc8\xcf\x55\x6b\xc2\xf4\x73\x1e\x51\x9d\x05\x65\x79\xf2\x4c\x0e\xa9\xd3\x23\xcb\x96\x26\xe2\xe7\xe0\x1a\x23\xe2\xbd\x7d\xe8\xb6\xac\x16\xb2\x20\x0c\x4d\xfb\xca\x4a\x5e\x5c\x9e\x8e\xcb\xf8\x15\x22\xa4\xbe\x28\x08\x9d\x92\xfe\xc8\xcb\x08\x7b\x5a\xc3\x98\x78\x51\x79\xb4\x40\x4f\x5f\xfa\x82\x4c\xfe\x2e\x07\x4a\x13\x9d\xc8\x00\xd7\x6e\xd5\x92\xac\x78\xdb\x5d\x25\x43\xb8\x7c\xe5\x9a\x4f\xcd\x30\x0a\x53\xfc\xbc\xc4\xe7\x2a\xb5\x9f\xfe\xc5\x1c\x83\xb2\xec\xab\x04\x20\x46\xf6\x61\x18\x1e\xd1\x94\x21\x66\x87\x5d\x9e\x69\xab\xaa\xf5\x49\x10\x2b\x1c\x1c\xc9\xff\xc4\x03\xd4\x44\xc6\x09\x18\x1e\x8b\x27\x46\xe7\xf0\xff\xb8\x2c\x51\xcd\xf5\xb2\x64\x65\x6a\xfe\xb3\xb3\x49\x9b\xae\xc4\xbc\xa6\x48\x3a\x35\x94\xe4\x24\x85\x6d\xa5\x9b\xc1\x33\x98\x15\x36\xc9\x2c\x9a\x58\x71\x63\x78\x0b\x66\x80\x3a\x3b\x09\x46\x0d\x18\x05\x01\x7e\x27\xfc\xe4\xb0\x8d\xd6\xb4\x7a\x08\xf8\xcc\x91\x9f\x66\x5f\x73\x39\x63\xd0\x1a\xb2\xed\xc8\xd6\xa7\x98\xda\x5e\x95\xc8\xa0\x78\x28\xb9\xc5\x92\x5d\xec\x8f\xdf\xa5\x78\x9f\x7a\xd8\x49\x96\x96\xa7\xad\xa3\xb9\xe6\x7f\xce\xe8\xc2\x6a\x0b\x1a\x96\xcb\x85\x27\x86\x7e\xd2\x09\x3b\x6c\x6e\x12\xd6\xc3\x19\xb1\x4d\x21\x1a\xea\x66\x27\xd7\x8d\xd5\xd3\xd5\xc9\xca\x1b\x87\xb0\x97\x14\x05\x1c\xb5\xf1\xc1\xb6\x07\x74\x7a\x37\x3f\x1a\x6c\xf0\xf5\xe3\x2b\xee\x7e\x7c\x59\xcb\x69\xde\x0a\x03\x71\xa3\x4c\x4f\x1e\x55\xfa\xce\x0f\x43\xb1\x9a\x82\x75\xc4\xe3\x52\xd3\xd5\xbb\xd1\xfc\xe0\xbf\x7d\xfc\x50\xe5\xb1\x7d\x04\x52\x66\x08\xfb\x86\xad\x19\x26\xdd\x6c\xf3\xb3\xfc\x13\x1b\x09\x87\xe7\x1d\x06\x15\x0e\xff\xef\x9c\xc9\xa3\xf3\x2e\xec\x57\xef\x4a\x36\x7b\x5c\xac\x8e\x09\x54\xc0\x48\x55\x54\x7e\x85\xd6\xb9\x2a\x48\x93\xd0\xf4\xed\x5c\xca\x3a\x9b\xea\x7e\x80\xc0\x44\x91\x16\x11\xc1\xdc\x20\x92\x13\x99\x2e\x71\xb6\xe6\xfb\x52\x32\x33\xd0\x8f\x3f\xcd\x3e\xbd\x41\x38\xf9\xc8\xb7\xaa\xd0\xaf\x15\x03\x6e\xe3\x39\x67\x1d\x15\x3c\xa6\x1f\xd0\xd6\xbd\xef\x80\xc2\xbd\x48\xf5\x32\x98\xc8\x20\x23\x18\x12\x36\x13\x14\xa7\x3b\xc9\xb6\x52\x44\xb8\xd1\xc8\x25\xdc\xb4\xff\x64\x9e\xb1\x8c\xf3\x05\x9c\x9b\xdc\xcd\xe1\xcb\x4a\x70\x43\x4e\x0e\x61\x23\x08\x72\x47\x42\x2a\xb9\x80\x7d\x57\x92\x58\xd7\x12\x7b\x69\xea\x60\x78\x8b\x1a\xc4\x25\x67\x22\x52\x52\x50\xf8\xe8\x16\xf7\xca\x8b\x8e\x37\xb4\xff\x74\x06\x4f\xcf\xd4\x47\xc6\x48\xd1\x0b\xcb\x2d\x5b\xe7\xc1\x4b\xb7\xdc\x84\x51\x06\x87\x09\xeb\x93\xc0\xb2\xd2\x4a\xfa\x1d\xea\x2d\x8d\x9c\xeb\xb3\x34\x87\x38\x9f\x9d\x66\xcd\xdd\x97\xbb\x13\x27\x61\xff\xdf\xbe\x38\xe4\xca\xda\x7c\x53\xbe\x7b\x88\x45\x01\xea\x83\x32\x51\xdf\x91\xaa\x2e\xa1\xc2\x37\x58\x88\x93\x63\x70\x49\xea\x5a\x8c\x9b\x9a\xe8\xba\xb2\x87\x70\x18\x1a\x67\x5e\xfb\x9c\x9a\x3f\x86\xa0\x35\xad\xca\xfd\xd3\x07\xf2\xc2\x7e\xb5\xbb\x32\xc5\xb5\x0c\xa3\x10\xfe\x65\xe1\xb9\xd7\xcd\x64\xb7\x37\x4d\x70\x8a\xbb\xc4\xeb\x2a\xc9\xed\xf3\x1e\x0c\xe1\x44\x4a\xc5\x1e\x41\xd2\x86\xef\xde\x70\x7c\x08\xbd\xc4\x05\x06\x87\xe4\x80\x5e\x81
\x6f\xc0\x04\xca\xcf\x92\x71\x81\xb7\xd3\xae\xe7\xc4\x3d\x1d\xf7\x07\xae\x32\xee\xab\x17\x63\x38\x24\x16\x69\x43\xb4\xfd\xfb\x02\x05\x87\x12\x8b\xcf\xe5\xbe\xfd\x72\xc9\x1b\x25\xbf\xd6\xbc\xae\xb3\xa4\x71\x5f\xcb\xb4\x42\xee\xae\x6c\x7b\x2e\x55\xcd\x6e\xa0\x40\x58\x7e\xee\x1d\x73\x4e\x83\x3c\xd1\x16\xd3\xc9\x6c\xc4\xe7\x03\x0b\x29\xa7\x75\x9a\xf1\xad\x27\x2c\x95\x44\x1b\x2c\x9f\x42\x6a\xe6\x21\xd8\x4c\x30\x1f\x89\x73\xdd\x1a\xfe\xf6\xe8\x4b\x6d\x3f\x69\xdb\xbd\xcb\x3a\x69\x1b\x55\x18\xb8\x4e\x28\xbd\x42\xf3\xfa\x0f\x2f\x23\x53\xa4\x72\xd4\xf1\xde\xeb\x24\xdf\x7a\xe8\x39\x6f\x39\xb7\x65\x25\x77\x80\xd8\x1a\xca\xe0\x39\xb2\x49\x12\xee\x97\x9e\x70\x3d\xd9\x3f\x5f\x34\x23\xd1\xaa\x86\xa5\xcf\x34\x5e\x4e\x64\x55\xe5\xab\x18\xe1\x37\x71\x66\x9c\x12\x5b\xf9\x29\x9c\xee\x27\xe7\xd5\x33\x56\xc7\x7b\x4f\xc2\x14\x34\x4c\xdb\x03\x55\x2e\xc7\x41\xee\xc0\x52\xca\xcb\x95\xf0\xdc\x94\xe3\x94\xc9\xae\xd5\xb8\xec\x20\x75\xe4\xd9\x88\xcc\xdc\x23\x4a\xba\x11\x5c\x99\x3d\x06\x25\x94\x9c\x92\xce\xd9\xa8\xac\xb8\x9f\x1f\x5b\xcf\xef\x1f\x79\xff\x65\x28\xda\xc9\x1f\x34\x7b\xdc\x14\x06\x93\xe6\xd3\xfc\x28\xaf\x45\xdd\xdf\x0f\x55\xf8\x69\x0c\xdc\xc8\x79\xa1\xb9\x1a\x22\x98\x91\x9f\x86\x94\x5b\x3a\x58\x99\x8d\x10\x48\xcb\x4f\x4c\x66\x79\xdc\x14\xd3\x89\xa0\xd6\x7a\x56\x7a\x6c\xcc\x11\x31\x41\xdd\xf5\x82\x94\x63\x3a\xe6\x15\x98\x8f\xf1\x18\xdc\x63\x22\xf3\x97\x2f\x2b\x93\x17\xaa\x66\x99\x47\xec\xed\x4d\xc1\x28\xfd\x1a\x93\x6d\xcb\xf7\x70\xba\x79\x57\xd6\xeb\x23\xfc\xc4\xae\x46\xc3\x5e\x4e\x12\xdd\x7b\x53\xde\x64\x00\xc3\x51\x01\xce\xb2\xda\x97\xbc\x13\x55\x78\x3d\xc5\x0d\x66\x80\x67\x8e\x0f\x6d\xa6\xdf\x55\xfd\x22\xd1\x93\xf5\x75\x80\xa3\x78\xfb\x51\x66\x20\xd5\x2b\xb7\x11\x02\xf1\x7f\x9f\x53\x28\x18\x43\x4f\x2a\xc7\x97\x23\x08\x19\xc1\x6a\x4f\x61\x7f\x3d\x3f\xd8\x65\xa9\xad\xb5\xd0\xe0\x42\x40\xa5\x7c\x13\xba\x4b\xa8\x23\x18\xad\x68\x44\xac\x8b\xd6\x00\xc7\xfb\xfd\x08\x76\xe7\xdf\xe3\x82\x1d\x94\xac\x5d\xad\xc6\xc4\x3f\x3a\x3d\x66\xd9\x43\x91\x13\x02\x47\xa8\x5f\x1f\x19\x2f\x70\xd9\x50\x2e\x91\x13\x42\xa2\xd4\xb3\xec\x48\x34\xcb\x34\x8f\x01\xf5\xeb\x2a\x0b\x6d\xf0\x0d\x64\x13\x80\x3b\x93\xaf\x81\x49\x0b\x91\x4d\x2f\x48\xc4\x4f\xcf\x35\x9a\x01\xf3\x2f\x1f\x8e\xf4\x0c\xf0\xa1\xe3\x0d\x4d\xfc\xef\xec\x37\x0d\xb4\x37\xa6\xbd\x18\xdf\x4e\xe9\x56\x45\xd0\xba\xcd\x02\x97\x32\xd1\xf9\xe4\xb1\xa6\xdc\x1a\xa8\xf8\x43\xfd\x3a\x63\xb1\x05\x1a\x21\xeb\x0e\xdc\x9a\x66\xd4\x7f\xb8\xd9\xb3\xf5\xa6\xd8\x4a\xc9\x4f\x5f\x20\x86\x4d\x62\x0b\xef\x68\x4b\x9b\x37\xfe\x46\x3d\x30\x36\x5e\xe3\x85\x5e\xe3\x49\xbf\x14\x46\xa3\xa0\x56\xf0\xe5\x16\x86\x39\x00\x77\xe9\x7e\xc7\x28\x9b\xc5\xe9\x9b\x33\xc6\xf3\x53\xc3\xd8\x2b\xd6\x3b\x75\x8c\xf2\xda\x56\xd1\xa7\xaf\x48\x18\x61\x7b\x84\xeb\xa6\xed\xf4\xf1\xf8\x10\x61\xb5\x39\x4d\xc7\xc6\x97\xbd\x32\x39\x58\xa7\x3a\xfb\xb2\xc7\x4e\x4f\x54\x3d\x50\x6e\x86\x44\xa7\x91\x1f\xbf\x6c\x9a\x18\xb3\xcc\xbe\x71\x8a\xc3\xfd\x8f\x6e\x17\x1a\x01\x2f\x4e\x17\xb2\x94\x05\x46\x06\x37\x00\xf8\xd0\xb4\xc0\xa3\x2e\x45\xa8\x96\xd9\x5b\x5b\x68\xe3\x33\x13\xe6\x64\xd3\xf4\xe3\xf7\x30\x5c\x96\xb2\x9e\x66\xca\x6d\xe1\xd0\xa9\x37\x1f\xa5\x72\xa5\xbb\xd8\x71\xe6\xdf\xb9\x64\x8b\x7a\x46\x02\x40\x08\xbc\x33\x16\xc2\xc3\xf9\xca\x08\x60\x7f\x4a\x7e\x2b\x61\xb4\x5c\xab\x69\x1b\x51\x59\x20\xa6\x88\x40\x05\x06\xe6\xb3\x71\x1b\x2d\x24\x9d\x82\xa2\xd8\x1f\x38\x23\xd1\x22\x49\x3f\xed\x15\x0b\xb5\x7a\x73\x72\x6a\x05\x8d\xf2\x9f\xef\x6e\x3f\x77\xec\x7a\xb8\x36\x62\x00\xf9\x95\xb0\xc9\x21\x65\xab\x06\x12\x6f\xad\x6b\x70\x09\x60\xd3\x8f\xb8\xd0\x78\x3f\xd3\x09\x24\x19\xb6\xd3\x69\x5b\x1c\x8f\x2a\xbf\x4a\xaf\x64\x5c\x2e\x5e\xee\x9f\x71\x8a\x9
d\x95\x31\x5c\xf0\xdd\x34\xd3\xbf\x7b\x84\x2d\xdf\xbc\x74\x10\xaf\x86\x9f\x6f\x10\x8e\xdd\xfb\x51\x07\x4a\xe6\xfb\xaf\xb2\x5e\xad\xc0\xbc\x7f\xe2\x29\xa6\x61\x81\xab\x76\x21\xbc\x60\x26\x67\x2f\xca\xcf\x11\xaf\xe0\x74\x8d\x25\x7c\x38\x5c\x79\xa3\x46\x5d\xc0\xb1\x7b\x03\x03\x64\x49\x6e\x8a\x7f\x5e\x19\x14\xc7\x63\x51\x05\x87\x54\x9f\xfe\x2f\x36\x05\x78\x86\xee\x40\x64\x91\x9c\xd7\x77\x5b\xc7\xf3\xbb\x73\xc7\x14\xb0\x62\x9b\xcd\xeb\xbc\x84\xa7\x3c\x9a\x91\xeb\x76\xd3\x03\x4c\xbd\x6d\xd4\x54\x9f\xd0\xfc\x29\x5b\x9e\xf0\x07\x2b\xc4\x7b\x6a\x44\x76\x8e\xf2\x52\xa5\xf7\x6c\xca\x0c\xc3\x60\xd3\xbe\xef\x64\x13\xe1\x70\x66\xeb\x1a\xec\xfc\x3a\x46\xb1\xd1\x7c\xde\x40\xd4\xd2\xd4\xd5\x3f\x2a\xb7\xcb\x97\x8d\xfc\x89\x73\xf9\xbe\x41\x76\x9e\x42\x2a\x18\x29\x02\x22\x59\xf7\x4d\xa3\x2e\x62\x5c\xd7\xc5\x43\x18\xe4\x58\xda\xe6\xd5\x8b\xce\xbd\xae\x8d\x93\xc3\x4c\x69\x71\xc3\xfb\xef\x4c\xf0\xfa\x86\xcb\x3d\x86\x37\xe8\x01\x7d\xd4\xd5\x7b\xf5\x21\x8d\xd8\x02\xc4\x5d\x88\x74\xb4\x2f\x0c\x26\xcc\x46\xf2\xa3\x18\xb5\xf7\x7a\xcd\xdd\x01\x9f\x38\x72\xc9\xda\xf5\x31\xaa\x61\xf1\xfe\x72\xd2\x83\xdc\xd8\x4b\xbf\xc6\xa0\xf7\x03\x96\x3f\x15\xba\x59\xe4\xb9\x2d\x1a\x25\xd4\xd6\x34\x82\x06\x9c\x69\xe6\xe8\x26\xdb\xc8\x15\x40\xe3\x38\xff\xe2\x68\x59\xc7\xcd\x50\x1f\x45\x68\x32\x3a\x42\x99\xcb\xed\x30\x57\x4e\x54\xfb\xc1\xf5\xe3\x38\x1e\xb7\x8b\x90\x2f\xe0\x7b\x73\x22\x2e\x05\xd8\x9d\x1e\xec\xde\xfe\xe6\xd1\x17\x7c\x4a\x7e\xb3\xe0\x4f\xa0\x7b\xd5\xc4\xda\x05\x8c\x6f\xb4\x50\xad\xbc\xa6\xbf\xd1\x29\xbe\xb0\x83\xea\x07\x08\xa2\x79\xf3\x0c\xa6\x73\x5d\x6e\x13\x3b\x5c\xfc\x7d\x4d\x3d\xcb\xfc\xc4\x72\xec\xe3\xe4\x23\x57\xb5\x26\xec\xc7\xe2\xe1\xe7\xb9\x98\xfc\x97\x5f\x12\x2e\xef\x15\x41\x3b\xca\xd0\x5d\xb2\xc6\xdb\xd9\xc6\xe6\x3d\x77\x81\xe5\xff\xc9\x0d\xb5\x7c\x0f\x48\x91\xde\xfd\x58\x8f\x7c\x29\xfc\x8f\x2c\xe3\xbf\x9c\x13\x17\x47\xfd\x38\x82\xbb\x6a\x5e\xc0\xa5\x5d\xf6\x44\xc7\x37\x8f\xef\xb1\x9b\xff\x1e\x9a\xf3\x07\xff\xe4\xf1\x9e\x80\xa3\x95\x8e\xb2\x37\x4a\x16\xd6\x23\x19\x3c\xf6\x34\xd0\x7c\x75\xd5\x81\xcc\xe6\x03\x44\x10\xea\x7e\xca\xed\x28\x62\x76\xba\xb6\xa3\x01\xf9\x46\xba\xeb\xfb\x8f\xde\x60\x8f\x6c\xb3\xd8\xb2\xf2\x6f\x8e\x7c\x12\x24\xf6\x9b\x13\x5c\x9a\xc1\x8b\x15\x7c\x4a\x5f\x3c\xb1\x47\x86\xf8\x1f\x9f\xec\xba\x52\x26\x56\x9b\xde\x8b\xfd\x5c\x13\x2c\x0e\xbd\x3d\x56\x30\x76\x59\xce\xe7\x46\xfe\xfe\x8b\x2f\x4c\x01\xf9\x70\x7a\xc3\xb4\x94\xd1\x3e\xaf\x0a\x6b\x7c\x79\x1e\xf4\x69\x48\xa0\xd9\x92\xb6\xca\xda\x0d\xd9\xfe\xeb\xe9\xac\xf2\xf6\x8c\xe9\xc9\x4d\xdf\x55\xaa\x0e\x48\x25\x5e\x01\x53\x5d\xb5\xc1\x16\xff\x13\x57\xb5\xcc\xc3\x9c\xaf\x60\xbe\x43\xae\x95\xd5\x48\x98\x98\x88\x44\x3c\xa8\xde\x9c\x52\xfe\xf8\x11\xd2\xa5\x58\xb1\xc2\x97\xf9\x5a\x8b\x85\x39\x3d\x15\x7f\x05\x8c\x5b\x66\xf6\xd6\x60\x0e\xff\xfe\xff\x58\x86\xad\x1b\xd5\x13\x8e\x84\x58\x3b\x48\x52\x44\x11\xbe\x80\x53\x87\x34\xec\x7f\xd6\x3b\x23\x3d\xd2\xcd\x2c\x8a\x7d\x23\xef\x83\xd7\x28\x57\x30\xa6\x30\xdc\x0d\x90\x73\xee\x07\x32\xb0\x7f\x5c\xe6\x6f\x42\x56\xfe\xa7\xd2\xca\xb7\x01\x92\x15\x4f\xb0\x08\xd7\x3f\x8e\x5a\xe8\xd2\xaf\x78\xee\x7f\xd6\xfa\x51\x00\x9e\xb1\x1e\x26\x6f\xb3\x75\xe5\xb7\xb3\xb4\x4e\xfe\x48\xc5\x21\x12\x95\x80\xf8\x7f\xb1\xc9\xaf\x67\xb4\x40\x01\x07\x43\x8f\x95\x3f\xf3\xee\xd8\x11\xbf\x34\xf2\x0f\x7e\xc7\x97\x94\xc3\x92\x93\xfd\x8a\xf3\xd6\x68\xfa\x1d\x69\x77\xa5\x0f\x66\xf3\xb5\xdb\x1d\x2a\xfe\x5f\x1c\x6c\x2e\x87\xd9\x77\xfe\xda\xc8\xd6\x55\xb9\xf7\x1b\x0a\xe1\x6d\xb5\x6f\xc0\x54\xd5\x05\xa1\x8d\xec\x74\x7f\x79\x14\xf9\xde\x5d\x94\xde\x2f\xad\x99\xa1\xc8\x76\xc9\x76\xa9\x9e\x8c\xa7\xd0\x0d\x01\x27\x94\xdb\x2e\xfe\x27\xee\x64\xc7\xdc\x41\x
e5\x53\xab\x44\x88\xb2\x1c\x7a\xb2\x64\xb8\xfa\x76\x01\x57\x90\x6d\xa1\x72\xb6\x03\xfd\x5b\xcb\xc4\x8a\x2f\xc6\xbe\xbb\x48\xe5\x69\x2a\x0e\x88\xb5\xa0\x01\x93\x03\x74\x21\xb3\x1f\xd1\xfb\x1f\x1c\x40\x79\x12\x06\xa9\x7c\xd5\x98\xb6\x39\xef\xb5\x7a\xfe\xda\xf7\x03\xf9\xbb\xcc\x33\xec\x3f\xbc\x09\x9f\x49\x4a\x7b\xcf\x14\x39\x11\x37\x5d\x1a\x0c\x1b\xa9\xfa\x38\x86\x4d\x9d\x62\x1e\xb9\x76\x41\xfe\x3c\x4c\xb7\x0d\x7f\x71\x96\xe7\xe6\x5d\x76\xa2\x68\xb1\x60\x23\xfa\x73\x84\x33\x2d\x7f\x4a\x98\x61\x76\xd9\xd5\xe9\x24\x70\xfe\xd3\xfb\x8e\x46\x14\xfe\xa8\x58\xa0\x32\xef\x0f\x82\xa1\x86\xf7\xb1\x9a\x7b\x9f\x2c\xc1\x0c\x45\x31\x0c\x57\xc7\x10\xdc\xbf\xdc\x54\x46\x97\x3e\x54\xe4\xd3\x20\xb3\xc9\x33\x2d\xff\xfe\x6d\x6d\xb3\xd4\xc4\x47\x9b\x07\x46\x8e\xb8\x2b\x5a\xa8\x43\x14\x3c\x66\xfc\xc1\x8e\x0d\xa2\xf0\xbb\xca\xd5\xac\x1c\xab\x9c\xeb\x83\xb6\x83\x4d\x14\x4e\x61\xa2\x65\xa8\x78\xdb\x3f\xcc\xa7\x73\xd5\x79\x31\x76\x9d\xcd\xf4\x65\x9c\xbe\x25\x97\xf6\x3b\xdc\xbb\xee\xf2\x51\x53\x61\x21\xb9\xc6\xfb\xee\x34\x38\x61\x08\xc4\x75\x54\xdc\xa9\x2c\x20\xaf\xff\x83\xc5\x39\xc9\x1e\x46\x1c\x91\xd4\xc3\x7a\xc0\x03\x83\xc5\xe6\xdb\xaa\xa9\xad\x7e\xd8\xcf\x4d\x55\x8a\xfb\x35\x88\x6c\xf9\xf4\x45\x79\x60\x4b\xc1\x0f\xa9\xd1\xf1\x6b\x98\x3a\xde\x1f\x1d\x18\x35\x3a\xc3\xf7\x9d\x5b\x9e\xf2\x29\xe9\xc6\xd9\x06\xa6\xd4\x21\xc8\x85\x48\xd7\x23\x1d\xbc\x8a\xa9\x1a\xdf\x94\x75\x76\xb7\xee\x15\x75\xd8\x17\xc0\x6e\x26\xfc\xe1\xfd\xe3\xe4\x22\xff\x94\xa2\x26\x1b\xe7\x00\xda\x9c\x0f\xd3\x90\xd5\x3e\x6d\xc1\x75\xae\xc7\xa6\x43\x25\xd8\xfc\x85\xe2\x60\xe4\x79\x6f\x73\xd4\x29\x0e\x86\xd9\xe6\xb8\x8e\xce\x4e\x3d\x76\x33\xc1\xf0\xfc\x27\x07\x97\xa0\x62\x1c\xff\xd5\xa6\x03\x9c\x6a\xba\xfa\xae\xd9\xba\xcb\x6c\x53\x98\x3a\x2e\xd3\xc7\x07\x3b\xb5\x74\x11\x31\xf2\xeb\x61\x93\x52\xe6\x69\xb6\xb9\x98\x7d\xd0\xb6\x36\x76\xae\x8a\x8a\x9a\xb8\xff\xcf\xef\x81\x79\xf9\xa5\x8d\x80\x36\xef\xd7\x44\x9b\x0e\xf4\x56\xe1\xf3\x9b\xb3\xce\x1f\x91\x84\x6b\x20\xeb\x35\xf3\x30\x1c\x52\xfb\x30\xda\x4a\x45\xa7\xe2\xd9\x45\xb5\x78\xdb\xdd\x38\xa8\xb6\xe1\x3a\xec\xf8\xa3\x6b\xb9\x47\x68\x29\x84\xc1\x2b\x94\x9b\x8a\xda\x0e\x07\x38\x14\x13\x54\xbc\xf7\x5c\xfe\xb8\x43\xa2\x2c\x42\x22\xb7\xc4\xb2\xae\xf3\x7e\xe0\x58\xa5\x1c\x75\x4a\xc4\xee\x3c\xee\x19\xd9\x7e\x03\xee\x9f\xce\x67\x1b\xc5\x75\xc5\xbd\xfc\x1e\xb0\x26\xee\x6f\x4c\xcb\x3b\xf7\x9f\xbd\x7c\x8a\x71\x49\xb8\x0e\x42\x34\x5b\x4b\x79\x9d\x66\x30\x4b\x9b\x07\x37\x08\x55\x3f\xff\x30\x6d\x85\xbf\xfa\xc2\x26\xe0\x8b\x7d\x4d\xe9\x7f\x7b\x97\x34\x65\xe3\x45\x7a\x5a\xf8\x6b\x35\xae\xb5\x0d\x02\xfb\xc2\x38\x62\x14\x60\x95\xe4\xba\x9c\x76\xaf\x8c\x91\xcb\xec\x62\x2d\xa1\x02\xc3\x61\x3c\x58\x1e\xd5\xc1\xf4\xf4\xa5\xa0\xd1\xbb\xe5\x53\xbe\x1d\xc5\xe1\x7f\x38\x42\x39\xf6\x2b\x62\xb6\xeb\xb5\xb6\xe8\x46\xf8\x91\x5a\x91\xaa\x8d\xc4\xd9\xad\xf9\x5b\x27\x36\x2f\x25\xcf\x18\x04\x19\x5c\x7e\x92\x8a\x73\xbc\xc2\x6f\x0a\xb9\x07\xd2\x69\xe9\x3b\x18\x1e\xa3\x34\x03\x64\x0d\xca\x53\xbd\x4d\xab\xae\x61\xdb\xff\xc5\xba\x6b\xfd\xa2\xb4\x64\x7d\x1a\x86\xcf\x70\xd0\x4e\xf3\x3c\x0d\xb4\x4f\x80\x99\xfa\x95\xcc\xd7\x2c\x35\xb8\xe9\x07\x77\xa6\x2f\x79\x36\x9e\xcb\x3d\xb9\x80\x43\x00\xcf\x81\x79\x5d\xa5\xb3\x1e\x53\xfc\xe0\xb4\x8c\x76\xab\x39\x43\xe9\x79\xe7\x96\x02\x6c\x1e\x45\xd8\x75\x81\xd3\x25\x88\x21\xd6\x73\xda\xdb\x2a\x96\xc8\xed\x08\x47\xdc\x5d\x7d\xa8\xba\x2d\x54\x1f\x67\x99\x21\x4f\x89\x9a\x4f\x72\x3b\xad\x98\x2a\x94\xe0\xd0\x9d\xa4\x18\xdc\x66\x5f\xf9\x87\x4d\x7f\xfb\x04\x3a\x1a\x20\x15\xfa\xb5\xd0\xc0\x4b\x44\x19\xfd\x77\xce\x58\x4d\x75\x35\x57\xd7\x6c\x4a\x31\x1b\xb4\xd1\x09\x92\x8f\x41\xaa\xc1\xcb\x02\xd7\x1c\x99\xf2\x70\xaa\xde\
xd9\xdf\x7b\xd6\xf3\x04\x31\xe5\x1e\x6a\xe9\x36\x30\xcf\x9d\xf0\x6c\x5e\x53\x89\x34\xd5\xfd\xce\xf5\xc3\x40\xdf\x3c\x77\x66\xc1\x2a\x67\x29\xba\x99\x21\x4d\x55\x4b\x50\x25\xcf\x6d\x2d\xc6\xe8\x8f\x4e\x6d\x91\x57\xff\x8c\xa8\xea\x50\x16\x01\x81\x28\x10\xf0\x92\x26\xfb\x27\x52\xaa\xaa\x05\x7d\xbf\x70\x48\xf8\xca\x42\xdb\x2f\x50\x35\xbd\x5d\x57\x38\x0e\xf4\x4e\x86\x97\xd8\xb4\xd2\xe8\x59\xe8\xad\x21\x8c\x1e\x7c\xde\x95\xcd\x91\xb4\xc5\x9e\x77\xcb\xbf\x24\x26\x5c\xab\xc7\xbe\xcf\xdf\x7b\xe3\x0c\xcb\x7c\xda\x40\x69\x7b\xaa\x77\x5a\xe2\xe9\xa0\xa8\x49\xda\x79\xc2\x5f\x00\x42\x32\xc2\x55\x13\xd8\xf2\xad\x9b\x71\x16\xac\xec\xbf\xd9\xde\x8a\xcc\xec\xb2\x92\x00\x49\x92\xb3\x77\xaf\xe4\x15\xcb\x0b\x5b\x2d\x6c\x2a\xf7\x18\xc6\x21\x5b\xb0\x9e\x63\xb5\xcf\x9f\x44\x1a\xc3\x29\xb2\xab\xd4\x48\x91\xf6\xa6\x91\xbf\x38\xaa\xe1\xb6\xae\x25\x26\x7b\x59\x30\x79\x08\x04\x27\x2f\xe5\xa7\x26\x6b\x9e\xe0\x14\xa9\xcb\x8e\xe1\x63\x48\x1c\xa7\xde\xf8\xd8\x69\x3a\x5c\x7d\x2e\x59\xd3\xa6\x75\x97\xaf\x72\x71\x50\xac\xf8\x33\x28\x93\xb4\xe5\xab\x45\x8b\xe2\x5e\x2b\x1e\x6b\x91\x76\x60\xc4\xf5\x46\x6b\xab\x29\x73\xd3\x89\xcc\x1c\x5a\xf4\xda\x49\xa5\x1f\x8f\xb4\x1c\xa1\x07\x89\x9b\xf4\xb8\x63\xe3\x48\xf8\x1f\x54\x77\x2f\xcb\x7b\x64\x8b\xca\xaa\x77\x71\xef\x69\xa6\x85\x24\x2a\x77\x2b\x0a\x34\xfb\x73\x8d\x8e\x79\xfa\x7f\x75\x05\xa3\xc8\x89\xa4\x33\xb1\xa9\xee\x39\x26\xce\xb4\x6c\x30\xaf\x7f\x5e\x9f\x12\x07\x2d\x1c\x2d\x90\x3b\xf7\x0a\x87\xe3\x3d\x1c\x4d\x45\x70\x7f\xee\x4d\x18\xed\x5a\x53\xb6\x95\xb4\x16\x2d\x4f\x39\xde\xf6\x21\x4a\xa6\xc8\xc6\x32\x8e\xc4\x1c\x94\x4a\xce\xf5\xe1\x54\x64\xca\x12\x9a\x9c\x3a\x8d\xbf\xcb\x71\x4e\xbc\xcd\xcd\x40\xcf\xe6\x68\xc8\xb6\xbb\x9a\xfc\x59\x4f\x73\x6b\x3e\x92\x83\x8c\xd0\x93\x84\xf9\xed\x94\x55\x03\x7c\xe9\x8c\x90\xd0\x33\xae\xc2\xf5\x10\x37\x2e\x56\x4d\xe1\x57\x84\x94\xec\xae\x8b\x59\xed\xbb\x93\xd6\x18\x3c\xe6\x66\x7f\x72\x41\x8b\xdf\xf5\x69\x43\x6e\x73\x97\x46\xe9\xf9\xef\x70\xff\x9d\xac\x53\x61\x6b\x23\x4c\x47\xeb\x04\x52\x62\x20\x69\x8f\x75\x47\x14\xc2\xf4\x35\xc1\x60\x97\xf0\xe7\xbc\x2c\xf6\xd2\x39\x04\x32\xf7\xad\xef\x18\xe5\x97\x6c\x24\xb3\xf8\x19\x42\x3d\x45\xca\x20\xe5\xed\xac\x57\x8d\xc6\xfc\x24\x50\x07\x77\x8f\x97\xab\x6f\x98\x6d\x61\xa1\xef\x25\xa9\xb5\x67\xc3\xb6\xa5\x22\x83\xfa\xdf\xba\x4a\x46\xe0\xcf\x6c\xfe\xb8\x19\x5f\x69\x0e\x15\x74\x9e\xa9\xac\x04\x7e\x94\xb3\x69\x7b\xa7\xd6\x20\xf2\x05\x46\x67\xbb\x0a\x31\x1a\xe1\xcf\xf9\x58\xb4\xd2\xd9\xbb\xb9\x9e\x98\x4e\xa7\x9d\x79\x54\x64\x6c\x8c\x5f\xb7\x24\x0b\xa8\xd3\xfc\x27\x5e\xe2\x5a\x2a\x91\x49\x80\x46\x85\xc6\x23\xc4\x61\xe0\x53\xe0\x89\xfe\x2f\x86\xc0\x70\xcc\xa9\xf1\x82\xf7\xc9\x1d\x22\xf6\x3b\x7c\x84\xd4\x66\x63\x45\x3c\x3c\x65\xb4\xb7\x3d\x02\x8c\x90\x03\x3d\x88\x1c\x99\x21\x69\x09\x35\xb5\x22\xac\x27\x72\x9e\x23\xdc\x86\x6e\x05\x26\x3c\x66\xe1\x52\x4e\x09\xbb\x24\x67\xbc\x24\x52\x39\x2a\x69\x66\x4c\x02\xcf\x16\x78\x66\x0b\x21\xc7\xd6\x9c\xbf\xbe\x09\xa3\x70\x58\x67\x2a\xb1\xe0\xf9\xd6\xa4\xf0\xe1\x77\x58\x4d\x77\x0d\x3d\xfa\x20\x21\x20\x7e\xe8\xda\x63\x64\xac\xa1\x04\x69\x39\x81\xca\xb9\xd5\xfe\x93\x6d\xa6\x2b\x7d\x61\x64\xbd\x01\xfb\x05\xf0\x25\x8b\x70\xc5\x41\x25\xe6\x42\xcc\x4f\x9a\xea\x52\xa6\xcd\x9b\xd2\xff\xb2\xf7\xd8\xe1\x64\x76\xc7\x30\x0b\xc3\xe3\xc5\xdf\xa3\xa4\x34\x3a\xd1\xb9\xfd\xd7\x70\xc3\x8e\xcc\xd3\xdc\x14\x50\x84\xde\xe9\x78\xa5\xd1\x99\x29\x5f\xdb\x02\xd6\xf2\x1a\x39\x48\x3e\xed\x78\x05\xe2\xe7\xb6\x3d\x60\x06\xc6\x2c\xb7\x53\xd8\xdc\x32\xf8\x90\xd3\xb1\xfa\x5c\xd4\xe7\xae\x43\x80\xee\x24\x23\x4b\x76\xe1\xee\xc0\xb7\x25\x55\x96\x9d\x66\x5a\x72\x3a\xdb\x60\x15\x16\x7e\x4e\xff\x25\x50\xe6\x61\x4c\x4b\xb8
\xd8\xbf\x1b\x29\x0b\xb7\x64\xbb\x33\x94\x4e\xe9\xcb\xa5\x7e\xb3\x65\x7b\xc8\xb5\x90\x35\xa2\x90\x9d\xc0\x66\xfe\xa5\x18\xdb\x73\x4b\x3a\xc3\x9c\x1b\x8c\xab\x85\xfb\x68\xd9\x47\x98\x68\x31\x7a\x79\xd3\x4e\x10\xd6\x31\xd8\xd6\xc8\xe2\xa1\xe4\x93\xa7\x09\xbc\x83\xe5\x00\x45\xee\x50\xce\x63\x2d\xd3\x80\xb6\xb2\xde\x28\x9b\xd4\x12\xbe\x25\xb9\xb1\x67\x81\x58\xf8\xd1\x21\x1c\x84\xfa\x56\x8b\x78\x31\x3b\xe8\x73\xc8\xcd\x70\xb6\x09\x79\x49\x8f\x47\xaa\xba\x38\x5c\x08\x4e\xcc\x87\x27\xd9\x02\xb6\xe7\xe5\xe3\x71\x01\xdf\x17\x24\xe4\xcd\x56\x07\x93\xe1\xe0\xce\x55\xa3\x15\x89\x0a\xbe\x2d\x42\x80\xca\xaa\x1e\xd4\xb1\x68\x39\xf3\x73\x3a\xc9\x21\x59\x86\x72\x16\x68\x72\x0c\xd3\xe5\x35\xd5\xd6\x52\x77\xe4\x4c\x1f\xcc\xef\x30\x9c\xb1\xfe\x57\xae\x2a\x76\x30\x3d\x45\xed\x75\xdd\x4e\x07\x93\x3f\xf6\xe7\xcd\x17\xcc\xac\x3b\xcb\x2c\xe8\x8c\x9c\xab\xf7\xaa\x72\xf2\x2b\x0b\x28\x64\x5f\x2f\xab\x46\xea\xba\xc2\xc0\x1e\x16\x53\x3f\x8b\xb2\x02\x9b\x5c\xda\x01\x34\x61\x7d\x8e\x0c\x31\x57\xb5\x06\x53\x7b\xef\x7c\x5b\xdf\x87\xf5\x10\x4f\xe0\xf1\x95\xf8\xd7\x9c\xd1\xff\x9b\x53\x1c\x8c\xb4\x71\x5a\x85\x1a\x7a\x63\x40\x3b\x08\x2f\xa2\x1d\x1f\x47\xa2\x6a\xc9\x5a\xc6\x41\x88\xae\x69\xa5\x86\x81\x1a\xa4\x81\x7e\xc8\x74\xd5\x1f\x87\x15\x81\x10\x0d\x12\x1c\xb4\x44\xcb\xb7\x80\x3a\x1a\x98\x0d\x56\xd1\x5c\x98\xf8\xf8\xd9\x12\x6a\xae\xaf\x57\x0b\xae\x19\x05\xa1\xda\x97\xc0\xad\x3d\xe7\xa9\x57\x12\x41\x8d\x7d\x61\x7f\xb7\xfa\x36\x8e\xdd\x71\x95\xde\x81\xea\x68\x50\x1e\xf7\xe3\xfb\x3c\x29\x9b\xd3\x49\xd5\x1f\x2a\x6e\xf0\xcf\x7a\xa0\x66\xe0\xa3\x5d\xe7\x28\x4e\x47\xc4\xd2\xf5\x12\xb8\xe0\x32\x5b\x98\x6d\x70\xb4\x72\x8a\xb8\xe5\xc8\x76\x95\x72\xa4\x1e\xc3\x59\xe3\xd5\x52\x6c\x32\x43\xf8\xaa\x34\xc2\x51\x6a\xbf\xd1\xea\xa4\x29\x16\x16\xdb\x21\xce\x94\x48\xea\x36\xbb\x9a\x51\x97\xf9\x8a\x83\x35\x75\xd1\xfd\xcd\xad\x32\x8e\x7e\x43\x51\x87\x3e\x8a\x3a\xca\x69\x39\xf6\x47\x40\x41\x52\x02\xed\x0f\x98\xe1\x3b\x35\x1d\xdb\x8a\x6c\x38\xa1\x12\x6e\x90\xf4\x87\x1a\x0e\x35\x9a\xe7\xf5\x73\xa0\xe6\x1b\xe1\xcc\x25\xde\xd5\x14\x47\xe7\xbd\x70\xb3\xcb\xb2\x82\x90\xf1\x4e\x99\x7f\xdb\xa8\xcc\x27\x0e\x1e\x63\x85\xf3\x1a\x9f\xdb\x7b\x8e\xf8\x77\xa6\x19\xc6\xc5\xf8\x8d\xe0\xd9\xd7\x57\xed\xd4\x52\x85\xc9\x4b\xb7\x18\x3a\x43\xc3\x69\x25\xee\xca\x5f\x3c\x5b\x42\x75\x31\xad\xa5\xbf\x7b\xe1\x1c\xa0\x82\x80\x94\x9c\xc7\xe2\x35\xe6\x74\x88\x40\x2b\x7c\x9c\x28\x5a\x16\x18\xd7\x84\xee\x54\x0f\x26\x4b\x81\xe6\xe7\x0d\x96\x29\x91\x37\x8e\xe1\xaf\x88\x71\x3c\x06\xfc\x53\xaf\xc8\x30\x9c\x76\x8d\x2a\xa3\x3d\x5e\x63\x5c\x8f\x77\xb4\xfd\x28\xf8\x6b\xe9\x6b\x06\x5e\xb6\xc7\x59\x01\x62\x16\x81\xe5\xb8\x83\x0f\xca\xcb\xce\x2a\x12\x03\x64\x52\xd1\xab\x11\x00\x3c\x67\x08\x60\x83\xe6\xb1\x9b\xdf\x60\xe1\x54\x02\x2e\x8a\x34\xc3\xae\x97\x0a\x0c\x2e\x71\x96\x85\xa9\xdb\x1f\x03\x2f\x24\x7a\xd2\x88\xa9\x9a\x32\xf8\x26\xdc\x95\x45\x93\x56\xb5\xde\x1a\x1a\xff\x1f\x57\x6f\xb1\x2c\xbb\xb2\x73\x8d\x3e\x90\x1b\x66\x6a\x9a\x99\xd9\x3d\x97\x99\x19\x9f\xfe\xc6\x5a\x7b\x7f\xe7\x9c\xff\xc6\xec\xcd\x08\x57\x65\x65\x4a\x1a\x63\x28\x65\x89\x70\x1d\xad\xac\xb4\xee\xf6\x39\xe3\x8d\x5a\x03\x34\xaf\x89\x22\xa7\xe2\xfb\x89\x87\xfa\x30\x0a\x6d\xff\x98\x54\x1c\x91\x02\x3f\x95\x1d\x5a\x87\x60\x1d\x83\xd2\xa4\x07\x5e\x1d\x8c\xe5\x30\x17\xc1\x29\x14\x8e\xfa\xef\x79\xd5\x4c\xbd\xaf\x61\xad\x72\xf7\xc4\x19\xca\xe7\x66\x42\x07\x50\xc4\x12\xac\xbd\x98\x99\xa9\xd2\x67\x25\x1c\x4b\xaf\xbb\x7c\x52\xe2\xdf\x1f\xe7\x56\x64\x09\x1f\xe8\x49\xdf\x00\x19\xbe\x95\x49\xfd\xe1\x17\x32\x83\xcb\xbf\x61\x3d\x86\xd3\xab\xea\x94\x7d\x27\x0d\x8a\x38\x1a\x27\xa4\xdc\xd2\x0c\x22\x73\xec\xe5\x3f\xe5\x23\x0c\xc3\x8b\x40\x3c\x6
8\xc1\x1b\xb9\x82\xcc\xab\xbc\xd7\x64\xb6\x0b\x3a\xc8\x0d\x7c\xd7\xa6\x43\x43\xff\x4d\xfc\x0c\x1c\xa2\xa9\x4f\x45\x1d\xb0\x75\x24\x5e\x33\x99\x0f\xaa\xa0\xb8\xdc\x5c\xf0\x5b\xc1\xf5\x79\x52\x4f\x52\x94\x85\xde\x1a\x7a\xb3\xb3\x60\x4d\x30\xd5\x2b\x87\xf0\x49\x83\x96\xff\x17\x80\x18\x46\xf0\xd8\x46\x68\x46\xcf\x8c\x5b\xca\x69\x53\x5f\xfa\x39\x13\x11\x9f\x65\xa5\xfe\xa2\x2a\x9d\x4c\x76\xe9\x8d\x61\xea\x51\x40\xbc\x4f\x93\x0c\x0d\xdb\x2d\xa7\x00\x3e\x0b\xc2\x99\x52\xca\xc1\x35\xfc\xe9\x53\xfc\x7d\x08\xbe\xe9\x10\xed\x89\x58\x46\xf6\xd8\xea\xbf\x18\xa9\x30\xd8\x1f\x3e\x90\xde\xd1\x63\x21\xa5\xfe\x70\xc6\x15\x7f\x3e\x6f\x9f\xa6\xd9\x92\x13\x3d\x71\x6d\x8a\x87\x22\x4e\x12\x3a\x44\x5c\x21\x7e\x95\xe4\x39\xd1\x44\x2a\xcd\xb8\xcc\x39\x63\xaf\x3e\xcd\xa0\xbf\xbc\xc5\xfb\x5c\xaa\xe7\x53\x51\xd5\x72\xfd\x3f\xee\xcc\xb7\x86\x7a\x99\xa6\x12\x55\x63\x39\x9a\x69\x38\x76\x77\xca\x67\x55\xc4\xce\xc8\x78\xbf\x6e\x41\x75\x3d\x11\x02\xdb\xab\x41\x52\x0f\x43\x12\x34\x56\x1f\x5d\xfd\x7e\xa8\x51\x83\x3d\x57\xef\xe5\xf8\x7b\x70\xa6\xd7\x52\x69\x38\x0a\xcd\x30\x20\xe2\x4b\x1d\x46\xdf\x6a\x8b\x39\x14\xe5\xd2\xff\x63\xcb\x8c\x35\xae\xaa\x91\x36\x76\xdb\x9b\xc9\x2e\x4e\xe6\x2a\x62\xb4\xec\xf2\x15\xaf\x32\x1d\x97\x6d\xa1\x96\x64\x54\xfb\xe3\xd3\x1f\x6d\xa0\x48\x31\x83\x31\xf1\xa0\xf3\xed\x7e\xf6\x09\x2c\x45\xdd\xf2\x10\xba\xda\x31\x01\x84\xef\x18\xb8\x65\x7b\xfb\xdd\x26\x3a\x2b\x0b\xf4\x05\x37\x4e\x6c\x77\x2a\x7f\xcc\xe8\x7f\x7c\x95\x77\x71\xb3\x39\x38\x41\x27\xa4\xb5\x1e\x8b\x4a\x73\x55\x73\xf8\x7d\x03\xb1\x84\xd6\x0a\x45\xe7\x20\x78\xd9\xe0\x10\xb2\x3b\x91\x02\xdd\xf5\xf4\x0e\x3a\x1a\x6d\x85\x24\xb4\x3d\xb2\x44\x9b\xef\xa8\xf5\x83\xa7\xfe\xa2\x49\x92\x82\x4b\x81\x70\xa8\x90\x6f\xcf\x09\xff\xd7\xbe\x4c\xa1\xd6\x53\x3d\xda\xd8\x44\xc5\xbd\xc0\x2c\x37\x38\xd7\xdb\xca\xd2\xed\xfe\x10\x7e\xc9\x07\xc3\xbd\x5e\x9f\xdc\x5a\xa0\x32\x5d\x4e\x01\x62\xfe\x06\xba\x9a\x40\xc8\x8e\x7e\xbe\x54\x9e\xb0\x50\x96\x5f\xbf\x5f\x83\xb5\x40\x16\x49\xe5\xd0\x12\x3a\x2b\x80\x3f\x7f\x80\x1d\xc3\xff\xa3\x31\x58\xc6\xe4\xb2\x2e\xf0\xfe\xc4\x3f\x72\x59\x6d\xe0\x25\x54\xef\x43\xb2\x27\xdb\xc2\x6c\x08\x85\xf9\x08\xdb\x4f\x97\x66\x78\x82\xad\xde\xca\xd3\x77\x43\x9b\x90\xee\xa8\xaa\xfb\x55\x05\x36\x9f\x74\x23\x19\x69\x6f\xf4\xcb\x4d\x70\x41\x92\xe2\x57\xa8\x2c\x88\x66\x75\xcc\x7e\x2a\xb3\x02\xd5\xe4\x7f\xb9\x3e\xc7\x8c\xe7\x8d\x84\x29\x6d\x79\xac\x5a\x2c\x19\xed\xaf\x28\xfb\x92\xa6\x61\x0e\x46\x7a\x2c\xe3\x4c\x00\x0f\x50\xa1\x26\x8a\x16\x0d\x6d\x5f\x34\x58\x5d\x08\xf2\x47\x37\x35\xaa\xb9\x71\x61\xa2\xbe\x03\xc4\xe5\x51\x72\xdd\x8a\x23\x63\x39\x54\xa3\xff\xcf\x67\xab\x42\xe5\xf5\x41\xd6\x85\xeb\x10\x08\xc7\xfb\xd8\xdd\x30\x7b\x25\x92\x9c\x21\x85\x22\x53\x66\x3f\x80\xfd\xe4\xcd\x13\x93\xc8\xaa\x90\xfb\x5f\x3f\xcc\xf5\x2e\x4f\x68\x55\x2b\x18\x56\x33\x76\x2a\x6c\x87\xf3\xb7\xb0\x6c\x2d\x78\xa9\x82\xb9\xce\xb2\xb0\x77\x9e\xfd\xc7\x0d\x45\xf3\xf4\x6b\x4c\x53\x08\x27\xee\x5b\xf7\x08\x57\x0f\x6a\x76\xb8\x37\x75\x65\x47\x80\xca\x52\xb7\x92\x3c\x56\xa2\x6c\x81\x4d\x95\x12\x41\xe1\xe9\x22\xfd\x20\x8e\x79\xd8\xd1\x6c\xfa\x1a\x2f\x31\x06\xaa\xaf\xff\xe2\x5c\x2b\x8c\x4b\x68\x75\x30\xa1\x21\xb1\x7f\xb9\x84\x1c\xaa\x99\xe1\xb1\x2a\x34\xfe\x0e\x84\x16\x03\x41\x71\x6b\xc3\xec\x26\x7f\x0b\xb5\xa5\x90\x66\xe1\x27\x37\xc6\x72\x18\x60\x4e\x33\xb5\xea\x0f\x78\x99\xd4\x9c\xca\x22\xff\xc5\x6c\x86\xe9\x3d\x51\xec\xfb\x64\x6e\x37\xb5\xcd\x62\x6b\x2f\xf8\xb5\x70\x56\x01\xc3\x29\x73\x37\x1b\xa7\x68\xeb\x50\xed\x8f\x14\x48\x5f\x96\x79\x27\xf5\xcb\x6d\x4e\x36\xed\x3b\x77\x99\xff\x73\x24\x96\x89\x85\xd6\x5d\x58\xe9\x43\x72\x57\x39\xf2\x14\x8d\xba\x66\x3a\xa2\x88\xf6\xd7\xc2\x49\xe4\x
01\xe8\x6c\x87\x98\x42\x6d\x71\xd8\xcf\xab\x7b\xad\x31\xd8\xcd\x0d\xdd\xd8\xe3\xff\x87\x77\x2b\xbc\xf6\x21\x89\x4b\xc8\xd1\xc8\xa6\xe1\xf8\x71\x5e\x54\x62\x84\xea\xad\xbe\x93\x7d\x01\x0f\x7a\xb8\xd2\x0f\x67\x25\xbf\x09\x19\xd2\x48\xdf\x9a\x54\x69\x29\x02\x43\x38\xd3\x7f\x75\xd6\xa3\x14\x82\x17\xae\x37\x92\x3d\xf3\xd2\xcc\x20\x40\xb1\x8d\xa9\x8e\x86\x6e\xec\xb5\x6b\x8b\x8e\xb6\x84\x84\x1c\xce\xd9\xa2\x48\xd4\x25\xde\x02\x53\x28\xee\xff\xe8\x9a\xda\x9e\xf3\x8e\x87\x24\xc7\xd2\xff\xec\x9f\xb9\x0a\x49\x85\xfd\xe1\xdb\x35\x4c\x56\x20\x4a\x64\x4b\x0b\x8f\x06\xa0\xf8\x36\x7e\x99\xde\x20\x2a\x49\xd0\x67\xf7\xc7\xe0\xc5\xfa\x41\x0d\xe9\x09\xf5\xbd\xff\x87\x03\xea\x6f\x25\x7f\x1d\x9b\x98\xed\x51\xa8\xc2\x94\xed\x79\x5a\xd9\x6c\xb6\x2d\x31\xc5\xc1\x88\xf2\x28\xc4\xbe\x71\x73\xda\xe4\x5a\xca\x12\x7b\x41\x9d\x18\x2c\xa9\xb7\xa9\xf8\x5f\x63\xfc\x07\x9f\xec\x66\x6e\x77\x35\x73\xda\xfe\x93\xea\xe7\x1d\x9d\xe7\x11\x98\x83\x87\x50\x68\x3d\xf3\xa3\x37\x56\x83\xbd\x9c\x45\x4c\x3e\x25\x4d\xd3\xf6\x11\xe6\x74\x6b\x5a\x96\x56\xfc\x7f\xdf\x51\xfa\x3b\x1d\x59\x84\xda\xc8\x79\x78\xc9\x82\xa3\x86\x91\xf3\x74\x32\x97\xb3\xd9\xd4\xf5\x89\x38\x64\x25\x5b\xac\xee\xbd\xf4\x79\xfb\xf7\xd5\xe6\x4b\x7d\xf3\x87\x91\x3d\x33\x57\xbb\xff\x10\x69\x96\x51\x4e\x7f\xc0\x57\x0e\x6b\x78\x3f\xfd\xf9\x89\xd1\x95\x8d\xd2\x9f\x37\x4d\x10\x6c\xa0\x0d\xf2\x2f\x8e\xdd\xf5\x08\xd7\xc9\x81\xbd\xe1\x95\x32\x78\xe7\x5e\xdd\x12\xb8\xec\xde\xf3\xe6\xff\x3e\x45\x65\x7c\x7d\x9f\xb2\x9d\xc6\x9d\xf2\x08\x57\x51\x95\x2c\x4e\xb2\x5d\xe2\x0c\x1f\x4b\x50\xfc\x22\xed\x03\x48\xa8\xa5\xa9\x58\x7d\x75\x4c\x01\x5b\x11\x78\xe2\xee\xfe\x7d\x77\xef\x8f\x2f\xad\x01\xe3\xb9\x96\xaa\x04\xfd\xfb\x60\x04\x32\x31\x43\xbf\xc3\x23\x1c\xad\xb0\xb7\x06\xce\x2c\xd4\xd2\x50\x7b\x9e\x32\xb7\x8d\x6c\xaf\xcb\xe2\xf8\x9b\x9d\x24\xce\x7f\x21\x71\xf7\x9b\x57\x0b\x17\x69\x92\x1c\x7e\xbe\xd2\x8e\xcb\x64\x2e\x0b\xdd\x15\x35\xbf\x9c\x49\xda\x3a\x78\xa4\x19\xd2\x1a\x87\x7d\xfb\x1b\x21\xbe\x77\x60\x73\xb6\x50\xd2\xff\xda\x07\xe7\x68\xcc\xc7\xe3\x1c\x1b\x6b\xec\xf0\x9e\xf9\xb3\x9b\xad\x60\xb5\x49\xfa\x39\xf3\x18\x8d\xb8\xc5\xa3\x6b\xe5\x61\xb1\xdf\x5a\xad\xa8\xfc\xfe\x87\x67\x38\x8c\x12\xe2\xfc\xee\xb0\xca\xa4\x38\x61\xe2\x1a\xb3\xca\x29\x7e\x9e\x76\x49\xcb\x8b\x26\x03\x4d\x25\xcd\xe4\x6e\xf8\xff\x7b\x46\x1f\xf0\x32\x7a\x79\xc5\xc0\x16\xf5\x73\x13\x91\x6b\x34\x3e\xed\xb9\xf5\x0a\xef\x55\x1b\x9b\xdb\xef\xf1\x33\xb9\x2d\x43\xe5\xc7\xde\x04\xfe\xe3\xd0\xac\xa6\x2c\xe7\x40\xe9\x9c\x68\x9b\x69\x3a\xfc\xfc\x64\xc3\x6a\x26\xad\x5f\x5f\xbd\xcc\x3b\x7f\x38\x79\x68\xeb\x35\xe7\x18\xad\xb7\x80\xff\xc9\x1d\xe8\x82\xc6\xe1\x22\xe3\xfd\xe4\x5c\xe6\x0f\x51\x71\x0c\x46\x0a\xa2\x21\x68\x7a\x93\xdd\x9d\xa6\x29\xdd\xb7\xf4\x48\x2b\xe8\x79\xe2\x26\xf2\xff\xf8\xad\xaa\xfe\xcc\x6c\x88\x96\x50\x69\xf8\xb5\xe9\xa0\x72\x0b\xb3\xce\x6f\x18\xb6\xc1\x7f\x3f\x31\x0d\xa2\x60\x6c\x98\x31\x07\xa2\x66\x61\xd8\x9e\xfd\xdf\x75\x9e\x82\x59\xd5\xcb\xdd\xe9\xc6\x94\x91\x7c\x2c\x31\x9d\xe0\xba\xa1\x84\x59\x70\x0b\x4c\xe4\xce\x59\x19\x66\xd4\x06\x86\xf5\x1c\xf3\x1f\x6a\xd3\x47\x83\xd8\x30\x89\xe6\x08\x5b\x8f\x9f\x59\xbb\x33\xcd\x60\x6c\x61\x92\x76\xc1\x4f\x50\xaa\x01\xb2\x56\x48\xed\x93\xd2\xa8\x0d\x5a\xf9\xbf\x87\x3c\xbe\x3d\xc2\xd1\x82\x23\x96\x93\x55\xde\xc8\xba\x68\x86\xb5\xe3\x60\x9a\xcd\xfe\x6d\xbc\x96\xf2\xb2\xa8\xec\x8b\xde\x2a\xb5\xc3\x1a\xb5\xfd\x5f\x8c\x50\x19\xc3\xd3\x92\x7e\x0c\xee\x71\xa7\xcf\xbd\x65\xcc\xde\x56\x95\x9e\x02\xd5\x37\x70\x6a\x72\x8c\x5a\xed\x1e\x42\xb6\xe7\xa5\xff\xd1\xd9\x6c\xff\xca\x39\x91\x5a\x6d\x3f\xbc\x77\xef\xf9\xab\x63\x24\xde\x3e\x87\xce\xac\x4b\xc9\x87\x2d\x0c\x54\x26\x18\xb5\x99\xa3\xdc\
x29\xf2\xff\x38\xaa\xc3\xfb\xa1\x20\x12\xa6\x96\xea\x62\xa6\xbb\x99\xe8\x28\x0c\xe1\xd8\xde\x1d\xed\xb0\xb6\xb8\xec\xeb\xcd\x84\x87\x09\x82\xc4\xfc\xbf\xcf\xb9\xaa\x17\xf4\x6e\xbd\x30\x82\xb6\xc4\xda\x8f\xcf\x86\x8a\xcf\xb1\xae\xbf\x70\x7b\xf6\xf9\x24\x10\x1c\x63\x3a\x31\xa1\x2c\xca\x6e\x7b\xf2\xb1\xcf\x46\x0d\x10\xa0\x6c\x8b\x8b\xb4\x37\x72\x45\xb2\xeb\xf5\x3f\xfb\xcb\xaa\xc9\xc1\x6f\x81\x63\x34\x50\xd4\x66\xc5\xe9\x0f\xde\x28\xda\xa5\x8a\xd9\xf4\xe5\xf5\x45\x03\x10\xe7\xb6\x7f\xd7\x5e\x1b\xd8\xf1\xea\x69\x38\x92\x68\x42\xf0\x8e\xa6\x85\x43\x93\x74\x9c\xca\xaa\xff\xab\x07\x71\x6e\xe0\x93\x36\x66\xa9\x49\xfd\x24\x25\x73\xef\x85\x54\xf0\x58\xbb\xe0\x67\xb7\x1a\xd8\x82\x99\x17\x3a\x16\x3a\x58\xc3\x3c\xb3\x2d\x4b\xc9\xfd\x0c\xf4\xa5\xca\x25\x64\x26\xf7\x1a\xe2\xb7\x60\xad\x12\x84\x98\x64\xdc\xed\xff\x2d\x90\x73\x8c\xdf\x4a\x9c\x17\x49\x83\x75\xa9\x56\x4b\x86\xa3\x24\xf6\xda\x1f\x55\x41\x09\x6e\x5e\x9d\x59\xe5\x3f\xe6\x15\x98\xba\x16\x95\xa1\x36\x9a\x1e\x96\x66\x18\xa2\xf7\xb2\x36\xc9\x3a\x93\x16\x14\xef\x4e\x07\x3c\x8a\x6a\x5b\xbc\x56\x5b\xce\x61\x97\xe4\x4a\xb0\xc2\xc5\xea\x0e\xa8\xe2\x18\x8f\x77\xdc\xdb\x11\x56\x99\x19\x13\xd0\x9a\x54\xb6\xf9\xcf\xef\x51\x78\x03\x2a\x26\x97\xaa\x4a\x10\xf4\x50\xf7\xfb\x54\xe2\x63\xbb\xfe\x41\x4c\xa9\x8f\x5c\x35\x2f\xe5\x46\x81\xa9\x46\xd4\x54\x5f\x6b\x1c\x64\x85\x43\xef\x7c\x3d\x7f\xf7\x3e\xfd\xd8\xbc\xfc\x18\xe6\xe3\x8d\x9f\xb2\x7a\xd9\xfb\xdc\xa2\x63\xd1\xeb\xd9\xc3\x92\x32\x4f\x70\xa9\x5f\x9d\x4e\x07\xe8\xc0\x0f\x20\x0d\xa6\xdd\x69\xd7\x75\x4f\xf8\x63\x8b\x27\xf2\xe3\x82\xff\x98\x31\xf3\x86\x6e\x16\xf4\xcf\x50\xf6\xd3\x8c\x6b\xe6\x82\x9c\xb2\xd2\x23\xdc\xf1\xcb\x2e\x11\xb5\xf0\x85\x04\xba\xdd\x37\xe7\x2f\xef\xbe\x6b\x9d\x8a\x54\x97\x92\x77\x24\x92\x14\x6b\x1e\x53\x5f\xaa\x01\x31\x45\x26\x98\x9e\xb1\xa7\x86\xd9\xe4\x18\xab\x97\xfe\xd7\x6e\xb9\x3e\x68\xd7\xb0\x1b\x9c\xa0\xde\x9b\xbd\xfc\x8d\xb8\x6e\x1c\x5a\x6e\x14\xf3\xd8\x65\xbf\x4f\x41\x4c\x3f\x18\xb7\x02\x90\xd6\x82\x16\x8c\x99\xae\x96\x7d\x9c\x39\xc5\xc0\x69\xc2\x0e\x89\xfa\x96\xd2\x58\x95\x4c\x8d\xa3\x45\x81\xe3\x3f\x85\xff\xaf\x89\xbe\x7e\xfa\xba\xec\xc8\x29\x9c\xa6\x23\xd9\xe7\xae\x5c\x44\x96\x6f\xe6\xf7\xd9\x6f\x29\x97\x52\xc7\x1f\xb3\x24\x09\x6d\x79\x4b\xf4\x03\x88\x30\x4a\xef\xf4\xf1\xe7\x3e\x3f\x21\xa4\x09\x2c\x1e\x53\xd8\x2c\xf4\x96\xc8\x0e\xf0\xd3\x69\xd3\xff\xf2\xdd\x5a\x72\x3e\x29\x81\xa2\xf6\x19\x1e\x41\x09\xb3\x2d\xc0\x5f\xb8\x9c\x1a\xa5\x9f\xbc\x6c\x93\x21\x5a\x71\x73\xde\x81\xd4\xca\xd3\xec\x87\xbe\x4e\x2a\xaa\xa1\xbf\x24\x35\x09\x3e\x26\x96\x4f\x19\x67\x29\x17\x26\x22\x5a\x6a\xcc\xba\x11\x87\xc5\x73\xb5\x04\x12\xc6\xff\xe5\x14\xc1\xc8\xf7\xad\xac\x08\x4a\x8c\xed\xef\xb7\xbd\xf9\x1a\x81\x13\xba\xd2\xfe\x2a\x3a\x85\x5c\xa0\xe6\x57\xa2\xaa\x55\x2f\xa0\x8c\x83\x3d\x7d\x98\xfd\xe8\x22\x7a\x83\xdb\xc9\x72\xf3\x99\xeb\x4e\xc3\x18\x14\x65\x3c\x29\xc4\x3c\x7b\x6f\xaa\x79\xca\xee\xb0\x46\xa3\xff\x6f\x8c\xe1\xfe\xf8\x65\x17\x64\xbc\x65\xf8\x70\x8f\x13\xed\x64\x8f\x56\x21\xf6\xa5\x04\x8d\xd3\xb1\x65\x70\x11\x7b\x0a\x0a\x59\x1e\x46\x36\x44\x88\x19\x35\x27\xaf\xb4\xdf\x12\x71\x0a\x5a\xbb\x27\xbf\x79\x5c\xf6\x61\x4a\x86\x99\x5a\x09\xba\x6e\xaa\x9c\x17\x0b\xee\xfd\x71\x0a\x83\x38\xfd\x27\xdd\x7f\x04\x1b\x04\xd1\x64\x19\x0f\x70\x11\x87\x70\x88\x16\xbf\x8c\xb8\x6c\x1c\x94\xf1\x4a\x61\x59\x05\x08\xb1\xe3\xbc\x08\x29\x2f\x6d\x39\x89\x1d\x86\x91\xd8\xc4\x9d\xa1\x19\x25\x8f\xcb\x06\x14\x67\xa7\xdf\x7c\xe2\x7d\x8d\xcd\xae\x50\x9b\x61\x09\x2d\xe2\x10\x2d\xba\x40\x55\x2b\xdd\x35\x9f\xcf\x71\xf9\x35\x76\xa0\xfc\x81\x85\x20\xfd\x85\x7d\xf9\x26\x90\xb5\x63\xef\x19\xbc\xac\xc5\x5a\x02\x12\x39\x1a\xab\x28\x85\xec\x42
\x58\x66\x78\xd2\xdc\x36\x10\x3e\x3b\x46\xf3\x97\xdb\xb2\x4c\xb9\xb6\xc7\x1a\x4e\x00\x54\x69\x47\x44\x56\x11\x7c\xb8\x4a\x6e\x17\xe3\xa7\xbe\xea\x3f\xc9\x87\x27\x0e\x42\x99\x9d\x3d\x53\x06\x6d\x8c\x71\x1a\x81\x6f\xe9\x71\x87\x90\xc9\xcf\x70\xfa\x08\xc7\x03\x3d\x5e\x2a\xe7\x8f\x2a\x3c\xb1\x55\xe7\xf6\x30\x03\x0a\xb2\xff\x5d\x3f\x12\x74\x5c\x76\x14\x1e\x49\x97\x25\xb3\x9b\x0c\x1a\xbb\x5e\xf8\x32\xdf\x3c\x10\x00\xdb\xc6\xb8\xce\x0f\x3e\x99\xd8\x15\x6e\x09\x35\xd5\x12\xb8\xd1\x99\xa0\xf0\xcf\xb1\xac\xd1\x71\xfe\x26\xb2\xa5\xf2\xe2\xba\x6c\x42\x2a\x68\x1a\x77\xf1\x4d\xcd\x1d\x86\xa9\xd9\x06\xec\x43\x3d\x78\x65\x35\x50\x12\x40\x0c\xd7\xc3\x7c\x70\xc5\xcd\xb7\x21\xdf\xe8\xdc\x65\x59\x41\x6b\x8b\x94\xdf\x0f\xb4\xa3\xe2\x9a\x62\x05\xe1\xe6\x25\x03\x19\x7d\x06\x15\xac\xf2\x53\xfa\xfe\xb5\x69\x90\xc5\x2e\x57\xe5\x99\x62\xed\x9b\x1b\x16\xea\x62\x7d\xad\x46\xed\xfe\x32\x26\x56\x41\x32\xa2\xb0\x10\x80\x04\xd2\x14\xce\xe8\x5c\xad\x67\x76\xfe\xb3\x57\x78\xdb\x2f\x7a\xfa\xd3\x53\xa5\xe3\x77\xae\x0e\xda\xcd\x9f\xdf\x78\x88\xc2\xf9\x14\xe7\xb6\xe5\x14\x7d\x25\xab\x92\x1c\xf1\xae\x45\xf1\xcb\x26\xd1\x92\x1c\x37\xb2\xd8\xbb\xf1\x4e\xff\x7c\x77\xd2\xd4\xa3\x4d\x03\xac\xe7\xab\x79\x37\xdd\x72\xd9\x87\x4c\xba\x9b\xec\x1f\x1e\xe0\xe8\x33\x80\xfe\x36\x18\x73\xf2\xca\x59\x07\x81\x61\x78\x25\x61\x1e\x73\x20\xac\x9d\x8c\xb9\x5b\xb7\x6c\xa3\xc0\x99\x7e\xcc\xb0\xee\xeb\x98\xb1\xff\xe4\x97\x24\x33\xa4\x18\xa9\x1c\x45\x0d\xac\x9b\x2c\x5c\x6a\xd5\x8d\xb3\xdc\x77\xcc\xc5\xba\x95\x41\xd4\x52\x70\xbd\x4c\x3a\x0e\x8b\x79\x8a\x7b\x8d\x5e\x3e\x8b\x20\xf0\xea\xe0\x4f\x7c\x21\xaa\x0b\xf1\x7c\xc2\x53\x8d\xd2\x90\xe6\x1e\x73\xa4\x94\xff\x9b\x07\x12\xb8\x76\x5d\xd7\xb5\xba\xd0\x04\x23\x44\x87\xd3\x84\xbf\xf1\x37\x1c\xf0\x48\x1b\xcd\xf4\x91\x44\xe5\x02\x84\xe5\x5d\xc6\x6a\x02\xb0\xe1\xd8\xa1\x31\x00\x68\x3d\x93\x96\xf7\xa1\xd3\x25\x66\x39\xa1\x5c\xca\xcd\xc4\xe1\x63\x87\x17\x11\xf1\xcb\x12\xc7\x0c\xda\xce\x46\xd3\x87\x0b\xf4\x78\x6b\x35\xe6\x26\xd9\x4a\x22\x28\xea\x21\xe9\x52\xd6\xfe\xcd\x2f\xb3\x2d\xfc\x95\xc9\xbb\xfe\x83\xb3\xbf\x78\xe3\x28\x8a\xe0\xc4\xdd\xda\x7a\x39\x4f\xfc\x25\x21\x6c\x5a\x4b\x71\x4a\x5d\xca\xfb\x58\x52\xee\xa9\xba\x51\x00\xbe\x8a\x57\x2a\x54\xbd\x42\xe0\x86\xfd\x8e\xef\x2d\x78\x03\xe2\x7d\x65\x27\x8b\x83\x53\xf3\x5b\xbd\xe7\x2d\xd0\x1f\x12\x46\x43\x30\x0f\x21\x08\x09\xe7\x80\x9e\x34\x3a\x31\xd8\xa4\x6d\xf5\xa1\x7b\x79\xb8\x1e\x81\xe4\x82\x13\xcc\x6b\x7f\x78\x69\x27\x24\x65\x30\xbb\xcf\xf7\xcc\x58\xf1\x87\x4b\x3d\xff\xf2\x3b\x47\xd9\xa2\x2d\x84\x49\xd2\xac\x9f\xe7\xcf\x72\x74\x5d\x68\x59\x71\x30\x06\xa7\xb1\xf1\x32\x97\xf5\x23\xdb\x72\xc2\x80\x11\x05\x92\xe4\x6d\xfb\xe6\x82\x9b\xe3\x2d\xa6\xe4\x45\x39\xb6\x12\x2d\x49\xb4\x44\x97\x03\x19\xf1\x4e\xaf\x1d\x98\xa8\x89\xb3\x31\xbc\xb0\xa1\xe2\x95\x16\x85\xd8\x35\x55\xc8\xcc\x03\xec\x38\x69\x64\x84\x33\x6d\xa4\xbb\x1e\xa3\x26\xc5\x2c\x3b\xb4\x65\xd8\xde\xfe\x37\xc7\xbd\x44\xdb\xda\x33\xfd\xdf\x33\xd9\x6f\x5f\x37\x3c\xee\x6e\x64\xdc\x0f\xb3\x7d\xbc\x5d\x35\x38\xb4\x63\x5a\x29\x70\x20\x90\x0f\x28\x7b\x0a\x9c\x8a\xaf\x44\x69\x93\x0c\x08\xd4\xd1\x9a\x40\x17\xe4\xd4\x56\x23\x35\x1d\x22\x78\xb1\xd4\xaf\xaf\xc1\x9e\xaf\x0b\xe9\xf8\xfd\x68\xf9\xee\x14\xfa\x7b\xbb\xe9\xa5\x06\xa7\xd5\x8b\xd8\xf6\xc4\x9a\xe2\x8f\x9e\xff\xe7\xbb\x79\x95\x28\xbf\xdd\x96\xbb\x5b\xb1\xfe\xe0\x22\x9b\x2e\xbf\x97\x0a\x15\x6c\xc0\x19\xe3\xfe\x83\x05\xfb\x37\xf1\xf1\xe2\x4a\x33\x36\x59\xfe\x96\x15\x67\xe8\xe2\x0a\x27\x59\xf9\x17\x40\xb4\x9e\x5d\xaa\xf0\x07\x1b\x22\xe5\x1a\xe9\xa8\x25\xa7\x02\xa2\x49\x17\x49\x47\x28\x93\x19\x8a\x24\x16\x34\xf2\x9e\x11\x0a\x0f\xd9\xeb\x26\x6b\x0d\x22\x6f\xe1\xed\xaa\x77\x0
4\xb1\x7d\x94\x1e\xc5\xa5\x09\x26\xe4\xd7\x92\x1c\xf1\xdf\x7c\x48\x8f\xd3\xed\x93\xbc\xe3\x5e\xd6\xfd\x9f\x58\xee\xf1\x86\x65\x48\x5d\xe0\xe5\x65\x63\x2f\xb4\x9d\x92\xbf\xcc\xa8\xa4\x81\x26\xa6\x00\x9a\xd6\x53\x5e\x38\x46\xa1\xab\xb8\xa2\x95\xb9\xe7\x0c\xb5\x37\x75\x45\xcd\x2c\x5c\x83\x48\x3f\x4d\x25\x7c\xff\x31\xb3\x54\x53\xbd\xda\x64\x57\xe9\xfa\xa6\x51\x1f\x9a\xdb\xcf\xf0\xf7\x0e\x42\x10\x96\xbc\x91\xd2\x42\x05\x48\xaf\x6b\x45\xe0\x2e\x19\x0e\xbb\x8f\x31\xa7\xdc\x7f\x48\x3c\xeb\x61\x08\xfe\x2a\x77\xcd\x30\x2a\x27\xbe\xfb\xa1\x05\x16\x51\xf3\x18\x2c\x61\x54\xa7\x7f\x85\x1b\x2f\x30\x33\x17\xa2\x90\xfe\xe1\x84\xc4\x80\x9c\x4e\xbc\x28\x66\x97\xc0\xae\x57\x20\x59\xba\x9d\xa4\x4f\x2d\xd6\x62\xab\x9e\x69\x42\x57\x7e\x89\xd4\x3d\x54\x47\xc1\x07\xb8\xfd\xc3\xc1\x5f\x26\x84\x55\x19\xe5\xcb\xe3\x77\xa3\x94\xdc\xb5\x32\xdd\x1f\x83\x21\xbd\xb3\xe9\xed\x22\x54\xf1\x4f\x91\xf7\xd6\x16\xa6\x99\x34\xea\x3f\x90\xc3\xaa\xae\x20\x06\x7f\xf4\x8c\x27\x60\x33\xac\xfc\xf9\x2f\xcb\x98\x17\xce\x2c\x59\xf7\x05\x16\x86\xb4\xfd\x3a\xbc\x53\xd5\xa7\x55\xba\x1d\x62\x7f\x40\x6e\x28\x3a\xad\x74\x5e\x07\x0e\xc3\x7d\x89\x1c\xbf\x90\xbc\x5c\xd4\xa5\xe1\xce\x48\xe7\x45\x0b\xc5\x95\x57\x85\x71\x7b\x0a\xdd\x17\x50\x05\x36\x39\x07\x62\x1a\xc2\x3e\x68\xb4\x83\xa2\x2e\x20\x7a\xaf\x94\x55\x75\x9f\xb4\x88\x49\x8b\x9e\x5b\x60\x5b\x38\xd6\x6e\xf9\xb3\xde\x3d\x32\x25\xc5\x21\x1c\x37\xa4\x40\x81\xf9\x63\x3b\xdc\x12\x78\xeb\x53\x30\x18\xc3\xe8\x6a\x10\x44\x12\x6b\x6f\x16\x11\x18\xbf\xa7\x0a\xf8\xe5\x93\x4b\x25\x9e\xe1\x2c\x1b\xda\xc8\x74\x27\x9b\x10\xba\x85\x02\xcf\x1f\x2a\x23\xd7\x52\xd9\x78\x65\x40\x87\x5e\xc8\xe8\x2b\x79\x3b\x14\x35\x0a\xa1\x93\xcb\x5f\x52\x70\xc8\x05\x5a\xa0\x39\xaa\x6a\xf5\x02\xf2\x24\x18\xc0\xa7\x09\x8d\x9e\xb9\xee\x07\x1b\x8b\x67\xdf\x7b\xf9\x12\x30\xe7\xf0\x04\x18\x85\x0b\x6d\xf7\xd6\x97\x4e\x21\x77\xbc\xde\x4c\x0d\xfd\xeb\xe7\xb2\xfe\x1a\x13\x32\x48\x7f\x7b\x15\xf3\xd2\xe4\x53\x91\x13\x32\xab\xc8\xc4\x16\x68\x3c\xe5\x61\x5e\x7a\xd2\xc9\xe8\x1b\x9c\xc8\x81\xee\xd4\x34\xd2\x84\xa7\xa4\x7f\x7d\xdd\x24\xa1\x56\x21\x9e\x28\x7b\x54\x4b\x3e\xda\x3d\x9c\x95\x3a\x3a\xae\xcc\xea\x36\x81\xa4\x63\x35\x0f\xbf\xbe\x28\x27\xd8\xda\x86\x7c\x98\x15\xac\x24\x1c\x2f\x7d\x07\x47\x63\x7d\x0b\x94\x1e\x1c\xba\x61\x4e\x5f\x4a\x5b\xee\x7a\x8e\x45\x64\x45\xdb\xff\xd1\xa4\x9e\x41\x28\x82\x67\xf6\x46\x33\xb5\x7f\x82\x00\xe2\xf5\xe8\x83\x13\x0b\xe9\xb5\xb2\xa7\x89\x8e\xa5\xa2\x16\x69\x4c\x7e\xe0\xba\x83\x51\xe5\xc7\x8f\x46\xfb\xe2\xba\x68\x26\xd2\x98\xe4\x18\xb4\x25\xd4\x26\xd8\x4b\x1b\x94\x76\xd7\x21\xc8\x3a\xb5\x6e\xdb\x3d\x98\xfa\x78\x41\x38\xc5\xf4\x48\xa9\x6b\xad\xf9\xed\xb4\x6c\x13\xfb\x32\xbd\x07\x67\x1d\x02\x4b\x45\xd3\xa9\x55\x2a\x0b\x49\x82\x75\x13\xf9\x63\x97\x0c\x16\xaa\x91\xe9\x2c\x6d\x99\x1f\xbc\xa2\xd4\x85\x35\xb5\xa6\x71\x59\x33\x90\x27\xaa\x0f\xa0\xaa\x4b\x3e\x30\xfa\x60\x67\x93\x03\x69\x3a\x8a\xf2\xdd\x29\x9d\x37\xf2\xfd\xa5\x07\xc1\xc2\x3a\xa2\xe6\xcf\xd9\xeb\xba\xff\xd8\xf2\xb4\xc1\x27\xa8\xc5\xdf\x68\x1d\xc9\x3e\x87\xcc\xcc\x2d\x1f\x6e\xaa\x70\x11\x63\xa3\xe9\xf6\xe6\xaf\x44\xf7\x59\xb4\x56\x5a\xc5\xb5\x53\xf7\xb0\xb5\xe0\x4b\x94\x65\x54\x5a\xa0\x2d\x35\x43\xe2\x74\xa2\xe0\x95\x2c\x49\xa4\xc9\xed\xe6\x9c\x96\x38\x0f\xa5\x26\xe2\x02\x6c\x3c\x9e\xf3\xa2\x69\x5b\x50\x7c\x35\x15\x9e\xcc\x1a\x74\x2d\x4f\x13\x65\x08\xf5\xe5\x33\x80\x6b\x25\x28\xbb\xa1\xec\xb4\x00\xeb\x86\x96\x97\x22\x8b\x7a\xd5\x9e\x8d\x1a\x79\x19\x90\x5f\x18\x8a\x05\x7f\x57\x0c\x2f\xee\x5e\xa2\xfe\xef\x8f\x35\x25\x9d\xe4\xe8\x29\x7d\x8e\x68\xf4\xfc\x0c\x87\xc5\x58\x11\x7e\x69\xee\x2f\x56\xea\x5a\xe4\xaf\xf9\xa6\x79\x3d\x10\x84\x51\xf1\xf3\xe8\x7c\x0d\x99\x
40\xe4\xdb\x2f\xd7\x21\xc5\x4b\x15\x64\x25\x43\x5b\x85\x5e\x0b\x83\x88\x72\x3d\x05\x6c\xdb\x01\xfe\x08\x57\xd3\x5b\x07\x03\xd9\x48\x15\x8f\x35\x3a\x3e\xab\xa7\x14\xac\xa6\xd7\xce\x66\xce\xf9\xa5\x93\x81\xdf\x95\x84\xa6\xa4\x24\xcf\x08\xbd\xca\x4e\x78\x73\x68\xcc\x5c\x8a\xe2\x47\xec\x67\xe3\xcb\x95\x86\x0c\xe5\x1e\x23\xf6\x72\xfb\x39\x6c\xbf\x4a\xcf\x5c\x8a\x13\x4d\x7f\x24\xf6\x80\xc5\x11\xff\xcc\xa8\xf5\x65\x16\x2e\x9f\x94\x19\xff\xa0\xa7\xf3\xe8\x7f\xdb\x2d\x97\x39\x23\xff\xb5\xa1\xa8\x3c\xa9\x3d\xb3\xbe\xad\x58\x93\x60\x9d\x8c\xef\x74\xb1\x46\x20\xa6\xd0\xdb\xc8\xa3\xf6\xf7\x89\xf4\xcc\x54\x6e\xfd\x4b\xa2\xa3\xf6\x0d\x5e\xfd\x40\x0b\x7b\x2a\xbe\x1c\x7f\x7e\x14\x38\xd1\xbf\xa9\xfa\x60\x78\x47\x54\x74\x12\x16\xf7\x99\xd6\x1f\x94\xee\x04\x5c\x2c\xaf\x71\xc1\x08\xa9\xa3\xbd\x24\xff\x94\x1f\x65\x09\x3c\x47\x16\x2d\x80\x81\xe9\xef\x47\x9f\x74\xf6\x14\x9c\xe1\x7a\x64\x01\xc0\xef\x19\xbe\x0a\x73\xb3\xac\xa0\x80\x34\x30\x49\xf4\x9d\x30\x7f\x39\x69\xb1\xe8\x6a\x2a\xd9\x72\x67\xac\x25\x4c\x1c\xde\xea\xac\xef\x75\x71\xcf\x17\x6f\xe7\x79\x19\x52\xfd\x91\xb8\x0a\x61\x49\xd3\x49\x69\xa7\x8a\xe0\x41\x46\x70\xf9\xed\x44\x72\x60\xc1\x83\x4a\xe7\x86\x1e\xf5\x1f\x02\xec\xbc\x2b\x40\x0a\x3d\xf2\x2d\x5a\x1b\x72\xb5\x36\x89\x63\xa0\xac\xf9\x92\x5d\x2f\x46\x81\xb2\xcb\xd0\xbd\x0a\xc4\x12\xf3\xd8\x0b\x44\xd0\xa9\x2c\x4f\x04\xb6\x12\xbb\x3e\xbb\x6d\x9b\xf3\x2d\xb5\x49\xca\x82\xd5\x2c\x23\x08\xae\xe5\xef\xe4\x15\x83\x2f\xed\xae\x26\xab\xfd\x95\xa4\x67\x38\xa4\x41\xc6\xdb\xcb\x5e\x6a\x41\xd6\x39\x3f\x35\x4f\x94\x3a\xe4\xd6\x21\xec\xae\xc4\xc4\x81\xfe\xf8\xe9\xfb\x87\xdc\x2f\x1d\x26\xea\x0f\xf9\xcb\x8f\x58\x6d\xd0\xf7\x13\x2d\x9f\x62\x9d\xa3\x4c\xd8\xf1\x3f\x6b\x0b\x8c\x56\xe7\xea\x2b\x21\xa5\x26\xb2\x62\xb5\x2f\x33\x27\xd4\x03\x26\xfa\xa3\x85\x14\x45\x5e\xb3\xf1\xac\x4a\xfb\x8b\x41\x8f\x0e\x6b\x4a\x3a\x22\x1d\xcb\x1d\xc7\x52\x2c\x19\xcc\x4f\xb3\x02\xa8\xaa\xe4\xea\x5a\x13\x98\x9a\x61\xf6\x86\x55\x05\xd5\x98\xfc\x96\x57\x79\x0e\x90\x57\xda\x5e\xfe\x68\x9a\x4e\x06\x52\x07\xf1\xbe\xa8\xce\xd5\xa6\x18\x76\xef\x08\x89\x78\x5e\x5b\x9d\x4d\x3f\xdc\x04\x9b\x5c\xef\x4a\x38\xe7\x3f\x50\x66\x50\xe8\xe8\x7a\xfa\xba\xc0\x11\x6e\xe2\x35\x74\x96\x98\x49\x1a\xc4\x6f\xf4\x2e\xb8\x47\xed\xbd\xf7\xd7\x6e\x07\x5a\x5a\xe1\xf2\xf2\xe7\xc0\x6d\xce\x57\xdc\x94\xee\x36\x7a\xc8\xa6\x49\xfa\x18\x14\x17\x55\x12\x71\x06\xe5\x1d\x95\x79\x76\x77\x58\x45\xd3\xa0\xc3\xf6\xcd\x82\x26\x00\xb2\xfa\xfd\xf0\x58\xc1\xe7\x5a\x60\x18\x73\x92\x7e\x7c\x12\x8a\x4f\x38\x52\xcc\x0e\x49\x09\x62\x5d\xe2\x6d\x14\x1c\x90\xfa\xfc\xcf\x71\x1d\x73\xcf\x87\x23\xa5\xc7\x1c\x92\x2c\xb7\xc2\xa1\x81\x9f\x43\x8e\x5e\x11\x31\xa9\x68\xb2\x44\x4f\x12\x2e\xa7\xe3\x5a\xb6\x9f\xb3\xe3\x7e\xa7\xf3\xc1\xfe\x3e\x49\x24\xb6\x75\xd0\xd6\x0f\xd0\xa5\xc6\xd8\x69\xc6\x92\x32\x75\x53\x22\x32\xc2\xbc\xbe\x56\x3b\x86\x6c\x1f\x20\x69\x95\x8a\x23\xa1\xfe\xc2\x2a\xed\x18\xac\xd3\x09\x14\x23\xad\xc1\xd9\x51\x59\x5c\xc3\x12\x85\x31\xa5\x5b\x8a\x52\x18\xcb\x30\x3c\x21\xc9\x15\x10\xc9\xa3\xa4\x1f\x04\x14\xb5\x30\x72\xde\x30\x8f\xf2\xf9\x1e\x7f\x15\x4f\xb2\xc0\x39\x68\xc0\x7b\x40\xc1\x70\xc4\x10\x10\x23\xc0\xef\x8b\xed\x17\xa8\xbe\x13\x5f\x7e\xe5\x88\xae\x85\x7e\xc6\x15\xa1\x39\x79\x4a\x0c\x4d\xa6\xaf\xf7\xad\x0d\xd1\xfa\xe9\x2f\xec\x4f\x6f\x3e\x99\x5f\xfe\xf1\x5a\x6f\xc6\x57\x9e\x38\x8c\x22\x88\xd1\x06\xbf\xaa\x62\xf8\x7b\x7b\x9b\x55\x28\x86\xe1\x90\x66\x91\xd2\x38\xcc\x15\xbb\x04\x98\x40\x28\x1a\xfb\x9d\xef\xe7\x6a\x7d\xf0\x0e\x24\xf3\x2b\x6e\x4c\xfe\x02\x81\x64\x15\x13\x15\xfa\xe4\x06\x2b\xd9\x49\x22\x1d\x1b\x41\xcd\xa1\xf6\xb0\x09\xc3\x70\x8e\xa5\x0e\x86\x39\x92\x79\x19\x5f\x08\xb9\x9f\xbd\x08\
d\xe4\xa1\x3a\x48\x16\xf6\xf8\xa2\x34\xa3\x7e\x67\xa9\x68\x6b\x7f\xa1\x3c\xac\x87\x9e\xc7\x3e\x32\xa6\x19\x22\xff\x48\xb2\xba\x0f\xf1\xcf\xf5\x9e\x5c\xb0\x33\x03\x93\xc6\xfc\xc0\x4c\x0c\x23\x67\x05\xdd\xdb\x40\x68\xfd\x96\xa0\x21\x9e\xdf\x77\x06\xea\xbb\xfa\x0e\xbd\x5e\x4a\xb4\xe1\x9d\x38\x81\x31\xb5\x0c\x15\x2a\x49\x36\xe6\x77\xa3\xfe\x82\x97\x01\x95\x3d\x31\x25\x62\x4d\xe7\xbf\x1b\xae\xf0\x88\xda\x57\x82\x26\xe1\x9d\x77\x89\xc6\xeb\x5e\x8d\x32\xc9\x20\x30\x58\xeb\xcb\xfa\xbe\xef\xe7\x61\x8e\x5d\xa6\xf1\x22\x09\x93\xa3\xa9\xbe\x19\xbf\x4b\xa7\x78\xe8\x0d\x13\x5a\xde\xd9\x19\xf9\x79\x08\xeb\x01\x5d\x41\x5b\x46\x16\x6d\xc4\xd7\xd7\x12\xfa\x80\xd3\xa3\x70\xa0\xe3\xa7\xb6\x2a\x4f\x13\xa1\x0d\xbe\xe1\xa2\x85\x43\x70\x88\x51\x6f\x8d\xe9\xbf\xf3\x25\xab\xcc\x6f\xfe\x72\x5a\x17\x4c\xf8\x32\xd8\xc6\xd5\x22\x79\x18\x1e\xe4\x31\x52\x8c\xf3\x27\xec\xc0\xef\x65\x6f\xfc\x11\xf2\xa3\x36\xef\x81\x83\x8b\x2d\xe6\xf9\xf7\xdb\xd0\x91\x61\xbb\x71\xf0\x64\xc7\xef\xa3\x4c\xfb\xbd\xf3\xc2\xfc\x2a\x66\x71\x1f\x21\xeb\x2e\x09\x44\xbd\x80\x2d\x14\x00\x1d\xfc\xe5\x68\x75\x79\x78\xc8\xc1\x2f\x9b\xdd\x3e\x68\x72\x98\x98\x14\x3c\xef\x45\xf0\xad\x23\xfc\xa8\xfc\x54\xfb\xe9\xca\x94\xdf\x0a\x05\xcb\xba\x7c\x77\x33\x43\x62\x33\xf9\xd9\x64\x32\x8f\x49\x09\xe1\xe9\x2b\xdc\x9a\x3d\x1e\xb8\x4b\x4c\xb0\xc2\x01\xec\x86\xd4\x53\x1a\x39\x81\x76\xc6\x4c\x51\x49\xaa\x04\xa0\x1b\x24\xf5\x1a\x3f\xe4\x94\x27\xbc\x2c\x51\xf1\x8e\x32\x27\x46\x51\x8c\x48\xe9\x63\xe8\xd6\xc0\x6c\x25\x1a\xdf\x51\x2a\x47\x54\xe2\x27\x65\x86\x58\x5b\x12\x2b\x1b\x83\xab\x45\x11\xe8\x18\x98\x29\x9d\x5f\x41\x55\xa6\xcc\x3c\x00\x2c\xff\x7a\xc5\x97\xa5\x9a\x69\x2e\x31\x29\xf8\xf7\x73\x56\x53\xb3\x1a\xcf\x12\x99\x03\x36\xf9\x6a\x71\x8a\x75\x2d\xaf\x5d\x6e\x5a\x39\x0f\xff\x22\x53\x24\x6f\xa2\x56\x57\x9a\x8a\x32\xe7\xf7\x2e\xc2\x6f\xb6\x87\xb2\x8a\xe0\x52\x12\x99\x2e\x69\xda\x8f\x8c\x97\xdf\x6b\xf9\x36\x77\xb4\x95\xa6\x7b\x4d\x8e\x85\x25\xfb\xb2\x0d\x09\x54\xa6\x80\x66\x3f\x64\x0b\xed\xe6\xa8\x8f\x79\x32\xc1\x89\x2e\xb6\x36\xc5\xd0\xda\x8f\xdf\x74\xee\xaa\xb5\xc2\xa4\xcb\x6b\xd2\x6e\xcb\xff\xe6\x28\x93\x5d\xb9\xb5\x7d\x4f\x8c\xaa\xa7\xe4\x77\xb8\xf1\x65\xff\x0a\xe9\xe2\x5f\x1c\xbb\x20\xf3\x8d\x86\x78\x7f\x71\xda\xae\x08\x7c\x31\xb6\x5d\x74\x7f\xc8\x6f\x70\xc4\xbb\x38\xe6\x05\x87\xb2\xc3\x95\xba\x44\x17\x97\xa4\x99\x3c\xad\x3a\xac\xc7\x68\x7f\xae\x3e\xb2\x95\xf5\xfb\xab\xcd\xe4\xee\xca\x1d\x14\xf6\x84\xad\xa6\x8c\x12\xa8\x74\x7f\x12\xcf\xef\x73\xf9\x5f\xdb\x2b\x9b\xde\xb3\x27\x3c\xc9\x0b\x35\xca\x32\xfe\xf3\xad\x96\x2f\x42\x32\x05\x84\x9a\xa8\xca\xdf\x73\xf9\x81\xc2\x07\x45\x9a\x60\x1e\x4d\x02\x81\xa0\xfc\x33\xa2\x1e\x16\x6e\x88\x36\xdc\x19\x9a\xd1\xc6\x4a\x2b\x35\xad\xd5\xef\x33\x6b\x7a\xdc\x1d\x58\x57\x7e\x8e\x50\x25\x9a\xcd\x2d\xe4\xe4\x98\x97\x08\xbb\xe9\x7a\x22\x8a\xd9\x69\xa6\x45\x82\x19\xa6\xaf\xb8\x5e\x44\xba\xa6\x21\xa0\xdc\x34\xc9\xee\x89\xd2\xc7\x8b\x9e\x1d\x40\xa1\xf6\x73\x26\xfd\x27\xdd\x47\x3c\x3d\xcd\x3c\x5f\x76\x19\xfd\xc4\xc0\x21\xa0\x1f\xcf\xe2\x5b\xa8\x9e\xab\xaf\x1a\x3f\x44\xa6\x2f\x48\x8c\x2b\x86\x12\x69\x62\x47\x6a\x97\x01\x80\xa0\xe3\x64\x0f\xce\x91\x84\xca\x74\x42\x4b\xad\x14\x8b\xef\xa4\x0a\x6a\xd8\x33\x3c\x22\x17\x13\x24\xac\xb7\x56\xd5\x1f\x47\xe4\x0d\x2e\xf5\x6a\x8e\x24\x1c\xaf\x17\x62\xbc\x98\xa8\x8b\x51\x6f\x8a\xa0\x29\x1b\x56\xf9\xf1\x25\x18\x15\xb4\x9a\x20\xba\xe3\xbf\x7b\x41\x3e\x97\x76\x75\x23\xfd\xef\x17\x7b\x4a\xcc\x9c\x03\xfd\x8e\x2f\x01\x40\xb0\x56\x93\xe4\x7c\xd9\xa3\x86\xda\x0c\x2a\x8d\xb0\xc5\xa1\xd1\x7e\xbf\xa8\x96\x06\xb7\xee\xc2\x97\xf2\xe3\xeb\x8b\x2e\x8c\x51\xc5\x37\xbe\x2e\x4c\xb4\x5e\x12\xf9\xc3\x98\x14\x41\x12\xd9\x9c\x
ea\x28\xa3\xa4\xa2\x39\x10\x47\xda\xca\xd3\x7b\x22\xe9\x86\x41\xe5\x8e\xe8\xf5\x2a\xbe\x33\x9c\x1b\x58\x0e\x0d\xb1\x0d\x47\x26\xcd\x72\x29\x83\x5b\x9a\x2b\xa0\x79\xf5\x6e\x06\x4d\xd7\xd4\x66\xca\x8c\x67\x18\x63\xb9\x2f\x0b\x74\xf2\x6f\x13\x4b\x6e\x44\xca\xcd\xe7\xea\xe4\x8b\xd1\x51\xe7\xf3\xc7\x8c\x46\x6a\x8e\xa7\x08\x40\x38\x4b\xa9\xef\xb3\x16\x6d\xa6\x37\x6d\x53\xeb\x44\x72\x95\xf5\x15\xff\x7d\x3b\xf9\x86\xa1\x4c\xd3\xd2\x54\x79\xab\x49\x31\x5f\x2f\x89\x65\xec\x96\x65\x73\x10\x9e\x61\xc7\x81\xb9\x2f\x48\xfa\x71\xfd\x36\x42\x25\xee\xae\x82\x65\x21\x8f\xf6\x2c\xd8\xcc\x10\xb3\xe2\xe4\x8b\xa9\xc4\x3f\xe0\x37\x6d\x15\x3d\xe9\x92\xcc\x2f\x78\x49\xd1\x82\xbe\xb9\x7d\xb6\x22\xf4\x29\x62\xbf\xb6\xf1\xc5\x79\x8a\x7f\xba\xce\x18\x31\x07\xd7\xcd\x48\x79\xd9\x8c\xce\x76\x5d\x1b\x72\xe8\xd0\x25\xb8\x45\xd2\x9b\x84\x9b\x70\xab\xd8\xf5\x8f\xeb\x2a\x8d\xad\x39\x96\x2d\xac\x33\xb8\xbd\x01\x33\xbc\x10\x26\x0e\x7a\x40\x71\x88\x3c\xbd\xee\xd3\x3d\xd5\x02\xbe\x6b\x0a\xbb\x87\xcc\xcb\xc4\xee\xba\x29\xb2\x26\xae\xdc\x01\xa2\xc5\xad\xc4\x21\x98\x8a\xf4\x4d\xee\x6e\xa0\x95\xa9\x35\x57\x00\xe5\x8e\x96\xe7\x46\xfa\x9a\x9c\x62\x58\x89\xad\x48\xa6\x8e\x05\x8c\xdc\x8d\x16\x83\x91\xa4\x5b\x02\x9c\x63\xcb\x5e\x1d\x54\x9a\xdd\xb8\x27\xdd\xaf\x35\xc2\x72\x37\x46\x8a\x83\xd8\x68\x0f\xe0\x79\xc8\x20\x6d\x74\x57\x76\x4f\xd3\x7d\x7b\x72\xd5\x81\x7b\x39\xca\x42\x43\x25\x16\xdb\xe0\x21\x2c\xd9\x4a\x3d\xc3\xf8\x51\x4e\x85\x5d\x75\xd0\xb3\x18\x81\x61\x8e\xee\x6b\x1f\x30\x5d\xfe\x3c\xd2\x28\x49\x1f\x19\xde\xe8\xcf\xea\xb3\x33\x80\xce\x3e\xf6\xfc\xb1\x4f\x26\x77\xb8\x35\x5e\xe1\x92\x2d\xfd\xde\x35\x8e\xe7\x4e\x1a\xf0\x3a\x3b\xef\xbf\x5f\xab\x9c\x0b\x57\x7d\xd4\xb4\x11\x97\xd1\x6c\x86\x50\x0d\x1f\x6b\x70\x86\xfb\xfd\xcb\x85\xa8\xc9\xd7\x5d\xfe\xb5\xf1\x27\x37\xa6\x1f\xf1\x00\x6e\x57\x9c\x6e\x01\x2e\x01\x27\x62\xed\x11\x99\xfe\xa9\x8a\xb4\x8a\xbc\x88\xa8\xa9\x4d\xaf\x33\xa6\xa3\xc5\x03\x74\xb4\xad\x4f\x2a\x5f\x35\x9d\x53\x69\xc7\xaa\x2b\x24\x93\xec\xa2\x3b\xc3\xd6\x8f\xb6\x6d\x91\xc4\x33\x70\x18\xac\x4e\x61\x39\x04\x6c\x10\xda\x02\xa4\xd9\x72\x07\xd7\xde\xdd\xd5\xad\x10\xc4\x2b\x97\xec\x7c\x2d\xce\x66\xb5\xf4\x9c\xf0\xb1\xd9\xf8\x36\x15\xca\x6a\xdb\xc2\x73\x57\x07\x40\x89\x68\x42\x5c\x33\x98\xed\x7e\x8e\x8b\xe8\x62\x8f\x5c\x1d\xad\xbb\x29\x9d\xcc\x95\x26\x18\xf4\x60\xa2\xb7\xe7\x59\xe6\x29\x03\xf9\x53\xa4\x20\x32\x8b\x9d\x2d\x56\x4d\x1f\xde\x28\xe7\xab\xc9\x49\xa8\x60\xf6\x55\xee\xdd\x98\x83\x2a\x19\x80\x8e\xee\x9e\xec\xbf\xaa\xc6\x3d\xe4\xc4\xa8\x74\x77\x7f\xd7\x56\xaf\x2e\x9d\x19\xef\x04\x27\xbd\x5b\xce\x7a\x93\xed\x0f\xb1\x60\xba\xe5\x98\xdc\xa0\xc6\x5b\xdf\xdd\x2f\x70\xea\x6c\x9c\x21\xee\xf2\x3e\xbb\xca\x21\x6d\xcc\xd7\x30\xd9\xf1\x19\x28\x9b\x08\xfd\xae\x77\x4f\x7c\x98\x5a\x63\xeb\xf6\x54\xe1\x66\x5a\xcc\xcc\x74\xdf\x6e\x8d\x29\xef\xca\x84\x9a\xd1\x5b\xbb\x08\x81\x31\x23\xd1\xf9\x59\x2c\xe8\x8f\x1f\x21\xc0\xd7\x1f\x92\xaf\x36\x43\xe4\x9d\xb4\x49\x2d\x3c\x32\xea\x18\xd1\xbf\x6c\x67\x57\x99\xd0\xf2\x93\x62\xb2\x5f\x94\xc9\x3a\xbf\x19\xd0\xbd\x4f\xb0\xad\x33\x8c\xd6\xac\xad\x0d\xde\x53\xbb\x7b\xbc\x28\x17\x63\x14\xf4\x0d\x4e\x64\x46\x53\xcd\x89\xd7\xb3\x75\x30\xe3\xa2\x42\x4c\x74\xf6\x41\xa0\x58\xa8\xa5\x3c\xf4\xfb\xa4\xd9\x29\x3e\x3b\x44\xaa\x2d\xfd\x28\x58\x95\xe4\x3c\x5f\x3a\x69\x1e\xf6\xb9\x8b\xc1\x95\xe9\x60\x59\xc9\xe4\x38\x07\x8a\x87\x4e\x2f\xd5\x09\x19\xc6\xe6\x60\x11\xd0\xdf\x3d\x50\x2c\xe5\xa8\x36\xb0\xf7\x0a\x48\xe8\x63\x08\xd8\x0a\xb1\x82\x89\x9c\x20\x59\x8a\x47\x43\xef\x64\xeb\xfd\xc9\xbe\x6c\xbb\x7e\xbb\x23\xe4\xeb\x0b\x7a\x01\xd1\xfa\xb7\x7c\x58\x88\x59\x68\x77\x54\xe0\xe0\x93\x98\x51\x49\x09\x1a\x72\xc0\xb8\xf7\x60\
xd4\x8e\x74\xe8\xb7\x15\xdf\x19\xe5\x2f\x58\x0a\x55\xd0\xab\x0e\x2e\x9b\x0a\xa7\xd8\x89\x66\x90\x4c\xc4\xe6\x8e\x2d\x3a\xd7\x61\xde\xdf\x06\x89\x36\xd1\xa4\xf8\x93\x37\x48\x0b\x13\x9e\x6d\x5e\x18\x06\x67\x06\x2a\x3f\xfa\x90\x1d\x00\xb7\xfd\xd8\x2a\x12\xf9\x1a\x79\x28\xe3\x92\xe4\xa4\x59\x96\x7d\xf5\xef\x28\xe8\xc8\xe2\x33\xba\x04\xb1\x05\x6a\x5e\x7c\xea\x7d\x81\x13\xf5\xdb\xc5\x7c\xf5\xe7\x61\x4f\xc7\xfb\x51\x61\x54\x70\xd0\x1e\x62\x8d\x8f\x17\x0c\x42\x91\x34\x04\xef\x53\xc5\xe3\x76\xc7\x22\x8c\xe8\xaf\x8d\x33\xc5\x25\x29\xe6\xb8\xe2\x0a\x97\x8d\x2c\xa4\x89\xfd\xba\x63\x54\xfd\x58\x0f\x2f\x77\x55\xf3\xf1\xbe\x76\x38\xbe\x33\x83\xc9\x7c\x2d\x05\x7d\xde\x73\x39\x37\xa4\xf9\x61\x4d\xe8\x0f\xb8\x25\x9c\xe7\x9b\x24\xc3\x9d\x45\xdb\x38\xe2\xf9\x7d\xf4\xfb\x2a\xcf\xb4\x7c\x2a\x90\xdb\x8b\x5e\x3b\xde\x26\x7f\xec\x60\x53\x87\x6f\x1d\x5c\xe0\xe5\x34\x89\xae\x10\xe2\x48\x7b\x73\x6c\x2f\x7b\xc4\x48\x91\xaf\xff\x02\x74\x65\x14\x18\xbd\xee\x4d\xfe\xe3\x3b\x01\x2e\x04\x34\x5d\xb6\x03\xb3\xde\x2d\xbd\x94\xc4\x1d\x95\xc7\x44\xcd\xde\x9a\x7a\xdb\xf3\xb5\x27\xd1\x52\x03\x3f\xd8\xb6\x12\x8a\xcf\x37\x03\x42\x9d\x0e\x08\xaf\x8d\x3e\xbc\xaf\x29\xef\xb6\x7d\x2c\xb5\xfd\x44\x1b\xfc\xb5\x08\xd5\x29\xa2\x90\xdc\xa6\x61\x4b\x83\x41\xfd\x36\x8f\x87\x6a\x95\x60\x73\x6b\x47\x18\x96\x00\x55\x79\x76\x60\xb3\xae\xf4\x67\x88\xe8\x76\xb6\xfd\xea\x9a\x54\x7f\xad\xd7\x33\xce\x92\x38\xdd\xfe\x0b\xd3\x7d\xfb\xf2\xf5\xc4\x4d\x45\xd9\xe3\x1b\xd3\x6f\x5a\x4f\x0a\xdb\x0e\x81\xf0\x65\xcf\x73\x6d\xcb\x9a\xff\x44\xed\x1f\xbb\xd7\x97\xcc\xa5\xc9\xbd\x65\xe2\x39\xc8\x49\x80\xec\x2b\x42\x12\xc9\x19\x4a\xbf\x2f\xa1\x3b\xcd\xa6\xf2\x57\x38\x5e\xf4\xc4\xf1\xb1\x3c\x9a\xc1\x8d\x1a\x02\x4b\x72\x89\xc3\x39\x4a\x3b\xbd\x9c\xf0\x51\x3b\x0b\xa1\x0a\x7a\x09\x55\x99\x37\x96\x83\x42\xf9\x8b\x12\xdc\xfb\x45\x7e\xad\xcc\x12\xfe\xdd\x69\xf5\x02\x95\x21\x5c\x47\x8b\x61\x85\x89\x23\x81\xc3\xde\xf8\x96\x42\x5d\x19\xa6\x3c\x44\x77\xaa\x1d\xf9\xd0\xc5\x59\x4d\xb2\xe3\x7d\xfd\xdd\x0c\xdf\xbb\xd0\x6d\x9b\x23\xf0\x63\xf5\x97\x47\xb1\x8f\xcf\x92\x00\x4b\xe3\xf2\x83\x8a\xd7\x04\xdd\xd5\xc8\xfc\xc1\x71\x23\x50\x0b\xbd\xef\xea\x0a\xbb\xfc\x79\x88\x63\x1c\x74\x4e\xc5\x55\x01\x0f\x61\xea\x78\x62\x2b\xca\x0b\xeb\x4b\xb4\x7c\xbf\x0f\xb0\x6c\x93\x1e\xed\x62\xaf\x95\xc0\xc1\xd5\x5f\x69\x60\x89\x04\xed\x7f\xed\x60\x7c\x89\xfa\xa6\x4d\xbe\x26\xea\xc8\x41\x8f\x53\x79\xb6\x35\xd6\x00\xfd\x11\xb9\xc4\xec\xa4\x15\x87\xcc\xea\x13\x52\x90\xb6\xa3\x74\x7f\xa2\x1d\x39\x16\x38\x74\x35\x1a\xad\x8c\xea\xb4\xfd\x84\xae\xc8\xc2\x6f\xc4\x7f\x02\x90\xfa\xf8\xb5\x38\xb2\x9b\x5b\xb7\xd6\xa6\x28\x73\x0a\x35\x27\xc9\x30\x54\xfe\x92\x61\x9e\x00\x41\xd3\x7f\x0f\x77\x49\x24\x85\x99\xd2\xc7\x28\x66\xa3\xca\x43\x12\x44\x9d\x0d\x47\x8c\x38\xd2\x2d\x37\xc4\x1f\xb3\xc6\x78\x1e\x78\x52\xf0\xc6\xa8\x4d\x6b\x43\xce\xa0\xfb\x98\x5e\x0c\x11\xdd\x5a\xf8\xd5\x03\xdc\xe2\x4b\x89\x29\x79\x02\xe0\x91\xf2\x25\x8a\xb0\xc7\x77\xdc\xea\xe8\x21\x33\x47\xf3\x37\x8e\x08\xc5\x5b\x97\x4e\x64\x79\xf1\x97\x47\xf9\x2b\x13\xa1\x72\xa4\xf0\x37\x50\x65\xe9\xbd\x99\x43\x7e\x8e\x88\xe3\x64\xc7\xeb\x61\x5f\xc4\xef\x8a\xd4\x60\x83\xad\x02\xe1\xcb\x5b\xb4\x01\x81\x72\xa9\x37\xab\x6a\x7c\xe4\xa1\x8a\xde\xd7\xf2\x57\x51\x7b\x70\x21\x50\x9a\xcd\x5b\x7f\x73\x0b\xb4\xab\x1b\x3b\x67\x99\xa2\x11\x9b\x7a\x62\xb7\xf3\x97\xc1\xeb\xa7\x1b\x5c\xe6\x18\x7d\x53\x98\xb8\xd0\x7e\x9c\x41\xa3\xee\x40\x91\x49\xf0\xde\x2e\x44\xab\xb0\x63\xe1\xdd\x85\xc1\xf5\xc4\xcf\x31\xbf\x23\x67\xbc\x57\x13\x95\xb7\x76\x15\x2a\x6c\xc7\x6d\x9d\xca\x55\xeb\xf0\xce\x73\x1e\x20\x87\x9a\x39\x44\x2b\xf2\x3d\x5d\x18\xe4\x86\xda\x32\x8e\x9f\x64\x41\x6c\x68\x4a\xbd\xfb\x15
\xe9\x14\x0d\x79\xb3\xe2\x0c\xce\x1b\xe7\x2f\x4e\x5b\x5b\xa8\xa2\xd2\x33\x44\x18\x85\xff\xe5\xbe\xe1\x0c\xd7\xf2\x74\x1e\x22\x87\x5d\x66\x4a\x11\x94\xc1\x87\xbf\x57\x79\x84\xdc\xc0\x3c\xc5\xc0\xc8\x2d\x4c\x77\x14\xfa\x84\xd4\x95\xa1\xda\xb8\x93\xd0\x07\xec\x0c\x28\x91\x4b\xad\x5f\x42\x88\x7a\x61\x73\x7f\x96\xf4\xd6\x92\x21\x8f\xef\x0f\x65\xd5\xe7\xcf\xe3\x17\xa2\x38\xa2\x85\x65\x35\xf5\x18\xd3\xdd\xe9\x66\xf3\x7b\xda\x1e\xff\xc1\x4d\xa6\xbe\x08\xb2\x08\x4f\x75\xca\x34\x8d\x1d\x4c\x85\xa2\x98\x1d\x4b\x7f\xf9\xce\x6d\x34\xf1\x99\x91\xbc\x0c\xea\x0b\x4f\xd9\xa4\xc6\x55\xe3\x59\x76\x71\x92\x1e\xfb\x54\x56\x42\x72\x04\xf6\x5b\xeb\x22\x03\xdd\x90\x18\x29\x56\xe1\x28\xdd\xac\x38\x3a\xd9\x9f\xd7\xcf\x8f\x55\x3d\xf9\xb9\xf1\xf6\xe3\xe3\x14\xf9\x72\x5f\xbe\x77\xd9\x18\x51\xf8\x12\x61\xe7\xdf\xa0\x80\x51\x69\xea\x03\x53\x2f\x79\x93\xb5\x8d\x1e\x4d\xe3\xd6\x92\xa2\xb1\x45\x55\xd2\x4a\xc7\xd9\x17\x45\x50\x28\xd3\xd9\xc2\x6b\x20\x5f\x0c\x0a\x1d\xbb\xe1\xd8\x3f\x62\x6b\x6d\xa3\x25\x84\x34\x1d\x39\x09\x66\xb3\x63\x23\x2a\xfa\x17\xe9\xe2\xf8\x60\x27\xf6\xa7\x1d\x4d\x56\x39\x02\x21\x99\x72\x45\x20\x8a\xef\xf4\x3c\x5e\x9a\x65\x99\x27\x90\x43\x24\x5b\x96\x5b\xbf\x21\x09\x65\x0e\x17\x5a\x40\x6a\xe7\x04\xa2\xd8\x52\x01\x92\xc0\x7b\x52\xe2\x91\x26\xa6\xf3\x4d\x93\xf1\x8f\x30\x88\xbf\xf1\x2f\x7a\x70\x6c\x69\xf2\x00\xc0\x1d\x04\x75\x86\xed\x3b\x93\xba\x81\xed\xc1\x2a\x6f\x91\x11\xac\x1d\x33\xc3\xd4\x26\x9d\x92\x4b\xf2\x2e\x99\x02\x06\x9c\xe7\xb2\x07\xbb\xac\x0b\x52\x0e\xd9\x22\x46\xed\x98\x0a\x40\x65\x7b\xec\x87\xc6\x2c\xdd\x00\x97\xc0\x47\x9a\xf6\x28\xad\xe4\x7c\xc8\x71\x36\xc5\x15\x27\x0d\x47\x16\x20\xb5\x68\x66\xe3\xcd\x98\xf6\x8e\xa8\x04\xdd\x4f\x79\xdb\x13\xa3\x8d\xfe\xfe\x27\x91\x74\xa7\x93\xa9\x7b\xd7\x22\xc2\x90\xaa\xbe\xa2\xf0\x5b\x27\x0a\x74\xc7\x92\x5b\x35\x5f\xaa\x7d\x8e\x3f\x7f\x86\xe8\x6e\x63\x05\x11\x7a\x72\x6d\x74\x69\x2e\x5d\xde\x4f\xd4\x1a\xde\xea\xbf\x15\xf1\xc0\x45\xbd\x6b\xd1\xe7\x3a\x79\x6c\x7c\x89\x77\xab\xc6\xf6\x88\xdc\x78\x68\xe8\x98\x73\x12\xf7\x08\x44\x77\x6e\xe3\x1d\x45\xd5\xad\x89\xcf\x50\x71\x86\x54\x1f\x2e\x8f\x7c\xa2\x87\x3e\x9a\xd4\xc5\x14\x34\x14\x3f\xfb\x9e\xd6\x2a\xca\xd3\x63\xd0\xd3\x75\x6b\x64\xae\xfa\x85\xe1\x93\x44\x72\x94\xd4\xc6\x76\x3d\x51\xb2\x09\xd8\x34\x46\xd5\x86\xd6\x8f\x69\x95\x59\xbf\x8f\x83\x49\x06\x60\x8d\x0f\x4c\x56\xc1\xec\xaf\x09\x1a\x65\x85\xc9\x46\x78\x2a\x53\x42\x01\x4d\xe8\xd0\x10\xa6\x3b\x39\x86\xf8\x66\xaf\x56\x1e\x82\x11\x22\x3b\x75\x93\x64\xce\x38\x40\x91\xb5\xc5\x8c\xd1\x78\x68\x4e\x5f\x73\x93\xd2\x8d\x69\x28\xd6\xd0\xd3\x10\x95\xeb\x93\x54\xcd\x71\xfa\x97\x29\xf2\x64\x67\xd3\x44\x4c\x56\x35\x2b\x7c\x2c\xb6\xa0\xbf\xad\x36\x0f\x7c\x3d\xf3\x0c\xe4\x33\xb6\x97\xff\xf6\xf3\x3e\xaa\x48\x57\xd4\xef\xdb\x7e\x66\x69\xf0\xfa\x38\xd8\x03\x35\xd6\xca\x0f\x4c\x9e\xd0\xe7\xcd\x8f\x77\x58\xfd\xbd\xaa\x1f\xd9\xf9\xfe\x90\x9d\x4a\x61\x91\x59\x4f\x84\xc2\x99\xc0\x61\xf3\x3c\xcc\x61\xf6\x73\x95\x5c\x32\x25\xdf\xba\x1b\x20\xa5\x73\x51\xb1\xf9\x1e\xd8\x25\x43\x65\x5d\x31\x71\xf5\x1a\x8f\xe0\x59\x85\x63\xde\x85\xa3\xe0\xc4\xec\x96\xe5\xac\x6b\xe1\x74\x6d\x3f\xf3\x36\xf4\xef\x6e\x68\x22\x01\xd3\x64\x1a\xf4\x41\x15\xbb\x47\xf5\x45\x4c\x3a\xae\x69\x3d\xde\x50\x7c\x5f\x67\xcd\xa5\x30\x84\x4a\x8a\xa1\xa4\xa1\x44\x3d\x80\xf6\x6b\xbd\x48\xe5\x3e\x6f\x76\x0c\x06\x75\x4c\xd1\x60\x65\xa6\x41\x9f\xcc\xc7\xf1\xfa\xf9\x4d\x17\xe4\x84\xa1\x5c\x99\x7e\x02\x4e\xa4\xfa\x80\x40\x4c\x31\x05\x85\x47\x4c\x22\x03\x41\x3a\x7d\x01\xe9\xd5\x1e\x8f\x16\x93\xd8\x35\x12\xa1\xb1\x35\x53\x74\xc7\x6c\xfd\x76\xef\x32\xd6\x3d\xb6\xde\xc4\xa3\xff\x2e\x38\x30\x7f\xe0\x57\x9c\x5f\x27\x71\x49\xf7\xdb\xdc\x5
2\xef\xca\x10\x96\xee\x24\x00\x19\x25\x77\xcd\xe4\x73\x78\x34\xf7\x0f\xd6\xd1\xe3\xd9\x18\xd4\x2a\xf9\xcf\x8b\x88\x6d\x66\x9d\x81\x66\xdc\xbb\x6a\xd1\x5f\xd1\x15\xae\x5d\xe2\x56\xe7\x53\xb3\xe5\x48\xfc\xf1\xe7\xe7\x76\x37\x6c\xd1\x28\xb9\x20\x44\x2c\x11\x45\xbc\xf3\x5b\x2e\x1c\x5d\x54\xbd\x6f\x9c\xcc\xcd\x91\x76\xc6\x17\x55\xee\xbe\x21\x4a\x73\x0c\x49\xfd\xf7\x92\xbe\x0e\xb9\xb1\x73\x04\xe0\x25\x10\x65\x9c\x9a\x30\x7c\x19\x10\x70\x65\x1a\xca\x6f\xb6\x10\xd9\x39\xbd\x3f\x7d\x0a\xae\xee\xcb\x47\xf9\x75\x2d\x4a\x33\x15\x63\x63\x5a\xc1\xc7\xb1\x75\x46\x75\xab\x62\x1b\xff\x8a\x1b\x79\x68\x94\x7f\x10\x34\x62\x46\x5d\xd0\xd3\x4d\x6b\x64\x36\xb9\x90\x42\xd6\xee\xdc\x1b\x79\xd7\x20\x3c\x70\xd3\x08\x1b\x7f\xb3\xd9\x02\x31\x20\xf4\x72\x31\x66\x06\x85\x08\x6b\x4a\x7a\x89\x60\x45\x56\x44\x08\x5d\x86\x2e\xb3\x49\x69\x96\x23\x4c\x99\x03\x9e\x5d\xb9\x60\x6f\x70\xf5\x15\xc0\x50\x98\xda\x97\x63\x5e\xa2\x1e\x51\x30\x1f\xc9\x43\x7a\x0f\xfa\xdb\xf7\xa3\xe0\xe7\x11\xf9\x9b\x15\xd9\xdf\xd8\xba\x53\x17\x87\x6f\x1d\x8b\xd5\x64\x8a\xae\x5d\x9b\x47\x3e\x37\xfc\x94\xbd\x19\x13\x95\x32\x4c\x96\xf4\x99\xde\xff\xd1\x74\x16\x4b\xd6\x32\xcd\x16\xbe\x20\x06\xb8\x0d\xb1\x8d\xbb\x33\xc3\xdd\x9d\xab\x3f\xd1\xef\x77\xfe\x01\x11\x1d\x1d\x7b\x40\x15\x59\x2b\xd7\x93\x50\x59\x40\x46\x84\x93\x7e\x0d\xc5\xa4\x40\xd3\x1f\x27\x2d\x23\xb6\xa3\xd6\xf4\xc1\x46\x10\x54\xe8\xf8\xc3\xeb\x1e\x89\xd1\x7a\x56\xd9\x7e\xcf\xd0\x34\xde\xac\x84\xf8\x92\xbd\x57\x4a\x38\x53\xf9\x99\x30\xbb\x1d\xcd\x40\xb0\x7b\xfa\x2f\xda\x50\xe1\x87\x81\x3d\xd6\x7f\xbb\x18\xff\x18\x86\xab\xb1\xcc\x6a\x6e\xf7\x7c\xb1\x25\x0d\x86\xf0\x3b\xc9\x1f\x36\xd4\x8e\x07\x45\x1a\xfc\xe6\xe6\x04\x11\x45\x63\xac\x92\x56\x5c\x68\x86\x43\x54\xa2\x61\xb4\x12\xfa\x83\x91\x9e\x11\x0d\x2a\xbb\x63\xa4\x60\xae\x67\xe9\x03\x17\x53\xe0\x6f\xc1\xa8\x79\xe9\x38\x34\x89\xe8\xf9\x27\x44\x5f\x88\x4a\x33\xee\x8f\xbd\xad\x2f\x47\x69\x2d\x79\x7e\xa6\x37\x45\xe4\x73\x33\x41\xf2\x35\x6a\x3e\x45\x2d\x63\x3b\x0c\xae\xb0\x0f\x22\x2f\xe9\xbb\x5f\x12\xff\xc8\xdd\x08\xf7\xeb\xc0\x12\x56\xef\x95\x17\x66\x80\x52\x24\x7d\x0b\x4a\xd2\xe7\x81\x12\xa5\xf2\xe3\x05\xe5\xfe\xf6\xb4\x14\xa2\x52\x63\x1b\xa8\x98\x5c\x34\x3b\x86\x25\x09\x09\x2b\x03\xa1\xe9\x0f\xfd\x4c\xc9\xeb\xa9\x9c\xca\x74\x54\xb9\x73\x2a\x84\x06\xed\x2b\x50\x8a\x4a\x8d\xea\x85\x8b\x76\x8e\x3b\x3e\xfe\x99\x0b\x7a\xe0\x25\x04\xa4\x7a\x33\x22\x5d\xea\x0a\xba\x27\x3c\xa3\x97\x74\xc0\x66\x27\x10\x71\x6e\xeb\xbd\xab\xdc\xba\x6e\x2b\x39\xe4\xff\x5e\xc0\xaf\xeb\x4a\xec\xd4\x07\xae\x0e\x26\xe2\x3c\x82\xef\x88\x63\x63\xd5\xaf\xf0\x83\x51\x1f\xca\x23\x8c\x2d\x85\xf2\xd3\xee\xef\x67\x31\x5d\x9e\xc1\xa5\x15\x14\x5e\xec\x9c\x1c\x71\x3a\x29\x31\xfd\xd7\x93\xc5\xaa\xe8\x5c\xa3\x20\xee\x58\x04\x81\x13\x60\x56\x1f\x98\xab\xfa\xee\xa8\xc6\x9a\x9d\x6c\x54\x06\xef\xc3\x05\x00\x66\x2d\x35\x3d\x13\xc0\x87\x17\x08\x8e\x59\xb0\x3f\xa5\x5b\xfc\x40\xeb\xe6\x63\xfd\xec\xf4\x4e\x7e\xb3\x03\xa1\xda\x3c\x46\xba\xd3\x92\xba\xdd\x04\xb4\x3f\xcf\x93\x5a\x62\x27\x24\x9d\xe0\xdb\x26\x89\xb2\xaa\x0b\xcd\x0c\x9b\xc1\xd4\xc5\x87\xf0\x61\xf0\x31\x7b\x69\x19\x6c\x80\x60\x8e\xfa\x14\xb5\x5e\x3d\x6e\x3a\x3b\x9a\x29\xbf\x4d\x39\xe6\x6c\xfc\x3d\xa6\x59\xcb\xca\x9e\x67\xdd\x3c\x2e\x44\x29\xd6\x32\x99\x1e\x4f\x72\x2c\xa6\x53\xa0\xe4\xb1\x05\x2b\x50\x23\xd4\x02\xce\xc4\xb9\x9d\xa8\x66\x90\xc4\xe5\x2c\x83\x40\x98\x30\x54\xb0\x76\xfa\x4b\x76\x24\x3e\xc2\xcd\xbe\xf5\xcf\xf0\x02\xb8\x02\x3f\x54\xc1\x88\xb2\x92\x1a\x5f\x6b\xde\xd3\xdf\x9e\x9f\x07\x3c\x23\x73\xff\xad\x67\x74\x85\x49\x03\xfb\xe0\x65\xe5\xfa\x45\x73\x3a\x18\xc2\xf7\x43\x90\x4b\xec\x04\x4b\xcd\xad\x67\x86\xb5\x7b\x10\xf1\x4c\xe0\xf2\x56\x
d4\x39\x91\x9f\xff\xf8\xac\x96\xc4\xa9\xfd\x91\x34\x80\x05\x11\x52\x06\x1a\x8c\x20\x21\xe9\xcc\xb5\x47\x82\x92\x50\x48\x0f\x01\x91\x47\x57\xb4\x61\x97\x73\x34\xd7\xc5\x92\x75\x97\xf2\x57\xcf\xa2\x0c\x1f\xac\x18\x0e\x4b\x26\x3c\x7a\xc6\xa5\x36\x16\x1b\x41\x50\x44\x0e\x42\x39\x8e\x81\xcd\x1a\x77\x0b\x81\x68\x7e\x88\xab\x53\x56\x21\x05\xcd\x6a\x4e\x0b\x5d\x4e\x19\x0c\x42\x8c\xc3\x39\xae\xe6\x50\x6f\xa0\x3e\xf4\x31\xcc\x30\x41\x16\x77\x6e\x4a\x85\xf4\x23\x10\x17\x68\xb7\x95\x00\x12\x1a\x00\xb4\x9e\x6c\xb7\x32\xc9\x2f\x34\x29\xfc\x40\x70\xce\x19\xa5\x58\xa5\x38\xd1\x18\x2b\x2d\x20\xab\x31\x8e\xb7\x94\xbe\x06\xad\x07\x4b\x84\x6a\x1a\x3f\x67\x7f\xda\x1c\xa4\x30\x8c\x02\xaa\xea\xa8\x37\x3e\x2f\x92\x30\x1a\xb6\xe2\x44\xd2\xf0\xd8\xe6\x27\xe8\x65\x2e\x26\xc4\x81\x33\x84\x45\xc7\x2f\x70\x62\x6a\x2c\x37\x19\xc7\xd1\x3d\x88\x06\xfb\xaa\xe6\x30\x60\x8d\x5c\x01\x36\x3a\xc7\xcb\xca\x3b\x28\xa2\x02\xa1\x59\x30\xf1\x52\xb6\x95\x0b\x4d\xfc\x76\xfa\x04\xf2\x3b\x37\x35\xdc\x0f\xc1\x5a\x60\x77\x59\x5e\xde\xbc\xb7\xa4\x3f\x0b\x88\x02\x01\x44\x14\xe6\x9f\x32\xdd\x30\xbe\x55\xd2\x43\x41\xbf\x5a\xe8\x7d\x53\xb5\x7f\x20\x58\x00\xa4\x8b\x57\xfb\xb8\x50\x9d\x25\xda\x0a\xb1\x62\xb4\xd0\x08\x79\xaf\xb8\xa1\xe2\xd1\xc0\xb3\x03\x16\x12\x79\xe7\x13\xa9\xc6\x93\xf3\x6d\xee\x05\x3b\x42\xa3\x0e\xfc\x2b\xa8\xf1\xdd\x37\xf5\x18\xbf\xb4\xf9\xc5\x1d\xf8\xa4\x6c\x0e\x64\x06\x9a\x35\x78\x4f\xac\xb1\x08\xa8\x9e\xf4\x4e\xd3\xf3\xd1\x20\x38\x3d\x35\xfa\x61\x4f\x1b\x53\xa9\xe5\xcc\x52\x5e\xab\xed\x4a\x94\x68\x76\xea\xe2\x18\x0c\xab\x1d\xa0\x2f\x05\x5a\x17\x4a\xee\x96\x2a\xb4\xc1\x9b\x7c\xb0\x58\x92\x27\xdf\xed\xbb\xdc\xc9\x2d\xd3\x5b\xdb\x40\xb7\x54\xee\x18\x3b\x0b\xbe\x17\x61\x76\x33\xc9\x23\x37\x4a\x79\x02\x2b\xc0\xa7\xa9\xa2\xd9\x61\x8c\x4f\xfa\xe4\xc0\xd0\xb7\xbf\x61\x85\xd3\x22\x52\xa0\xc6\xf0\x75\x6b\xba\x85\xae\x82\x86\x92\x7f\x4a\xc0\x9c\xbc\x5d\xd9\x68\xd4\x78\x6f\xbe\xc6\x59\x9c\xb5\xe2\x01\x81\x4c\xc9\xf9\x6c\x68\x00\x86\x29\x83\x68\xed\x00\xd8\x79\xe3\xee\xdc\x00\x4a\xab\xd4\x3c\x92\x4e\x69\x00\x48\x8f\xf0\xeb\x73\x7e\xd8\x8a\x94\xad\xf5\x67\x59\xe0\xf4\xce\x75\x89\xe3\x00\x2f\x78\xc9\xb2\x45\x01\x72\x40\x20\xf1\xea\x45\x0e\x3a\xb5\x59\x61\xec\xc8\xf4\x20\xd6\x9d\xa6\x07\x2d\x4e\x14\xc6\xcb\xa8\xc5\x4c\x3c\xc5\x4e\xad\x33\x26\x30\x16\x24\x51\xc9\xc2\x2d\x38\xac\xc9\xc3\x8c\x2a\x61\xce\xdd\x13\x1e\x7b\x15\x6c\x5f\x61\xd6\xfe\x9b\xb6\x18\x61\x6b\x48\x0f\x17\x8e\xe0\xeb\xae\xbc\x22\x17\x3a\x34\x3d\x9b\xd3\x57\x89\xb4\xe0\x66\xa5\xc5\x48\xa4\x20\x08\x60\x84\xc2\x0a\xa7\x4f\xc4\xd4\x52\x2f\x68\x5f\x11\x38\x4e\x0c\x91\x1e\x76\x63\x98\x9b\xd2\x96\xb9\xda\xbf\xe4\xe9\x83\xc9\xa1\x6d\xcb\x0d\x6f\x77\xa1\xd1\x03\x3e\xda\xcd\x85\xb6\x96\xea\xc2\x81\x94\x7b\xb9\x97\xcb\x61\xc3\x0b\x99\x74\x47\x32\x03\x2a\xb4\xea\xf1\xd3\xae\xc8\x4a\xa6\xa0\xb6\x5b\x63\x07\x08\xc8\xd4\xb7\x9f\x9f\x36\xaa\x4e\x68\x8c\x8c\x73\x8b\x62\xed\xc1\x7a\xfc\xa2\xd3\x73\x73\x50\xe9\x13\x10\x8b\x6e\xb1\xaa\x36\xad\x07\x32\xb7\x3e\x05\xb0\x92\xac\xa5\x26\xb6\xa1\x83\xe2\x34\xd8\x02\x4f\x90\x08\x31\xb2\x28\xaf\x08\xa6\x30\x0d\x4f\x9f\x52\xcd\x8c\xb1\x85\xab\xd3\x56\x90\x84\xcd\xde\x26\x10\x3b\x07\x36\xe6\x5e\xb5\x76\xd9\xb1\xd5\x8f\xf2\x64\x12\xf1\xcc\x0a\xff\x0c\x07\xbb\x99\x40\x3d\x8f\xcd\x83\x49\xbc\xbe\xc5\x90\x31\x69\x64\x83\xbf\xe5\x6b\xfd\x9f\x63\xfb\xc7\xf8\x65\xe9\x28\xb2\xf3\x8d\xa5\xad\x6f\x4a\x7c\x7b\x84\x5b\x68\xdf\xa2\xd8\xfe\x1a\xed\x77\xa1\x24\x71\x4c\x0e\xb2\x27\x17\xe7\xc8\xbc\x8b\xd4\x76\x2d\xd4\xff\xf4\xf4\xb7\x16\x27\xe8\xdc\x8f\x93\x59\x9c\x28\x4b\x0a\x1d\x30\x46\xe4\xc3\x1a\x63\x9e\x9d\x14\x1f\x1a\xc3\x06\xa2\xa5\x12\xcf\x36\x03\x60\x75\xa2\x59\
x03\x44\xfe\xd7\x92\x0e\x94\xc3\x23\x98\xd7\xfa\xf0\xa8\x62\x0d\x21\x97\x1a\xa8\xf0\x4b\xb4\xe1\x81\xa1\x7b\x69\x49\x37\x55\x59\xb2\x0d\xc8\xb7\xc3\xaa\x5c\xbf\x6c\xc1\xfa\x95\x07\x51\x55\x88\xf2\x04\x11\xd2\x8f\x0e\x58\xed\xc8\x89\xba\x9c\xed\x3e\xb8\x54\xee\x62\xdf\x69\x9d\x1e\x49\x28\x75\x46\x32\xbf\x2d\x84\xf7\x43\xbd\x03\x8d\xb9\xbc\xf5\xc7\xfd\x4d\x50\x24\x9f\x94\x5b\xe7\x6d\x88\x65\x53\xb3\x9b\x3b\x89\x11\x7b\x45\x28\xad\x4e\xea\x1e\xb0\xd9\xa0\x04\x02\xfe\xdd\xc3\x89\xa6\xb1\x6e\x1a\x98\x5f\x5a\xd1\x65\x02\xa2\x3e\x02\x2d\x8b\xdf\xda\x69\x5b\x64\x03\x5a\x9f\x0f\xff\x62\xf8\xe0\xcc\xca\x9a\xe2\xb2\x08\x7d\xc8\x6e\x7e\x00\x63\x3d\xab\x26\x4b\xb2\x36\x10\x3b\xb3\x72\xb1\xcf\x15\x92\xeb\xd4\x7c\x96\xf6\xb4\xe3\x07\x82\x69\x39\xb6\xda\x78\x27\x4a\x3e\x9c\x57\xca\x7b\xa8\x79\xc9\x85\xc6\xca\xc2\x04\x2d\x60\xfc\x96\x05\x26\x88\x62\xb4\x1c\x6b\xfa\xe6\xa1\x3b\x73\xac\xf9\x2d\x31\x42\xb7\x82\xc4\x40\x1e\x81\xc8\xbf\xb6\x55\x61\x50\xea\x38\x8c\x2a\xad\x87\x37\x80\xf4\xf8\x55\x15\x3e\x26\x63\x29\x36\x3f\x87\x16\x9d\x3e\x2d\x65\x52\x7c\x4e\xd4\x95\xdb\xf9\xe6\x02\x51\x7f\x47\x5b\x97\x78\x42\x9d\x7f\x9e\x9d\x66\x49\x4d\x26\x62\x19\xa3\xca\x9b\x37\x37\x55\xc6\xf2\x47\x4f\xfa\xa6\xbe\x0b\x5f\x0b\x92\x30\x83\x5f\x06\xd3\x08\x59\x9c\x84\xc4\x7b\x61\xd5\xaf\x3f\xa1\x9c\xd5\x7d\xeb\xe5\xac\xc6\xcd\xa7\xcf\x6c\x23\x21\xdc\xd8\x6d\x33\x4e\xb8\x64\x51\x68\xe2\x8c\xe3\xba\x05\x29\xc6\xcf\x99\x6b\xff\x5f\x7d\xb2\xc5\x64\xee\x07\x73\xc7\xa9\xd6\xb1\x16\x42\x82\xb2\xa6\x63\x2d\xae\xe9\xb8\x2c\x2f\x21\xfe\xf6\x42\x78\x44\x67\x8e\xb1\xc1\x8a\x60\xda\x2b\x2e\x84\x58\xeb\x59\xe6\x58\xae\x26\xb4\xa7\x58\xb1\x24\xd2\x3f\x2a\x74\xa1\x74\x5c\x56\xfa\xfc\xd2\xc6\xcd\x34\x1d\xc0\x3a\x8b\xc1\x24\xc9\x13\x10\xa3\x8f\xb7\xab\x2f\x56\xa0\x92\x8a\x13\x4d\x35\x1b\xcd\xe6\x5d\x14\xcf\x2b\xf8\x9e\x53\x9e\xb3\x08\xa5\xc0\x8b\x7d\xd4\xa9\xa6\xf1\x32\x63\xaf\x21\x5b\x60\x12\x22\x97\x91\xf2\x1b\x1b\xab\x38\xad\x6e\xd5\x4e\xee\x04\xdd\xda\x71\x05\xc3\x0a\xed\x85\x16\x26\xc9\x0b\x53\x6c\xed\x8f\x6e\x9f\x94\xa3\xfc\x1d\xa8\x4c\xa2\xe0\x1e\x19\x9c\xeb\x38\x67\xb5\x57\x04\x8b\x4b\x19\x93\x31\x9e\x44\x70\xb9\x33\x98\x00\xc0\x8f\x5a\x39\xb8\xf2\x3e\x1e\x62\x3b\xc1\x77\x4b\x3f\x48\x42\x76\xf0\xb4\xd2\xda\xc3\xbf\x71\x2f\x6b\x3a\xfa\x26\xec\x1e\x99\x0b\xe1\x16\xef\x1c\xe9\xba\x12\x27\xc7\x32\x88\x46\xd4\xe3\x57\x5c\x3d\xac\xb9\xf7\xd9\x05\xc2\x3b\xaf\x6a\xb3\xa3\x59\x12\xd8\x90\xc1\x4b\xa6\x12\x69\x03\xad\xb8\x3e\x2e\xf6\x40\x68\x5f\xe8\xb9\x7e\x0a\x68\xf5\x47\x01\xf8\x20\xa0\x14\x96\xd6\xd1\x80\x76\xb5\xa5\x4e\xa6\x61\x32\x1a\xe3\x65\x66\xfc\x3b\x0b\x3d\x1b\x04\x51\x54\xcf\x82\x34\xd7\x98\xab\x74\xf1\xf4\xfb\xd3\x7f\x3f\x28\x22\x12\xe8\x55\xc0\xc3\x76\xf0\xc4\x0b\x4a\x88\x88\xbd\x77\x86\xa4\x0c\x97\xfe\xad\xd8\xe4\x4f\x10\xcb\x8a\x36\x1d\xf0\x68\xb2\x17\x82\x80\x7e\x0b\x08\xd0\x85\x8a\x1f\xab\xbe\xae\x92\xbe\x74\x84\x98\xc1\x66\xaf\xc2\x6d\xf0\x75\xdc\x55\x10\x3e\x17\xcc\x16\x0e\x2b\x9a\x1d\x68\x39\x7b\x50\xda\xbe\xfc\xdb\x69\x04\xcb\xa4\x7c\xfa\x17\x9b\x90\xa1\x88\xb7\xf9\xbb\xe7\xdf\xba\xdf\x1c\x79\xcf\xf5\x26\x75\x0f\xb6\x69\x73\x52\x9d\x10\x37\x41\x78\x79\x85\xe3\x68\xfa\x48\xf5\xeb\x54\xd1\xe9\x93\x5a\x9c\xb0\x45\x90\x3b\x29\x58\x5b\xa5\x7f\x8e\x2d\x35\xbb\x7a\xff\x49\xe7\x0a\xea\x15\xee\xb9\x4b\x66\x39\x73\xc8\xae\xa4\x92\xf0\x6a\xb4\x05\x2f\xcb\x67\x4c\xff\x9c\x5c\x63\xd3\x8c\xe0\xba\xb2\xcd\xe9\x44\x69\xd9\xfb\x9c\x8a\xce\x8e\xea\xa3\x43\xad\x46\x18\x57\x87\xcc\x31\x2e\x37\x8a\xdd\x6e\x5f\xc4\xd5\x28\x14\x11\x05\x78\xc9\xaf\xe7\xb2\xa8\x2e\x54\x5c\x10\x61\x49\x21\x3f\x13\x62\xb3\x47\x1a\xac\x2e\x6f\xac\x88\x3f\x0a\xac\xe0\x86\x75
\xe9\xea\xbc\xbe\xe6\xbe\x73\x88\x3e\x68\x0c\x34\x1b\xc7\xed\x67\x75\x53\x13\x50\x66\x53\x4b\xbe\x9b\xd2\xfe\x7c\x06\x81\x7d\xd0\x8d\x6a\xce\xfc\x60\xb9\x9a\xf8\x13\x69\x02\xe1\x21\x44\xbe\x26\x9f\x44\x21\xd2\xf6\x37\xcb\x35\x53\xcb\xfc\x94\x20\x85\xee\xd5\x43\x2d\xa1\x24\x3d\x98\x93\xe7\x98\x52\xbe\x6b\xdd\xf3\x02\x5e\x7f\xb0\x0d\x10\x4a\xf5\x2c\x88\x52\x3d\x93\x0c\x3f\x85\xb6\x9b\xba\xb6\x04\xd1\xc0\x50\x7d\xd0\xeb\x18\x77\x18\x52\x95\x84\xdc\xa1\xea\x0f\x7b\x26\x3e\xf5\xf2\xea\xad\x05\x54\xc8\x16\x1c\xbf\x5b\xd1\xf1\x86\x5a\x19\x2d\x44\x0f\x64\x58\xd2\xb0\x96\xd5\x69\x1e\x95\x9f\x5d\x0b\xa2\x08\x56\x3e\x6c\x78\x39\xa8\x09\x5c\xc4\x27\x48\x64\x4d\xb7\x95\x46\x8e\x46\xba\xdf\x81\x01\x32\xcf\xd8\x55\xc2\xf4\x60\xf1\x11\x77\x80\x25\x40\xf4\x00\xa1\x50\xcb\xe2\x75\x5c\xb5\x4c\xf3\xa9\x88\x37\x50\x11\xfd\xd8\x18\xfe\x09\xba\x1d\x6b\x33\xc8\xe0\xa0\x69\x49\x1e\xa2\x0e\x5d\xcc\xf5\x7a\x0f\x19\xec\x7a\x04\x00\x58\x52\x63\x52\x38\x52\x8d\x95\x3f\x41\xb3\x1d\xae\x30\xd1\xb2\xd4\xc5\xbe\x75\xd6\x4c\x10\x4d\x08\x76\xfa\xcf\x29\x01\xef\x16\x32\x27\x92\x46\xee\x43\x2a\x3d\xd2\xac\x66\x23\x1c\x2d\x3e\x36\xf7\x6d\x1b\xef\xc5\x00\xb6\x1b\x98\x96\xd3\x79\xfd\x13\x3f\xfa\x61\x1a\xdc\xe4\x0a\xf3\xf1\xdf\x1c\xb2\x43\x8d\x81\x3b\xff\x1b\xf3\x87\xad\xef\x4a\x6b\x3a\xf3\x56\x4a\xb7\xb2\x26\x84\x28\x2a\x3d\x7c\x8e\x0b\x9d\xe4\x53\x53\x31\xe9\xb4\x0c\xe2\x28\xc1\x9c\x65\xee\xb2\x3d\x00\x8d\xb7\x3e\x86\x41\x9f\x47\x13\xea\x94\xc7\x4a\x5d\x43\x85\x1e\x24\xc7\x27\xff\xf3\x9e\xe5\xb5\xd3\x6a\x3e\x29\x5c\x4c\x55\x37\xb2\x11\x40\x75\x36\xbb\xdd\x72\x02\xe3\x39\xeb\xe0\x6f\xb6\x16\xc9\x14\xd3\xdd\xae\x3d\xbd\xff\x9e\x5f\x93\x86\xbf\x1e\xf2\x89\x72\x6b\xe3\x9a\x4f\x75\xb9\xa2\x60\xc3\xf3\x27\xfe\x36\x23\xda\x6d\xe7\x54\x7c\x12\xab\x7f\x79\x61\x57\x76\x84\x77\xec\x0e\xa2\xad\x88\x4f\x4a\x50\x7a\x36\x18\x00\x9d\xe9\x18\xc4\x0f\xa6\xc1\x0a\x92\xf8\xfb\x41\x25\x0c\x0f\x0d\x96\x55\x70\xaa\xfa\x28\xd0\xb8\xd0\xec\x56\xb4\xee\x40\xc1\xee\xdb\x75\x97\xc6\xc3\x2d\x59\xb9\x64\x08\x6f\x1e\x63\x40\x08\x2f\xb3\x93\xec\x1c\x9c\x4d\x18\x75\xb1\xf0\xf5\xb5\x10\x8c\x07\xdc\x7e\x51\x6b\x59\x71\x7a\xaf\x1d\xa8\xbb\x64\x50\xa4\xdc\x56\x62\xbf\x42\xcd\xb6\x3b\xc7\xdf\x78\x26\xe9\x88\x78\x1b\x42\x8e\xaa\x5d\x3b\xb6\xc4\x54\x3d\x36\x1b\x9a\xc3\xdf\x91\xb5\x58\x28\x59\xb7\x4b\x95\xc6\x67\xbf\x6a\x63\xa1\x24\x3e\x6b\xb2\x5c\xb3\x82\x08\x1d\xe1\xae\x95\x6f\x52\x9d\xa3\xbd\x4c\x14\x2d\xfd\xf9\x60\x35\x03\x69\x78\xb1\xc1\x6f\xb4\x5f\x05\x42\x12\x4d\x04\xc6\x29\x1e\x15\xb0\xfa\x70\x9d\xec\xf6\x7b\x0f\xb5\xc4\xcc\xf0\xde\x97\x67\xf5\xdd\xaf\x30\x23\x74\x6f\x2e\x2c\x54\x0e\x7f\x8d\xaa\xd2\x7a\x7f\x4f\x0e\x55\xd9\x33\x22\x1c\x76\x01\xc2\x6f\xf7\xba\xdf\x0c\x54\xb5\x83\xdd\x2e\xcb\x0a\x13\x89\xd3\x7f\x50\x7a\x8b\xfa\x52\x86\x4c\x26\x8a\xe3\x9c\x12\xdc\x00\x42\x89\xee\x31\x8f\x89\x5f\x37\xfa\x63\x6d\x1f\xb5\x36\x94\x7d\x1c\xaa\x5f\x38\xc6\xf0\xc8\x2a\xe7\x40\x00\xbd\x2e\x94\x44\x04\x78\x81\x88\x4a\x59\xd3\x87\x14\x1a\xfa\xd3\x51\xe3\x01\x37\x62\xae\x29\xb7\xa6\x82\xb6\xac\x06\x8c\x81\x29\x6e\x5e\x35\xcd\xed\x2f\x07\x01\x97\x3e\x09\xa3\xe2\xaa\x43\x0a\x1e\xab\x1d\xd1\xa4\xb6\xb4\x70\x41\x90\x5d\x15\xd1\x82\xed\x94\xa2\x81\xbd\x19\x46\x66\x58\xc5\x11\x7e\x3e\xfb\x9e\x71\xbf\xbc\x54\x71\xf9\xa9\x61\x25\xd1\x5f\x1c\x40\x8b\x47\x82\x96\xe0\x29\x3b\x8b\xc4\xab\x2d\x60\xa5\xd4\x60\xca\xca\xb3\x35\x8d\x10\x45\x6c\x30\xb0\xbc\x9a\x7c\x8d\x75\x6c\xb7\x70\x2e\x4e\x12\x40\x66\xdc\xbb\xc2\x1d\xbf\x0a\x99\x8d\xdf\xcc\xc4\x22\x8b\xa0\xd5\xe9\xff\x0c\xcf\xfe\x0a\x80\x8b\x7b\x80\xad\xdb\xdf\xbd\x19\xd3\xb7\xb3\x6b\xbe\x4d\x39\xe1\x19\x13\x26\x8b\x3e\xb7\xa7\x35\x10\xfd\x0
c\x2d\xba\x0b\x30\xd4\x05\xa6\x97\x72\x31\xf9\x74\xc0\x33\xea\x90\x78\x65\x47\xd5\x66\x55\xf9\x5c\xc0\xa6\x5a\x19\xe3\xaf\x9d\x3e\xa1\xcc\xdd\x3f\x29\x22\xe5\x0e\x06\x98\x8b\x10\x1d\xa1\x9a\x6b\xbd\xb8\x69\x4b\xe4\x2f\xe7\x8f\xd4\x7b\x42\x4d\xa8\x39\x2c\xec\x5e\xe3\xe2\x80\xce\xd2\x77\xd8\x3c\x78\xe6\x14\xc2\x17\x18\x0f\x47\x69\xe4\xed\x13\x11\x3f\xbd\x71\xc7\xb7\x11\x3b\x39\x24\x33\x08\x13\xbd\xa8\x95\x73\xec\xeb\x40\xc8\xe2\x7a\x58\x2a\xd7\x27\x57\x96\x64\x3c\xaf\x2e\xef\x16\xa2\xb6\xe9\xb0\x54\x64\xd7\x24\x5f\xf4\xd3\x47\x51\xd2\x83\xe8\x52\x82\x4b\xef\x03\x8a\x9f\xe3\x97\xed\xd1\xa9\x62\x3b\x6f\xca\x43\x11\x89\xe2\xbf\x99\x97\xf2\x0e\x84\xc1\xa5\x93\x4a\x24\x15\x4d\x0f\x65\xfb\x02\xbb\x99\xd9\xac\x87\x06\x54\x48\xde\x6d\x0b\xf3\xcf\xf2\x20\x7a\x15\x23\x92\x67\x62\x7e\x40\x2e\x5a\x24\x8b\x13\x6d\xef\x53\xab\x63\x7f\x90\x51\x65\xb2\x82\x25\x3b\xa4\x1c\x3d\xb0\x92\xae\xc5\x71\x59\x63\x31\x06\x28\x4d\xfa\x70\xea\x94\xc0\xcb\x3a\xdd\x5f\x03\x11\x15\x0d\x14\x94\xf1\x4b\xda\xdd\x98\xf6\x30\x33\xa6\xd9\xfe\x8b\x8f\x6b\xe8\x9a\xf9\x13\x87\xf8\x03\x0d\x6f\x59\xd7\x79\xa5\xad\x58\x63\xab\x09\xff\xbd\x74\x5e\x1b\x7e\x8e\x7d\xbe\x68\x82\x7e\xc3\xfd\xd4\x4a\x5a\x31\xfa\xf6\x02\x6b\xfa\x5e\x38\xbf\x7a\x55\x62\x22\xd0\x45\x19\x6c\x12\x21\xf2\xa8\xa9\xc4\x3c\xdd\x28\xeb\x7c\x9e\xa9\xf5\x48\x1b\xb0\xbd\x41\x93\x5f\xab\xfe\xb0\xca\x4c\x77\x0b\x76\x82\xc0\x13\xfc\x25\x6e\xb9\x7a\xd7\xf2\x32\xb2\xd8\x24\x34\xbb\x18\x64\x21\x45\xd8\xa1\x8c\xc6\x28\x95\xb7\x05\x87\x82\x50\x71\x9e\xc6\x87\x02\x38\x78\x67\x5c\x56\x15\xc7\xad\x23\x43\x1e\x2f\x0d\xf7\x31\xba\x18\x31\xe7\xec\x53\x90\xd7\x82\xd0\xb9\x95\x1b\x14\xb8\x54\x68\xf4\x4c\x60\x6b\xf9\x61\xe1\xa2\x21\x09\x8a\x2b\x92\x7f\x7f\x73\x80\x53\xa4\x78\x17\xc9\x42\x37\xd4\x7d\xeb\x9b\xca\xcd\x48\xa0\xfc\xe0\x22\x62\x97\xa8\xc4\x7c\xa8\x5b\xc4\xce\xd9\x25\xd3\x71\xeb\x4b\xc3\x28\xbd\x83\xd8\xee\x42\x79\x0a\x91\xa4\xd8\xd8\xb0\x55\x6d\xb7\x53\x6a\xfe\xd3\x07\xba\x9c\x82\x93\x2c\xe2\x6c\xbd\x5a\x6c\x4d\x4c\xa0\x44\xbd\x05\x02\xe7\x5c\xb7\x9a\x9c\xaf\xb5\xdf\x4b\x97\x57\x94\x18\x36\xbd\x90\xc7\xf8\x05\xe9\x09\xb0\x60\xa4\xc5\xc5\x72\xd7\xad\xac\x72\xf5\xd5\x92\x05\x22\xc0\x64\x89\x1e\x00\x39\xe2\xb1\xc3\xb9\x58\x98\x95\x6f\xd1\x27\xc1\x66\xce\xab\x50\xe7\xd0\x2d\x38\x6b\xc1\x7b\x70\x15\xdc\x39\xcb\xfa\x75\x8f\x01\x7d\xf0\x00\x97\xba\xae\xc4\x2e\xd1\x00\xc9\xa5\x31\x8a\x4a\x77\x87\x83\xf2\x46\xab\xe5\x9d\x4b\x02\x8b\x66\xf0\x8a\x3a\x1f\x0b\xb8\x53\x82\xd2\x0a\x27\xb0\xfe\x8f\x0b\xff\xbd\xa7\x24\xc3\x48\x05\x1e\xc0\xf1\xb3\x7a\xd0\x09\xd0\x8c\xc0\x53\x52\x99\x74\xa4\x90\xd2\x94\x7a\xf7\x6c\x17\x7c\xe9\x89\xaa\x39\xaf\x5d\x91\xd3\x77\xdf\x34\x92\x33\x59\x8a\xc0\xa6\xee\x41\x7f\x05\x07\x09\x76\x92\x86\xc3\x1f\x67\xee\xca\xdc\x39\xd3\xbc\xaa\xed\xda\x1e\x77\xcb\xc1\x7a\xe5\x42\x9b\x0a\x97\x0a\x6e\x4f\x1d\x4c\xe2\x9d\x25\x12\xe0\x0a\xa7\x48\xf7\x6a\xb4\xb8\x15\x00\xe8\xf5\xce\x6f\x83\x57\x74\xc3\x81\x0c\x01\xdd\x6e\xc7\x77\x24\xeb\x9e\x99\x2e\x0f\x80\x05\x7d\xa1\xb9\xa3\x04\x29\x2e\x65\x81\xca\x40\x1b\x10\x5e\xab\xc3\x1e\x12\xa7\x24\x94\x6d\xd0\x4b\xfa\x67\xd2\xd9\xf6\x1e\x9b\xfc\x8a\x04\xa7\x5b\x6d\xa2\xf0\x16\x8a\x4a\xf2\x2d\xa5\xcb\xdd\xff\xb1\x9e\xdd\xa5\x19\x33\x18\x6d\x48\x1e\x2f\xc0\x57\x47\xbe\x10\xeb\xca\x1d\x07\x24\x8a\xbd\x10\x25\x6f\x61\x1b\xc9\x5d\x94\x9f\x2f\xe4\x28\x25\x67\x92\xa3\xbb\xaa\xc3\xfd\xd8\x9b\xb0\xd8\x46\x1f\x7b\xbf\xa6\x84\x58\x9f\x9c\x39\xff\x31\xf6\x20\xf2\xef\x0a\x97\xcb\x9a\xf6\x88\xd1\x25\xaf\x34\xff\xac\xe9\xfb\x8c\x97\xda\xf9\x9f\xe4\xa1\x94\xc1\x7b\x10\x65\x5e\xc7\xe9\x3e\x47\x16\xc0\x7f\x3a\x57\xa9\x76\x43\x8d\x80\x4e\x55\xb2\x6a\xa1\x61\x
fa\x00\x06\xcb\x39\x71\xf4\xc3\x01\xa5\xad\xfc\xdc\x08\x34\xb3\xf3\x60\x9a\x98\x1c\xb8\xf0\xc2\x5a\x56\x2a\x4d\xf2\xe0\x91\x17\xe2\xbd\x6e\xf9\x0a\xfb\x86\x15\xd7\xcf\x48\xa3\xc2\x26\xfa\x29\xee\x7e\xf1\x16\x5b\xc0\x64\x61\xd1\x1d\x3d\x3e\x34\xa1\x65\x9c\x5e\xbf\x86\x33\x59\x07\x5e\x6e\xe9\x8b\xfb\x75\x6b\x69\x2f\xb5\xa8\xae\x30\xe0\xf0\x91\xac\xe9\xb8\xad\x22\xbb\xe2\x3f\xa5\xee\x5a\x61\x72\xcf\x1d\x01\xc0\xaa\x85\xb3\xc3\x57\x3a\x37\x56\xf1\xb3\x1a\xed\xd9\x37\x9a\x11\xcb\xaa\x0b\x2d\xaf\x4d\xbd\x75\x00\x28\xec\xdf\xe3\x07\x43\xe9\x6f\xc1\x37\xe3\x26\x21\x04\x9e\x5f\xbc\x74\x69\x45\x5b\xad\x82\xb2\x43\xe1\x69\xe3\x02\xd2\x22\x10\x92\x77\x20\xc1\xcf\x88\x32\x42\x95\xfb\x45\x76\xe9\xbe\x23\xe4\xe8\x06\x40\xf8\xc9\x25\x95\xab\xe7\x44\x74\x66\xc2\xec\x36\x63\x86\x51\x5d\x2f\xe8\x19\x26\x12\xc6\x41\xf4\x97\x3b\xa3\xad\xd7\x57\x3d\x69\xf7\xec\x93\x80\x95\xdb\xa0\x5d\xbe\x6e\x96\x36\x7a\xb6\xfd\x43\x1e\x10\xe6\x5e\x58\x76\x6e\xba\x1c\x3c\x13\x81\xbc\xb7\xd4\xbe\xfb\x5f\x45\x39\xce\x4d\xf1\x8e\x49\xef\x09\x58\x41\x19\x9b\x3f\xbf\x85\xd2\xf9\xc2\xa0\xda\x86\xaf\xa8\x33\xb5\x16\x4a\xd2\x70\x14\x06\xa3\x92\x5d\xd4\xbf\x9d\x8c\x4a\x1b\x8f\xc1\x91\x85\xe7\xc4\x7c\xbb\xdb\x18\xcf\x71\xea\x87\x66\x43\xb6\x20\x88\xf5\xf7\x6b\xa0\xde\x86\x86\xc1\x16\x3a\xe6\x14\xa4\xa4\x70\x03\x38\x97\xa6\x75\xe0\x15\x3d\x8d\x9a\xd3\x8c\x25\xb9\xe0\xec\x6b\x78\x28\x33\x5e\x25\x06\x9f\x16\xe3\x8d\x7c\x45\x60\x8d\x65\xa7\xe7\x7d\x08\x40\xf3\x2f\xcb\xdd\x26\x4b\xa1\xd4\xd4\x5c\x51\x86\x7e\x00\xc8\xf6\x10\xf7\xb1\x7e\x73\xa2\x2e\x44\x5b\x1b\x56\x3b\x78\x96\x35\x2f\x91\xa9\xb9\x45\x60\x72\x93\xcb\x19\x8c\x50\x0c\x77\x25\x5f\x4e\x99\xca\x3b\x68\x02\x7c\xc4\xe1\xda\xf6\x21\xd4\xb0\xd4\x33\xe3\x83\xde\x0a\x11\x71\x06\x20\x28\x75\x00\x2a\x0d\xf8\xd1\xd9\xa3\xba\xad\xd0\xc1\x3a\x76\xaf\x0f\x7d\x79\x1d\x58\x4c\xdc\x2d\x67\x29\xd7\x82\xb5\x4c\x1e\xff\x5a\xd4\x76\xad\xe7\xde\x89\x7c\xf4\xb0\x5e\x15\x24\xfa\xa4\x2e\xd5\x1d\xd8\x0f\xff\x89\x39\x38\xb9\x4a\xd0\xb1\xe8\x84\x12\x31\x47\x77\xfb\x2a\x6e\xeb\xbd\x03\x2e\xf0\xa9\x78\x7e\xcc\x1a\x1b\xb1\xcd\x69\xd5\x2e\x57\x83\xd3\xc5\x60\x73\x96\x22\xbf\x81\xbb\x3d\x1a\x00\xd6\xa1\xdb\x6d\x5c\x9b\x2a\x1a\x78\x3c\x7f\xc0\xd2\x70\x59\x81\x6b\x23\xe7\x18\x02\xbd\xda\x11\x18\x08\x1c\x25\xfe\xe9\x5b\xf1\x26\x8d\x5d\xf6\xf6\xb3\x70\x8b\x28\x82\x81\xac\xf9\xa8\xe6\x65\x3f\x1c\x03\x9a\x08\xe6\x6b\x2a\xea\xa0\x8e\xee\xba\x07\xd1\x94\xbe\x3b\x45\x3f\xb1\x36\x10\x60\x98\x8c\x4b\xe7\x5c\x62\x77\x01\x41\x55\xd9\x61\x64\xbe\x01\x48\xb6\xdd\x93\x8b\x62\x19\xd4\x7d\xb0\xc7\xe4\x64\x0b\x34\x77\x53\xe2\xf9\x18\xd1\x1f\x0d\x09\xf6\x4d\x7d\x65\x2c\x37\xf9\xa7\xc7\x52\x43\xe4\xd8\x4a\x7a\xa8\x8c\xfd\x20\x47\xc0\xb1\x9c\x28\x10\x8c\x5d\x0b\x7e\x2b\xfe\x3c\x6f\xe4\xfa\xcd\x75\xe1\xc0\x6e\xfa\x91\xf6\xc8\xde\x4b\x5d\x1a\x14\xd4\xaa\x9d\x56\x9e\x02\x02\x22\x9f\x57\xbf\x58\x7d\x7a\xe3\xb2\xee\xdb\x36\x62\xbb\x7c\x41\xd7\xc1\xfc\x1f\xeb\x86\x5b\xf0\x39\xa7\xa4\x43\x35\xf5\x8b\x2f\xd1\x55\xda\x79\x56\x91\xab\xa6\x91\x12\xac\x2c\x14\x7d\x7f\x3f\xde\x5e\x82\x1d\x1d\x93\x4f\x0f\x13\xd0\x8c\xb2\x39\xae\x33\x9c\xed\xe2\xd5\x27\xde\xb1\x38\x10\x9a\x45\xc7\x2a\x3b\x10\xfa\xca\x46\xc2\x8c\x10\x74\xc1\x4e\x6f\x4f\x44\xb6\xae\x79\x5c\xa6\x9a\x5e\x3c\x4e\xda\x2b\xa6\x0c\x81\xdd\x23\xc2\x0b\x4b\xa7\x80\xca\xfc\x3d\xb6\x7f\xf3\x78\xd1\x59\xd1\xcf\x80\x62\xb9\xbe\x3e\x09\xa2\xb6\x5e\x0d\xf4\x7e\x27\xd0\x14\x39\x96\x3f\x46\x35\xa6\x6b\x92\x6a\x09\x3f\x55\x3c\xbf\x2e\x74\x1b\xdc\x3e\xf8\xb1\x0e\xc6\x2d\xdc\x8f\x02\xad\xee\xc9\xaa\x25\xb3\x05\x49\xfa\x16\xf4\xb9\x16\x24\x17\xd9\x3a\x3e\x25\x6f\x63\xdb\x4e\xc6\x41\xbd\x42\x25\x80\x79\x06\
xf9\x97\x84\xd1\xd0\x25\x3b\x0a\xd8\x55\x4d\xbc\x07\xa6\x28\x76\x55\xdd\x5c\x2a\x21\x60\xb2\xb7\xbe\xf9\xb9\x7a\xc0\xd7\x98\x59\xbd\x70\x41\xad\xfb\x92\x77\x6b\x61\x9d\xa0\x05\x14\xf5\x4e\x14\x80\xdc\xcd\x84\xa8\x39\x72\x8e\x77\x78\x2e\x4d\x77\xc6\x6f\x6b\x46\xd2\x04\x5d\x82\x19\xb4\x16\xb7\x88\x15\x08\x59\x40\x03\x99\x2a\xac\xa8\xa3\x5e\xac\xf3\x0e\x1b\x2f\x6d\x3c\x6a\x1d\x42\xd4\xe1\x26\xda\x32\xda\x2a\x4d\xda\xc8\xa1\x78\xd7\x1b\x52\x98\xf0\x82\xac\x77\xe2\xb4\x23\x27\xf3\xc0\x94\x91\x00\x30\xb9\x36\x79\x11\x65\xbf\x98\x4e\x8e\x6a\xd3\x8f\x7b\x4a\x9c\x2e\x7b\x32\x2e\x3e\x0c\xcd\x0e\xe4\xc9\x2b\x29\xc6\xe8\x17\xe7\x2a\xa1\xa6\x09\x48\xb8\xbd\xc6\x74\xb4\x4d\xc8\x27\xad\x55\xc4\xf2\x66\x68\xba\x00\x88\x7a\x85\x1d\x7c\xef\x6a\xbf\xd0\x5d\x3c\x85\x4b\xc8\x6e\x1e\x59\xee\x80\xb2\xc7\x4d\x6f\xc4\x75\x5e\x35\x1a\x10\x63\x18\x2a\x1f\xdd\x6e\xc2\xaf\x0c\xbd\x50\x92\x66\x57\xca\xfd\x0d\x86\x17\xa2\x1d\xde\x23\xf1\x72\xf5\x28\x71\x6e\xeb\x37\xff\x10\x7a\xe1\x8f\x7f\x75\x66\x4f\xe9\x39\xf3\x95\xa7\xb5\x93\x55\xab\xd7\xef\xab\x48\xc2\x98\x9b\xb2\xc6\x0f\x86\x84\x38\xb7\xab\x0b\x94\xda\xe4\x9b\xfa\x1d\x21\x60\x9a\x6b\xca\xbb\x39\x90\x9b\xa0\xb0\x65\xea\x73\x5a\x88\xa8\x38\x51\x3c\x46\x45\x84\xce\x2f\x10\x03\xd4\xfb\x08\x21\xba\xaa\x24\x40\xf7\xec\x84\x13\x75\x13\xb4\x80\x40\x4d\x86\xdd\x9b\x28\xdc\x28\x2f\xa6\xd6\xb8\x04\xa2\x29\x8c\x02\xb6\x0d\xa1\x9c\x99\x4e\xce\xe8\xc1\x01\x75\xf2\xb4\x47\xa9\x28\xa9\xa6\x2c\x8e\x94\x03\x80\x2c\xeb\xfc\xb5\xc4\x67\xcf\xe2\x47\xc8\xc6\xe8\x77\xe7\x32\x77\x90\x13\x7d\x83\x24\x99\x1f\x27\xd0\x09\x0a\x94\x4b\x31\x20\xb0\xca\x99\x8c\xa9\x10\xc4\xbd\xec\x7a\x54\x67\xd9\xd2\x21\xe8\xca\xa4\x5f\x17\x4a\x60\x89\xd0\xc9\x21\x9a\x1e\x35\x44\x56\xfb\x67\xa1\x30\xed\xf8\x95\x30\x5d\x54\x32\xee\x58\x6e\x48\x28\xf6\x82\xbc\x0b\xd0\xd5\x82\xcb\x2b\xe3\x17\x82\x6b\x65\x02\x38\x6e\x32\xb0\x8e\xc3\x7a\xa1\x1b\xc4\xb2\x21\x58\x32\x0a\xe1\xba\x8d\x5f\x5a\x28\xa6\xaf\x46\xda\x25\x10\x18\xc7\xe5\x21\xcf\xb4\x14\xc2\x5b\x72\x65\x18\x05\xb8\xcc\xa3\xb9\xd4\x79\x6a\x2d\x71\x8f\xbb\xf4\x06\x90\x5c\x8a\xcd\xcd\xa9\x74\x8b\xe7\x48\x91\x77\x72\x5d\xbb\x8c\xfe\x18\x5d\x1a\x86\x4a\x19\x54\x90\x3e\xff\xc4\x94\xaa\xa2\xfd\x24\x8f\x6f\xf4\x92\x7d\xec\x1a\xb0\x5a\xa1\xc2\xf2\xd0\xd2\x2a\x23\x6b\x24\x0f\x84\xc6\xc1\x17\xcf\xdf\x49\x79\x83\x20\x08\xa2\xb6\xc7\x59\xb3\xf1\x95\x0e\x03\xf9\xb1\x96\x8d\xb8\xeb\x17\x8a\x65\xee\x9c\x5b\x97\x30\x1c\xd6\x74\x5a\x52\x71\x4b\xc4\x54\x1f\xe8\x12\x91\x8a\xf3\x31\xb2\x5f\x1c\xfb\x4b\x2d\x42\xfd\xd5\xdf\x7c\x20\xf1\x33\xb6\xaa\xe7\x86\xe1\x42\xa7\x10\xeb\xba\x0e\x06\x00\x52\x68\xf6\x80\xd7\x89\x66\xbb\x45\x1e\xb4\xe5\x45\xb0\x17\xfd\x1c\xdf\x0f\x24\x16\x8b\x58\xa8\x90\xec\x7d\x4b\x6f\xa2\x38\xb7\x19\x59\xf7\x48\xda\x25\x50\xe1\x67\xb7\xec\xe4\x89\x56\x9e\x4d\x3f\xf5\xe7\x3e\x2d\x0e\xa8\x0c\x3b\x9d\x02\xb8\x08\x8b\x36\x06\xd1\x7e\xa3\xbe\x1b\xca\x0d\xa9\x16\x57\x1a\x00\x33\x96\xfc\x02\x9c\x30\x90\x89\xc4\x4d\x41\x01\x5a\xfe\xb1\x18\x8b\x97\xb9\x03\x90\x48\x98\x1a\x22\x0e\xa3\xf2\xc2\x9c\x52\x5f\xb8\xf5\xca\x42\x57\xbf\x5f\x64\x6e\x05\xfa\x08\x93\x01\xa7\x75\x59\xf9\x7e\xa6\x8e\x4c\x42\x9b\xe2\x3b\x20\xeb\x1e\xc5\x9a\x69\x46\xd6\xbb\x7f\xdb\x37\xbe\x0d\xa7\x09\x3a\x8b\x24\x62\xdd\xd6\x67\xff\xd7\x89\x60\xc4\xdb\x78\x64\xeb\x54\x74\xe6\x54\x3c\xb7\x85\x28\xa7\x99\xd8\x15\x93\x82\x2c\xf9\x59\x2e\x3f\xd0\x39\x54\xfe\xec\xf4\x4b\x61\xb0\x42\xdd\x00\xb7\x9c\x8f\x20\x0b\xf4\x7b\x48\x26\xcc\x48\x6b\x7a\x8b\x34\x0c\xce\x6e\x77\x5d\xe1\x0b\x33\xf0\x8c\x18\x96\x64\x77\x07\x97\x0b\x19\xeb\x37\x9c\xaa\x83\xbe\xda\x1e\xb2\x7b\x70\x0c\x11\x5a\x73\x5d\xb6\xb4\x96\x05\x9e\xb2\xaf\x3c
\xe5\xe5\x4f\xc7\x29\xba\xbc\xe3\x99\x30\xd4\x63\x24\xd2\x70\xb8\xee\x18\xd1\x16\x43\x44\xf6\x6c\xfc\xca\x65\x0d\x38\x4e\x0f\x24\xe7\x9a\x70\x60\x67\x21\xf7\xd5\xe9\xef\xe4\x67\xdf\xf0\x42\xb8\x9b\x54\xef\x55\x75\x61\xdb\xe0\xdc\x20\xf0\x8a\x47\x1b\xad\xab\xee\x3c\x84\xaf\x8c\x6f\x20\x52\x99\xb9\xf3\xb5\x6b\x40\x68\x6d\x10\xa8\x04\x9b\x30\xb5\xc7\x18\xd1\x0c\xd9\x8a\xab\xc3\x45\x45\xd6\xeb\x6b\x0f\xbd\x44\x5f\xa8\x27\x9a\xc3\x5f\x93\x7a\x74\xc3\xf9\x83\xe2\xfa\x83\x92\xd2\xe5\xb5\x6e\xa0\xfd\xac\xc6\xb2\x8a\xcd\xfc\xdf\x5e\xe4\x69\xf9\x77\x10\x84\xb9\xcd\xc4\xdf\x64\x9d\xd3\x4a\x98\xc7\x96\x22\x1d\x05\x9a\xbf\xeb\xae\xe1\xec\x58\x2b\xdb\x49\xad\x3f\x70\x44\xf6\x4e\xe6\x7e\xd0\x60\x66\x7f\xd3\x9a\x1d\x48\x2d\x98\xef\xb4\x01\xa4\xd2\x4f\xc5\xef\xf8\x16\x73\x98\xd3\xf0\xbc\x50\x92\x03\x8a\x64\x47\x52\xd9\x17\x04\xe9\x00\x2e\x4d\x7d\xf5\x4e\xff\x04\x68\x35\x9c\xd5\x62\xbf\x73\xf9\x09\xac\xec\x0a\x8f\x6f\xa8\x5f\xa3\x83\x67\xdc\x96\xf3\x77\x87\xda\x41\xf0\x89\x97\x36\x37\x55\x91\x07\x42\xe2\xc0\xb7\x6b\x3c\x25\x26\x63\x30\x30\x51\x86\x11\xa5\x37\xb7\x0c\x72\xb9\x71\x98\x8e\xc3\xba\x05\x2b\x5c\x5e\xd3\xab\x04\x7d\xab\xd7\x92\xc8\xee\xd6\xf4\x34\xb2\x2b\x0c\xd0\xa0\x50\x13\x47\x4e\x8a\x9a\xf3\xe2\x5e\xd8\x0e\x0f\x84\xfc\x6b\xca\x42\xdb\x6d\x0b\x96\x1d\xc8\x5b\xea\xd3\xcf\x2d\x22\x1f\x6e\x3c\x02\x71\x39\x7b\xd5\x5a\xec\xd4\x5a\x1c\x00\xab\x0d\xa6\x41\x3a\x08\x83\x21\x09\xe0\x22\x0a\xa1\x42\x0a\xe0\x2a\xba\x75\x96\xf9\xff\x56\x02\x95\xf7\x77\x41\x94\x55\xac\xb2\x25\x77\x78\xad\x12\xe7\x2d\x2d\xc2\x5a\x98\x60\x29\x79\x0c\xc8\x1f\x67\xcb\x62\xbf\x16\x98\xbf\x65\x64\xdf\x48\x14\x35\xab\x23\xf2\x5e\xb9\x8d\x70\xfd\x0d\x49\xf8\x17\x93\x26\xdb\xd8\x88\x1b\x23\xc7\x92\xde\x06\x56\xb3\x82\x10\x8b\xcd\xa6\xbe\xa1\x67\x02\x97\xf3\xe5\xf4\x89\x57\x7e\xf1\x73\xec\xd0\x8f\x25\x47\x58\x7e\xdf\xf0\xc0\x54\x7f\xd5\xac\x4f\x00\xaa\x20\xa0\xb8\xec\x3c\xf9\xa4\x50\xaf\xe5\x3e\xb2\xc4\x62\x39\x92\x15\x73\xaf\xf8\xe2\xb9\x32\x3b\x20\x6c\xce\x42\x3c\x69\x72\xdc\x04\x55\x1a\x9f\x0b\x35\xf2\x34\x73\x8c\x0c\x75\x4d\x7a\x88\xed\x13\x1f\x91\x6a\x01\x95\xf1\xbb\x6b\x61\xdf\x7e\xd2\xcf\xf1\x83\xbf\x4b\x74\xfa\x04\x05\x3f\xca\xa5\x43\x7c\xda\x08\xa0\x4f\xfe\xfe\x23\x3a\x73\x22\x35\x03\x0a\xda\x10\x5d\x49\x73\x5d\xbe\x7d\xf5\xad\xbf\x94\x4b\xf7\xfb\x28\xe5\x75\x64\x73\xb8\x9a\xb2\xe0\x44\xed\x75\xa0\x81\xcb\x5d\x66\x95\x65\x02\x38\x8f\x3a\xd7\x0f\x0a\x7e\xe6\xbd\x84\x89\xe3\x1d\xb0\xfe\x22\x87\x21\x16\x98\x90\xed\x8d\x66\x85\x04\xff\xe3\x2e\x22\x1c\x96\x98\xe3\x6c\xc0\x19\xf5\xb1\xa0\xcf\xdf\x77\xfe\xad\x29\x6f\xa1\x08\xde\x0d\xb0\xbf\x1c\xdd\x3d\x14\x48\x9c\xd7\xf6\x62\x23\xab\xba\x93\x52\x8b\x6b\x61\x7e\x57\x1b\xe0\xc7\x6e\x4a\xd9\x52\x59\x3c\xc3\x54\x0e\xdb\x5f\xb5\xc4\xee\x1f\x10\x3f\x69\x1d\x4b\x0f\x55\xfd\x96\x74\x5c\xc2\x60\xb0\xdf\x6c\x58\xc2\x20\xc8\x00\x58\xab\x5b\x80\xea\x0a\x8b\xe9\x05\xdb\xfe\xbd\x44\x51\x1d\x1d\xbd\x7e\x86\xf8\x95\x8a\xc3\xc9\x09\xba\x23\x57\x50\xe6\xcb\x5b\x01\x98\x82\xd8\xef\xf8\x6f\x0f\x7f\x3c\x2a\x3b\x92\x1a\x9e\xfe\x18\x9e\xf0\xe9\x9f\x0e\x1b\x45\x14\x40\x94\xc5\xf4\x6c\x32\x2c\x69\xc8\x58\x11\xa5\xf3\x36\x6a\x7c\x39\xea\x04\xfe\x52\xc7\xe3\xb2\xc6\xa3\xea\x4e\x09\x64\x28\x49\x1b\x0e\x73\x86\xe7\xbe\x5b\x0a\xd1\x01\x80\x3f\x54\x34\x21\x0e\xe6\x1b\xe8\xb6\x30\x52\x36\x8d\x9f\xf3\xaf\x3e\x7c\xdf\x89\xbc\xd0\x26\x7f\x55\x37\xa3\xbf\x16\x87\x45\xdc\xa8\x76\xf3\x9b\xd1\x46\xef\x58\xa8\x3f\x01\x10\xb3\x95\xce\x81\x60\xa3\x32\x69\x01\xb8\x60\xeb\x21\x7e\x49\xbf\x5f\x03\x35\xd5\xd4\x21\x28\xad\x29\x55\x47\x3a\xc2\x07\x58\x66\x86\xf8\x36\x6c\xef\xd7\xbe\xd0\xb8\x54\xba\xc3\xab\xe8\xc5\x87\xc6\xd9\xfe\x3
1\x88\x40\x9f\xfd\x1e\xf2\x23\x68\xdd\x99\x1d\x56\xec\x55\xf9\xbb\x69\xc7\x55\xda\xf7\x8d\x13\x0f\x97\x40\x81\x27\xaa\x09\x3f\xbd\x1e\x3f\x3d\x0a\xe0\x01\x30\xa5\x40\x5c\x52\xf1\x65\x47\xd2\x31\x39\xb3\x10\x5e\xd2\x70\x58\x92\x70\x68\xe2\x58\x4f\xf2\x78\x75\x87\xc5\x16\xd7\xa2\xec\x62\xc6\x45\x06\x81\xc1\x0a\xb3\xe6\xc7\x87\xb6\x3c\xe8\x74\xd6\xdb\x73\xb9\x4a\xb6\x62\x0a\xb0\xaa\x8d\x0e\x56\x43\xb2\x09\x34\x25\x0a\xc9\xc1\x3e\x47\x4e\xf0\x01\x17\x1d\x80\xbf\x1d\x5f\xe3\xb6\x16\xbb\xf2\xa5\xcd\xfe\xe6\x32\x33\xbf\x9a\xaa\x2a\x59\x70\x2f\x56\xcf\xcc\x57\x70\x1d\x3d\xe1\x8d\xa1\xc2\x5a\xdb\x6d\x0b\x62\xbc\xb0\xc0\x4d\x91\x2b\xfa\x6d\x60\xaa\xc8\x1e\x1b\x6b\xce\x8e\x38\x2a\x12\xbb\xd1\x3b\x81\xf5\x0f\xe0\xe8\x74\x67\x59\x9b\xb2\x56\x38\xa5\xa7\x4b\xc1\xc7\x78\xdc\xd7\x3e\xb2\xfd\x1f\xeb\xe4\x57\x24\xf3\x01\xcd\xf6\xe4\xaf\xdb\x79\x99\xe9\xdc\x82\x7f\xa4\xb2\x91\x5b\x28\xc9\x8c\x11\xf0\x6f\xa7\xc2\x68\x8b\x25\x2f\x2f\xdc\xd6\x91\xf6\x7c\x17\x04\xc9\x36\x53\xdf\x7d\x0e\x07\x27\xde\x43\x2d\x38\xa1\xcc\x18\xdf\xc0\xdf\x3f\xfd\x33\x20\xd7\xff\x18\x7c\xaf\x32\xa4\x38\xbb\xc5\x71\xdb\x29\x42\xc8\x51\x6c\x6f\x28\x8c\x96\xb4\x1c\x66\xd5\xd5\x2c\x57\x65\x01\x89\xbb\x41\x74\x7d\x12\xfd\xf5\x07\xc5\x7b\xc6\x96\xb9\x3d\xc9\x86\xa8\x31\x32\x31\x50\x40\xc7\x4d\x06\x25\x8e\x01\x19\x50\x6a\x9e\xbc\x12\x0d\x82\x2e\x48\x1a\xe8\x3b\x8a\x24\x07\xfc\x8c\xb9\xa1\x09\xd1\x6c\xb6\xf0\x3c\xd1\xdc\x73\x84\xb2\x4f\xa6\x35\xd1\x28\xca\xcb\x7f\x33\x23\x3a\xab\xc1\xf6\x07\x95\xb5\x55\x1b\x53\xf8\x5f\x0b\xe2\xee\x9b\x1d\xe6\x44\x42\xa2\x02\x5a\xec\xe3\xf9\xf0\x11\x97\x86\x54\xdb\x4f\x2b\x72\x32\xc7\x62\x5d\xb3\x2e\x73\x3a\x3e\xf9\x35\xaa\xe3\x32\xa7\x85\xa9\x67\x7f\x7c\x2a\x2c\xd4\x54\xad\x28\xe9\x07\x8a\xbb\x6f\xe9\x93\x16\x27\x92\x79\x17\x59\x8c\xe8\x41\x5a\x5c\x23\x2d\x9c\x7e\x20\x14\x0b\x17\x2b\x9c\x62\x65\xe8\xd7\x5d\x4c\x3f\xc3\x65\x87\xba\x62\xbb\xad\x54\x5a\x56\x37\x7d\x34\xc8\x2e\xaf\x05\x53\x2b\xf2\x73\xb0\xdb\x91\xd3\x65\x4d\x47\x3c\x67\xf0\x57\x5f\xcb\x71\x59\xd3\xa6\x0f\x7e\x67\xb6\x92\x79\x38\x2e\x4b\x92\x86\xfb\xc6\x3c\xd8\x6f\x5e\xf5\x56\x71\x25\x50\x6a\xc2\xa4\x4d\xeb\x71\x0b\x0d\x23\x22\xc8\x37\x1e\xe1\xc3\x46\x52\x59\xfc\xcd\x86\xb2\x56\x17\x0d\xaa\xb8\xbb\x33\x86\x4b\x97\x27\xcf\xfe\x31\x33\x3d\x06\x97\x12\x0d\x49\x2a\x36\xbb\xc7\x10\x3b\xfe\x1c\xa7\xf6\x84\x4a\x23\x71\xb2\xcc\xf4\xd1\x65\x4d\x90\xce\x33\xe8\xf4\x25\xb8\x17\xfc\x5c\x21\xbb\x5e\xe1\xdd\x37\xf5\x5f\xaf\xe7\xe9\x22\x0c\xce\x8d\x67\xc3\x5a\x86\x21\x09\xe9\x2b\x43\xa6\xec\x80\xc8\x83\x7b\x64\x9c\x08\xc7\xbb\xab\x77\x9c\x48\xc7\x65\x6d\x7a\xe7\x97\xba\xad\x04\xd1\x55\x14\xc0\x45\xa0\xb9\xfd\xc2\xc0\x96\xd6\x9b\x22\xc0\xc8\xb2\x8b\x51\x80\x85\x4b\x1f\xc5\xad\xc7\x08\xa5\xb2\x20\xf1\xca\xaf\xd9\x06\x84\x0e\x07\xbc\xe4\xdb\x26\x8e\x7e\xdf\x4e\x94\x04\xfb\xd4\xda\x4b\x9a\x93\x57\x00\x60\xf9\x56\x9f\x3f\xb6\x2f\x86\x0d\x71\xda\x15\x29\x55\x46\xa6\x2e\x49\x4a\x1b\x87\x22\xab\xf4\x75\x79\x80\x24\xcd\x03\x18\x48\xb1\xd2\xc9\xce\x93\x85\x3f\x94\xc1\x27\x3b\x5d\x60\x84\x2e\x46\x11\x01\x9b\xd3\x4a\x8c\x0b\x63\xff\xf3\x90\xb9\x75\x71\x71\xcb\x31\xaa\xbe\xa5\xf0\xe1\x0b\xdd\x1d\x61\x7b\x74\x83\x89\x3c\x88\xa2\x60\x9f\x62\x94\xb4\xef\xe4\x2d\x64\x9f\x6f\x63\x21\xb1\x48\x71\x7e\x1b\x5a\x48\xff\xce\xc9\xf9\xe5\xcf\xc2\x46\x23\x56\x0e\x8a\x3b\x0c\x6e\x44\xb8\x5e\xff\xe7\x0d\x5d\xa9\xa3\xc4\xbe\xd4\x9e\x52\xa1\xc0\x8a\xce\x6c\x48\x60\x23\xa1\x1a\x84\x8b\x85\x62\x0b\xd3\xc6\x87\x42\xf5\x5c\x91\x5f\x79\x3f\x04\x07\x0a\x3b\xce\xb1\x31\x4f\x11\x1d\x7d\x03\x2c\x09\x94\x01\x5b\xe8\xff\x10\x73\x56\xb9\xa6\x46\x14\xce\x4b\xc6\xcc\x18\x85\x7b\x04\x5a\xb6\x11\x99\x57\xec\x
00\x3a\x0e\x75\xe4\x4b\x28\xcc\xff\x3d\x95\xd6\xf7\x3b\x25\xb5\x20\x45\x8b\x43\xec\xe5\x80\x23\x83\x3f\xa8\xe1\x38\xd9\x34\xf8\x00\xa2\x4f\xb2\x42\x69\x26\xe6\x38\xd3\x40\x3a\x01\xa9\x48\x64\x0b\x8e\x2d\x20\xa8\x84\x30\x12\x10\x3c\x1e\xfc\x40\xc8\xb2\x11\x91\x4d\xa9\x74\x99\x95\x35\xee\xee\xe5\xd9\xf0\x0f\x4d\x25\x5d\x3f\x1f\x9a\xc4\x76\x30\xf8\xd6\x0b\x86\x55\x85\x7c\xd0\x57\x6b\xc4\xaf\x86\xf1\x8d\x7e\xc8\x8a\x0b\x7d\x53\xba\x94\x05\xa0\x57\xc0\x0f\xa7\x48\xad\x52\x2a\x85\x47\x87\xba\x70\xeb\x71\x69\xe3\x58\x27\xdc\xdf\x1d\x32\xb1\xc8\xb6\x35\x8f\x2b\xd3\x6b\x70\x4d\x78\x6c\xe9\x6e\x8d\x3f\x81\x2a\xcd\x17\x57\x7e\x7f\xbe\xef\x4f\x31\x6b\x59\x56\xb9\x7a\xed\x9c\x19\xe4\x10\x41\xb7\x9d\xdf\x6d\xc1\x91\x47\x2c\xdc\xe8\xd1\x0f\x19\x6b\xa0\xb9\xf1\x7a\x20\x44\x01\x5a\x6c\xc9\x8e\xe2\x14\x50\x18\x04\x57\x21\x58\x06\x13\xa4\x60\xe5\xb5\xbe\xbc\x3a\x25\x55\xaa\x51\xa3\xcd\xdb\x0d\xe8\xc6\x24\x7b\x0f\x54\x7a\xc3\xbb\x8f\x84\x5f\x89\xab\x05\x1e\x04\x5a\x0f\x78\xdf\x7e\x12\x05\x4f\x6e\x89\x9c\xba\x3d\x22\xff\x98\x42\x38\xaf\x6a\x8b\x15\x96\xd8\x66\x26\xd5\x61\xea\x90\x45\x0f\xe8\x6d\x84\x23\x4a\xa0\xd5\x41\x33\x6f\x24\x33\xf8\x88\xb5\xb9\x89\x8e\x9f\x70\xc5\xd9\xb8\xd8\xfc\x37\x5f\xdd\x83\x91\x6c\x5a\x63\x99\x35\x3c\x81\x67\xc3\x1d\x0f\x09\xf6\x1e\x2a\x53\xf2\x14\x08\x68\xb4\x42\xeb\x58\x49\xc6\xa7\x04\xe4\x75\xea\xbb\xaf\xad\xdc\xaa\xb4\x83\x01\x89\x3e\x4e\x2f\x1d\x5e\x11\xcb\xc8\x6c\x79\xc9\x9e\x35\xa1\x04\xab\x36\x63\x2a\x77\xc6\x43\x8c\xd2\x9c\x0a\xcd\xcc\x6e\x75\x5e\xdb\xc2\xec\x7c\xed\x17\x03\x51\xc5\x11\x6c\xbf\xa8\xf4\x1d\xe8\x32\xb5\x84\xe9\xb8\xcc\x1a\x4e\x01\x1d\x0f\xdb\x8d\xda\x6b\x62\x48\xdc\x8c\x31\x6e\x36\x4c\x57\x52\xcd\x09\x1f\x5b\x27\x58\xad\xba\xfc\x83\xd5\xce\x2e\xc3\xa1\xfa\xee\x9b\x87\x53\x94\xe2\x5c\xe3\xc4\xd3\xe8\xbf\xfd\x79\x3f\xce\x7e\x42\x05\x07\xa2\xa6\x83\x39\x41\x96\x6b\x31\xf6\xcc\x14\xe3\x4a\xc3\x6a\x12\xf0\xc4\x81\x54\x24\x83\x34\xbd\x37\x73\xf2\x9e\x0a\x38\x43\x1e\xeb\x55\xb1\x91\xdd\xdf\xdf\x3a\x98\x5f\x06\x60\xc5\xfd\xc3\x68\x04\x69\xe4\x24\x31\x7c\x06\x8c\xfb\x74\xbd\x57\x30\xb7\x9b\xdf\x2a\xd3\xf8\x52\x04\x75\x86\x90\x85\xe6\x3d\xe0\x8d\xc9\x9b\x91\x9f\xd1\x8b\xcf\xab\xcc\x55\x29\xa3\x6e\x30\x00\x7a\x13\x11\x74\x5a\x95\xf2\xfe\x2f\xe2\xe9\x8a\xee\xd9\xb9\x90\xbc\x87\x81\xfa\x1f\x6b\x87\x52\xaf\x9a\x2c\x22\x11\x85\x35\xc5\x72\x91\x72\xfd\x71\xf7\x0a\x1f\xdb\x11\xc9\x03\xed\x35\x13\x26\xef\xc7\x79\xdd\xea\xf6\xad\xf3\x4e\x7e\x4d\xca\xcf\xfd\xb5\x73\x4f\xea\x5f\xe7\x27\xbc\xfd\x2c\x86\xed\x4b\xec\x34\x20\x64\x73\x53\x49\x79\x7a\x37\x96\x6f\xaa\xdb\x79\xbd\x88\x70\xdf\x58\x34\x48\x87\x6d\x33\x51\xf6\xec\x34\xda\xff\x7c\x87\x0e\x68\x66\x87\xe9\x07\x5d\x44\xc1\x53\xfd\xbb\xe7\xbf\xb5\xda\x70\x85\xdd\xba\x53\x3a\x15\x4a\x1a\x0c\xb5\x7c\x26\x41\xc4\x37\x75\xd6\x3b\x3d\x54\xf0\x38\xef\x8b\x7c\x30\x1e\xaa\xda\xca\xe7\xc3\xef\xf5\x9b\x8f\x23\x59\x9e\x96\x00\xa8\xf7\x61\xf7\xfa\xaf\xee\xac\xe8\xbb\xd1\x86\x17\x04\xef\x0d\x7f\x43\x96\xd1\x23\x62\x94\x1f\xb6\x57\x18\xcf\x9e\x2c\x43\x72\x49\x89\x31\xe4\x8e\x7c\xf4\x4e\x29\x4c\xd7\xdc\xa0\x06\xb2\x6f\x1d\xa1\xbe\xe8\xf6\x4b\x12\x3a\x63\x37\xc8\xf3\x3d\xfa\xf7\xa0\xb8\xe1\xdd\x72\x1b\x0b\xf2\x75\x7b\x61\x35\x69\xc1\x98\x48\xd7\xaf\xe5\x3a\x89\xe0\xf4\x83\xe6\xa2\x17\x4c\x35\x11\x61\x73\xc8\x19\x6d\x7d\xab\x13\x30\x70\xdd\x22\x62\xf4\x45\xf0\xf3\xc1\x62\x61\xd2\x6b\x0b\x14\xc1\xf1\xa3\x1f\x9a\xeb\x7c\x70\x59\xd8\x5e\x9a\xf0\x91\x85\xfe\xd1\xfa\xbd\xca\x36\xe1\x2e\x54\xd6\xd4\xc6\xa3\xc9\xd5\xb1\xed\x70\x4b\x1c\x0e\xcb\x19\x79\xf8\xbe\xb6\x31\x21\xf3\x8c\xfd\xfb\x34\xa8\xf1\xd4\x97\xda\x2d\xd2\xb8\x2e\xa2\xc4\x1f\xac\xba\x7f\x85\
f\x61\xc0\x2f\x95\xa2\x64\x7f\x53\xb7\x32\x70\xc5\xc7\x0a\x73\xe5\x00\x70\x1f\x81\x22\xbc\x46\x72\x43\x67\x58\x17\x14\xa3\xaa\xaf\x03\xf5\xcb\x0f\x97\x92\x3e\x10\x3d\x1f\x3e\xef\xd4\x94\x2a\xf6\xa6\xe3\x70\x58\x5b\x79\x02\x57\x31\x92\xfc\x8e\x02\x70\x0a\x13\x0f\x84\x0c\x3a\x30\x0f\xd7\x95\x38\xd1\x7d\x3c\x45\xf7\x02\x44\xf8\x1e\xbf\xed\x40\x88\xf7\x62\x04\xe7\xba\x88\xb9\xc9\xe6\x35\x46\x78\x06\x4c\xda\x66\x04\x91\x53\xb9\xf3\x30\x1a\x0a\xd9\x5b\x60\x45\x54\x4c\x1a\xc7\x68\x6f\xa1\x68\x1d\x45\x47\x5e\xf8\xe4\xb9\x5c\x92\x64\x22\x31\xc4\x10\x9e\x6b\x23\xe2\x80\x13\xbf\xac\x38\x61\xf3\x31\xb0\x6f\x02\xf3\x90\x7f\xd8\xea\x92\xe1\x2f\x44\x6f\x4c\x67\x34\x1c\xbc\x10\x02\x2b\x66\x90\xa9\x35\xee\x3e\xf7\x12\xd7\x3d\xd9\xee\xe9\x35\x02\xe7\x81\xea\xc2\x1f\x3b\x67\x12\x40\x91\xbb\x0c\x1e\xc0\x14\x61\x53\xdb\x0f\xe3\xb2\xa6\xd5\x45\x02\x22\x65\x70\x0b\x44\x54\xf0\xca\xb2\x0c\xb8\x56\x94\xc4\x58\x05\xed\xa5\x16\x39\x90\x25\xcf\x63\x5b\x9a\x74\x0e\x75\x27\x40\x52\xc8\xae\x5e\x9a\x9d\x42\xf6\x49\x58\x4c\xc5\xf7\xd5\xb8\xf9\xe2\xf9\x9f\x87\x12\xb9\x0c\x7c\xa0\xb3\x24\x77\x8f\xb1\x45\x6f\x81\x68\xcf\x0f\x24\xa7\x01\x0e\x38\x8f\x02\x71\x5e\xc4\x0a\x37\xaf\x49\x21\x0e\x1e\xa1\xb7\xf4\x98\x8a\xbd\x7b\xd7\x5a\x81\x83\x00\x4e\x08\xba\x6c\x05\x93\x52\x29\x89\x6f\x36\xe1\x3d\xcf\x77\xd7\xb8\xfb\x68\xa5\x5b\x59\x87\x0d\x26\x30\x95\xab\xff\x15\xfc\xb2\xbf\x5c\xe4\x9b\x1b\xe5\x73\x68\x21\xfe\xbc\xe6\xd2\xbf\x45\x79\x29\x3e\xc3\x5a\x91\xbb\xa9\xc1\xbe\x69\x6f\xf3\xeb\xbd\x0f\xbf\x49\x84\x0c\x32\x03\x72\x3b\x79\xd8\x12\xc4\x81\xe8\x3c\xfa\x9c\x7e\x0f\xff\xfc\xc4\xef\x99\xa9\x0c\xfb\xcd\x47\x6a\x7a\x5c\x2c\x57\x2b\xbb\x3e\x18\xc9\xe0\x43\x86\x54\x53\xfe\x54\xa6\x87\xf7\x44\xe0\xd6\xfe\x3c\x4a\x4e\x3c\x3a\xbd\x21\x83\x4d\x7d\xf0\x1a\xe6\x18\xe0\x48\xb9\xf5\x48\xb6\x7c\x0d\x59\x7f\x7f\x9f\x5d\x45\x60\xb4\x0f\xe1\x36\x6d\x30\xa1\x5f\x15\x25\x3a\xb6\xff\xcc\x58\xd6\xa0\x1b\xa5\xb6\xb1\xa7\xf4\x70\x0b\x0c\xd1\x5d\x6a\x5f\xf3\x83\x62\x21\xb2\xbf\x87\xb6\xd4\x36\xfc\x7e\x3c\x70\xb0\x0b\x22\xf3\x0b\x35\x46\x24\xff\x60\x90\x8e\x97\x11\x19\xe8\xb0\xc1\x7b\x12\x60\x8d\x2d\x31\x01\x4e\x96\x19\x23\xe9\xc0\x83\xfe\xf8\x3e\xec\x17\x1f\x78\x5a\x12\xb6\xc4\x1a\x48\x7b\xd2\x1d\xcb\x22\x81\x5b\x12\x68\xa9\xad\xd8\xec\xd9\xf8\x82\x0f\x54\xf0\xfe\xc4\x1f\x20\xba\x0c\x5f\xf2\x37\x2f\xa8\xaa\xa3\x0a\x16\x7b\xcd\x2d\xdd\x8a\x33\xc9\x91\x82\x7e\xe6\x27\x8c\xff\xc7\xde\x75\xb4\xde\x8f\x73\xe7\xfd\xfb\x29\xc2\x64\x15\x0c\x71\x6f\x33\x24\x60\xdf\xeb\x5e\xaf\xbb\x4d\x36\xee\xbd\x77\x87\xf7\xbb\x87\xfb\x1b\x08\xff\xc9\x2e\xfb\x11\x78\x61\x21\x74\x2c\xe9\x41\xe7\x3c\xb2\xa4\x47\x30\x10\x5f\x09\x9f\x58\x97\x5b\x8a\x0e\xb7\x03\x00\x89\x93\x1a\x15\x01\x79\x03\xb3\x16\xeb\x02\x41\xa7\x47\xbe\x30\x63\x68\x62\x68\xa7\x99\xc1\x46\xe6\x82\xe1\x3b\x15\xbd\x02\x9d\xbb\x8b\xbc\x17\xef\x81\x36\xec\xe5\x6b\x1c\x15\xbd\x89\x90\x13\x8a\x49\x2d\xa9\x52\x59\xb1\xdb\x79\x5e\x66\x5a\x94\x88\xb1\x50\xf1\x24\x42\x33\xf9\xbc\x62\x3a\x49\x81\x42\x5f\xfb\xc7\x6b\xb0\xf5\x96\xbc\x21\x42\xb0\x98\xaf\xbe\x6c\x74\xe6\x20\xa3\x22\x8c\x76\x52\xad\x0d\x00\x43\x01\x29\xa1\xb0\xb0\x7c\xe4\xf1\xaf\xe2\x10\x1b\x80\xc8\xb5\xc1\x13\xca\x2c\x78\x00\xf0\x38\x50\x62\xdd\xf5\xb8\x93\x7d\x6f\x70\x46\x8c\xce\xc1\xe2\x20\x2c\x8e\x36\xc9\x0c\xa0\x97\xd8\xdf\xac\x43\x8c\xf9\x0f\xf1\xa6\x11\x52\x5f\x60\x02\x1f\xe1\x18\x8c\x55\x2b\x0f\x06\x3c\x74\x39\xf6\x31\xbb\x73\x2f\x8c\x01\xa1\x87\x04\x4b\x05\xca\x38\x20\xba\x70\x8f\x0a\xcb\x9c\x82\x67\x1c\x0e\x37\x70\x0a\x48\x1b\x40\xb4\xfb\x39\x1e\x64\x1c\x34\x9a\xa1\x0d\x2b\xde\x7c\xf1\xdf\x66\x6d\x13\x44\xfa\xef\x7b\x81\x32\x47\x53\x5f\xd8\x4e\xae\x86\x
f8\x06\x03\x12\x47\x4f\xa0\xa0\x11\x81\x0e\x26\x22\xef\x40\xba\x13\x17\x88\xd4\x45\x75\x0f\xb2\x2f\xa1\x9b\xe3\x7a\x24\x34\x15\xd8\xf3\xf0\xe8\x63\x3a\xde\xa8\xab\xa5\x85\xe6\x08\x21\xf9\xae\xc6\x58\x24\x39\x86\x6a\x42\x40\x47\xef\xb4\xe3\x2d\xd7\x95\x6e\xb5\x18\x09\xa3\xd2\x86\x46\xee\x80\x78\xf6\xbc\xaa\x8c\x96\x1b\x60\x3f\xca\xdb\xfb\x18\x07\x0b\x7e\x52\x89\x0d\x8f\x22\x4f\x8a\x0d\xe5\xf8\xae\x6c\x8d\x3d\x78\x23\xe7\xeb\x1c\x15\x67\xc2\x30\x2e\x38\xab\x59\xbd\x29\x51\x41\xb7\x89\x26\x82\x80\x89\x89\xbc\x3c\xa3\xb0\xaf\xc6\xdb\xb4\x96\xdc\x0a\x33\x33\x38\xfa\x45\x9a\x95\x57\xd3\x1f\x10\xb9\xe4\x85\x5a\xec\xa8\xd7\x9d\xaa\xeb\xf1\x56\x6e\xd2\x17\x11\x0f\x13\x60\x0c\xc8\x02\x8b\xcd\x89\x24\xfa\x22\xdf\x0a\x15\xbb\x9d\x6c\xbd\xd9\x93\xcc\xf7\x00\x21\xa7\x39\x12\x86\x79\x0b\x6d\xb7\x9b\x6e\xfd\xf5\xa3\x4f\x36\x49\xe1\x8c\xd6\x61\x07\x04\xb5\x19\xae\x36\x0c\x60\x67\x6e\xfa\x41\xec\x99\x8f\x66\x47\xed\x99\x0c\x57\x9a\xbc\x6e\xcf\x82\x6f\x20\x5d\x5d\x86\x27\x6e\x2e\x0a\x19\xef\xc6\xa3\x62\x47\x47\x2f\x0d\x5f\x04\xb9\x2b\xfd\xe2\xad\xc4\x1a\xa0\x74\xe4\xe1\x39\xda\x94\x20\x08\x01\x05\xd1\x98\x51\xb8\x12\x54\x9e\x1a\xc2\x1b\x3c\x0c\x91\xb0\x87\x29\x82\xf0\x22\x4f\x14\x3c\xdd\x30\x80\x28\x7f\x48\x64\x80\xd2\xa0\x55\xd3\x58\xe7\x52\x76\x6d\x7f\x28\xd5\x78\xb0\x2f\x4f\x24\xc0\xe3\xaa\x63\x08\xda\x92\x0f\x75\x38\x97\x98\x0f\x8b\x72\x67\xb1\x04\x2d\x11\x4d\xd0\x09\x93\x62\x82\xc4\x80\xfb\x38\x2b\xcd\x0a\x1a\x3a\x9a\x23\xb3\x63\x0f\x6a\x47\x49\x3a\x52\x0c\x24\x50\x15\xe6\x50\xde\x14\xc7\x5b\xa1\x3f\x6e\x1d\xcd\x62\xa0\x18\xd3\xfb\x3e\x84\xfb\x62\x63\x40\x6e\x46\xbe\xdb\xf1\xd5\x13\x58\x9c\xc5\xa5\xab\xaf\x5e\x82\x7a\x05\x72\xb2\x42\x93\x62\xb7\xbb\xaf\xfa\x9e\x43\x8f\x20\x05\x9a\x27\xbe\x22\x31\x58\x87\x40\x80\x5f\xc0\x4a\x50\x2f\xb6\xc6\xc3\x35\x3a\xf2\x17\x44\xda\xd0\xc6\x9a\x62\xd3\x21\x64\xde\x42\x86\x77\x2a\xd3\xac\x94\x5a\x5a\x1d\x2d\x99\x9b\x0f\x6c\xdb\x70\x40\xd9\x2e\x15\xf0\x66\x42\x50\x91\x86\x14\xf5\xee\x3e\x9f\x11\xab\xc4\xf5\x6e\xb5\x39\x21\x80\x62\x7b\xf3\x9c\x72\x4b\x11\xf6\xb3\x47\x6c\xeb\x9f\x90\x67\x4f\xb7\xe1\x45\xd9\x50\x58\x7b\x0f\x96\xdd\xbf\x97\x2b\xd4\x26\xdb\x6a\x1d\x9e\xbe\x37\xbb\x10\x6a\x75\xa0\x82\xea\xc1\x57\x28\xd3\x06\x67\x6c\xc0\xeb\xfa\xb8\x22\x0b\xa4\x6f\xa6\x55\xde\x65\x39\x4a\x2f\x5d\x81\x0a\x58\x91\xf9\x17\x4c\x83\x07\x85\x67\x7b\xcd\xb8\x0f\xef\x39\x2e\x6a\x3e\xd7\x0a\x3b\x8c\x9d\x3a\xdc\x25\x59\xef\x00\xa6\x0b\x97\x3e\x16\x4a\xdc\xc9\xb5\xea\xec\x2d\xc9\xcd\x60\x33\xc4\x06\x60\x3b\x67\x5a\xc7\x39\xee\xbf\x3c\x20\x9f\x53\x43\xb4\x4b\x40\xed\xb2\xbd\x87\x79\xde\x4a\x7d\xef\x86\x0b\x63\x70\x7a\xa7\x9f\x66\x85\x3e\x52\x4f\x7b\x8c\x07\x2c\x35\x81\x66\x8f\xcf\x01\x0b\x8d\xb5\x3f\x2b\xab\xe4\x43\x42\xcf\xa7\xe6\x8c\x84\x5f\x62\xcb\x86\xfb\x19\x9a\xca\x7c\x4d\xd7\x68\x98\xf0\x9c\xd4\x74\x10\x66\xbc\x27\x97\x28\xda\x54\x9c\x8d\x66\x24\x8d\x4e\x18\x1c\x76\x29\xb5\x56\x36\x97\x27\x25\x0d\x73\x28\x90\xc4\x68\x78\xfb\x76\x3b\xb9\x90\xcf\x23\x5b\x62\xa4\x35\xc4\xf7\xb5\x12\x89\x5c\x42\xdc\x94\xd8\x05\x7a\xa5\x01\x4c\x83\xf4\x76\xe2\xb7\x59\xb6\xef\x58\x17\x01\xfb\xd3\xb9\x87\x0d\x00\x13\xed\xa8\x12\xbf\x27\x1b\x81\x51\x69\x61\xa0\xa8\x7e\xa7\x15\xa3\x00\xb9\xd3\xe2\xaa\x55\x42\xa9\x2a\xb0\x65\x72\xa7\x54\xcf\x57\xa1\xb7\x78\x33\xa2\xe4\x6c\x73\x49\x28\xbd\x46\x82\x0d\x45\x66\x7b\x1b\xe4\x5e\x20\xa0\xac\x4a\x93\x98\xee\x01\x44\xca\xef\x06\x03\x12\x73\x10\xb4\x05\xe7\x5b\x8f\xbf\xbe\xfe\xb1\x84\x0a\x23\x96\x71\x0a\x88\x25\xb6\x5f\xa9\xa2\x50\xa1\xc7\x18\x1c\xe7\xc2\xc0\x6e\x05\xbc\x51\x8d\x91\xad\x36\x68\x53\x46\x49\x9a\xe3\x63\xa6\xee\xe5\xdc\xac\xce\x9b\x48\xf4\x6f\xf4\xff\x62\
x3c\x42\x52\x81\x57\x8d\x14\xcf\x8a\x14\xe6\xa7\xa6\xf3\x7d\x38\x86\x77\x17\xd3\x7d\xd4\x47\x74\xfe\x78\x33\x94\x1f\xc3\x9e\xf4\xf8\x20\xdb\xb4\xcd\xb3\x67\x48\xd5\xd2\x6e\x55\xea\xd2\x2b\xf5\x38\x39\xd5\x49\xee\x9f\xe7\x4c\xa3\xc8\xf6\x1b\x6b\x2e\x4c\x94\x32\xde\xe5\x4c\xa4\x39\x4c\x44\x7d\xd8\x13\xc0\xee\xde\x40\x10\x23\xfa\x4c\xec\xea\x8c\xbd\xab\x73\xac\xce\x03\xb2\xdb\x4d\x5d\x30\xab\x78\x2f\xd3\x59\xa6\x41\x61\x28\xf0\xb6\xbc\x11\xe1\xcd\x0e\x25\x56\x2b\x74\x23\xf6\x90\x68\xd1\x81\xcb\xb3\x23\x94\x28\xfc\x0c\x1c\x4c\x51\x9a\x84\x84\x08\x65\xa7\xdf\x40\x16\x1c\xd7\x39\xba\xae\xc8\x55\x89\xff\xac\x2d\xcd\xca\xbd\xfd\xf0\x06\xea\xd9\xf6\x8d\xf0\x15\x55\x88\x57\xba\x10\x68\x57\x9c\x9f\xd7\xe8\x67\xc6\x90\x21\x22\x20\x57\x09\x42\x1d\xc3\x25\x60\x19\x58\xec\x50\xaa\x8b\x5c\x25\xd9\xc7\xe1\x8a\xc0\xb1\x10\xe8\xac\x0b\xf9\xe7\xcc\xa8\x7a\x55\xc3\x46\xba\xb5\x9a\x10\x2b\x0f\x20\xf3\xc5\x03\x7a\x72\x3e\xbb\x48\xb8\xa8\xcf\x2b\x60\x42\x33\x45\x55\x1b\xde\x2c\x4e\xf7\x8e\x05\xa2\x72\x93\x6f\xa5\xed\x3b\x5f\xe3\x40\x31\xdc\xa9\x0a\xbd\xca\x98\x35\x51\xf2\xdb\x37\xd7\x9a\x08\xd7\x07\xa0\x65\xdf\xf3\x02\x18\xc6\xdd\x25\x8c\xb2\x57\x40\xd2\x40\xe3\x62\x2a\x47\xf4\xcb\x8c\x16\x13\xb2\x06\xd9\x49\xe8\xdd\x0c\xc7\xb7\x51\xe6\x5a\x03\xd2\x66\xec\x0f\xd3\x16\x6f\xc1\xb7\x9f\x8a\x03\x25\x36\xef\xe1\x2b\xad\x68\x10\x27\x7e\xc8\xec\xc6\x6d\x8c\xce\x4d\x17\xd6\x1c\xc3\x79\x9c\x6a\x2c\x44\x90\xda\x8d\xa2\xcb\xa8\xe5\x30\x87\x07\x50\xaf\x5c\xb9\xb5\x46\x74\x32\xa2\x0a\xdc\x4e\xf6\x5d\x58\x77\x3c\x2f\xf0\xa0\x8c\xbb\xb6\xe7\x85\x6e\xaa\x72\x6f\x89\xd7\x41\x77\x88\x83\x77\xfb\x68\x8f\x36\x69\x85\x48\xef\xe0\x8f\x1e\x6c\x27\x4d\x02\xb6\x28\x37\xb5\x70\x97\x2d\x21\xe2\xce\xdb\xc3\xc9\xf3\x96\xe7\x65\x3e\x17\xb7\x4a\x05\xe9\x8e\xfb\x68\xaa\x7d\x2e\x51\xa4\x5d\x50\x26\x66\x37\x1d\x3c\x17\x28\x0e\xe3\x44\x6b\xe4\x86\x10\x97\xac\xbe\x38\xf7\x75\x1c\x30\xf9\x80\xd2\x93\xa2\x0a\xd2\x2e\xf1\x05\x4d\xaa\x4d\xe8\x7d\x65\x3a\x24\x6f\xf6\x0a\xd3\x7d\xbd\xbd\xcf\x6e\x4b\x6c\x7c\xa1\xd6\x27\x3d\x9e\x38\x7a\x82\x1b\xaf\xfa\x94\xae\x09\x42\xe5\x27\x25\x3c\xba\xd3\x84\x37\x1a\xe6\xba\x20\xe1\xa3\x06\x67\xec\xc7\xe5\xa9\x42\x7c\x6e\x84\x4c\x27\x12\xd6\xb9\xb1\x57\xae\x9d\x3b\x57\xb5\x0d\x84\x9c\x48\x9a\x74\x69\x14\x65\x1d\x5f\x65\x3c\xc7\xfd\x3c\x03\x45\xaf\x7d\xf9\xb5\x6a\x43\xdf\x21\x39\xd6\x92\x72\xaa\x54\x57\x1d\xa8\x96\x1a\xe9\x32\x73\x99\x75\xda\x9f\x3b\x81\xeb\x71\x52\x90\x6d\x89\xe1\x2d\xf6\x97\x68\x87\x88\x58\xff\x5c\xfb\x8b\xc5\x66\xb9\x4e\x06\x04\xa6\x83\x64\x43\xa8\xe0\x2e\x04\x64\xa0\x97\x18\xd9\x60\x92\x1e\xd8\x2b\x4b\x7d\x2e\x75\x3a\x2b\xf0\x60\x1a\x07\xe3\x2c\x86\xf3\xc3\x1c\x09\x2d\x32\xfc\xa6\xd2\x8e\x6e\xeb\x1f\x6f\xee\x40\xb0\x18\x50\x92\x8e\xb5\x7b\xb3\x59\x8a\x48\x6c\xaa\x38\xdc\x19\x0f\x60\xc5\xef\xa6\xf8\xe3\x0a\x80\xcf\x5f\xbe\x5c\x45\x81\xe7\xa5\x95\x76\x38\x6b\xe9\x7c\x2e\xfd\xfd\x79\x34\xd2\x38\x57\xf5\x75\x6f\x53\x82\x33\xe5\xae\x35\x28\xb5\xc9\xcf\x41\x7e\x19\xcd\x86\x90\x79\x61\x06\x33\x1f\xee\xef\x91\x10\x2a\x28\x23\xad\xd5\x33\x87\xa7\x82\xc8\xcc\xb7\x2e\x7b\x18\x99\x96\xd5\xeb\xb5\x68\xfc\xc7\x5b\xd1\xa5\xa3\x41\xbe\x82\xb6\xd6\x79\xdf\x1f\xac\xae\xc7\x5e\xfd\xd2\xb3\x14\x00\x9d\x09\x4e\xd6\x20\xda\xc6\x17\xc3\xbc\xa5\xfa\x47\x6b\xb7\xc6\x76\xa9\x40\x45\xfa\x06\x33\x88\x06\xb8\x95\x5c\x88\xbc\x83\x49\x5a\x68\x72\x0c\xdc\xe9\x9f\x73\x56\x0a\x6a\x9a\xf7\x80\xcd\x4a\xed\x93\xd9\x1e\x90\x34\xa2\x3a\x61\x5f\x55\x90\x53\xd4\xd0\xa6\x16\x07\x44\xe6\xc7\xd7\x13\x03\x47\x43\xce\x3c\xc6\x5e\xda\x9b\x04\x1c\xa7\xc5\x0d\x8b\x33\xa5\x0b\x17\xad\x86\x50\x9b\x3e\x11\xd5\x25\x7b\x8d\xd9\x0b\xc8\xf2\x15\x79\xf7\x68
\xbc\xf5\xb0\x78\x26\x16\xc7\x30\xef\xf9\x1b\x47\xa3\x2a\xfc\xd0\x20\x41\xe7\x14\x50\x8f\xf7\xba\x28\x40\x07\x9e\xa5\xc2\x8e\x82\x31\x7c\x7d\x63\x96\x1f\xe9\xf4\x39\xfa\x9e\x61\x18\x46\xab\x1f\x5a\xb5\x21\x19\x03\x0a\x8b\xf3\x78\x33\x00\x91\x46\x84\x71\xaf\xb3\x5c\xcf\x78\x36\x32\x5b\x49\x23\x48\x46\x48\x13\x7f\xd6\x14\xde\x1e\x4d\x90\xb8\x60\x8a\x0d\xa2\xe4\xe6\x07\xd3\x26\xf9\x7a\x22\xd2\xc4\xe9\x9f\xc3\xbb\x5d\x8c\xe7\x7b\x90\x1c\x8a\x97\x62\x02\x63\x26\x2c\x60\x56\x60\x0b\x95\x6d\x9a\x08\x17\xe4\x89\xd6\x17\x59\x0b\x0c\xd0\x4a\xbd\x2e\x21\x35\x47\x14\x5a\x52\xdc\x65\x38\xd1\x8a\x24\x3a\x02\x5a\x63\x1b\xa8\x3f\x4b\xf9\x47\x89\xbd\x4f\xf6\x1e\x97\x9b\x02\x8d\xde\x91\x21\xa0\x30\x45\xf4\x06\x44\x5e\xb0\xca\x78\x7b\x5a\x98\xfd\xa4\xe2\x17\x86\xec\x99\x0a\xd5\xe2\x6a\x09\x7b\x16\x1a\x5f\xa6\x0c\x23\xbd\xc0\x99\xe8\xa7\x3a\x04\xcc\x06\xdb\xf5\xa2\x40\x67\x38\xdd\xb1\x8e\xba\xf4\x6e\xde\xa7\xc9\xf7\x86\x88\x2e\x94\x65\xfa\x91\x1e\xd2\x17\x1b\x29\x1e\xa8\xe8\xce\x25\x93\x64\x59\x0d\x7b\xbd\x19\x1e\x9c\xa0\x73\x53\xa4\xe0\x74\x0b\x36\x0d\x32\x2f\xc0\xbc\x1e\x0d\xd5\x7d\x32\x9b\xd0\x64\xd2\x7b\x30\x36\x9a\xa9\x29\x99\x95\x31\x04\x16\x04\x09\xef\x40\x10\x32\x3b\x02\x2a\x11\xae\xb4\x10\xe5\xdb\xed\xa6\xd9\xef\x21\x7f\x31\x55\xd6\xf5\x78\x4f\xfb\xf3\xe0\x3c\xb4\x2e\xf6\xb9\xa9\x36\xb4\xa9\x36\x7d\xaf\xcc\xe7\x75\xaf\x8b\xfc\x35\x65\x51\x80\xca\xdf\x64\x8d\xc1\xba\xe5\xf6\xf6\x53\x79\xbc\xee\xac\xdf\x98\x46\x40\xfd\x45\x20\x51\x77\xe6\x3f\xb5\x38\xb2\x0c\xf3\x62\x44\x5c\x70\x86\x02\x25\xe8\x7c\x47\x13\x9a\xea\x65\x3b\xec\x0e\x14\x49\x86\x08\xd6\x1f\x32\xc3\xd4\x29\x5f\x66\x62\x5b\xe2\x3e\xea\x63\x64\x5b\x22\x1f\xee\x42\xc4\x78\x0b\x89\xf7\x31\x61\xcf\xf3\x62\x3a\x3f\xc6\x5b\xad\xa1\x4d\xf9\x06\x1e\xb6\xf9\x60\x58\xa8\xaf\x08\x79\xf7\xdd\x14\x6f\x4d\x88\xc0\x8d\x8b\xa8\x5d\x8d\xae\x3e\x99\x09\x7d\x75\x32\xf4\x2a\x62\x80\x02\xe6\x5b\x06\xd0\x39\x5e\x50\x74\x0f\xc7\x7e\x37\xf5\x56\x1f\xf5\x3f\xbf\xff\xea\x10\xe6\xa8\xf3\x3b\x8b\xc2\xb1\xa0\x7b\x6c\xc9\x30\x2c\x87\xcb\x16\xc7\xbb\x59\x10\xc0\xb4\x69\xc7\x42\xd5\xc2\xba\x3f\xcd\xb1\xde\xb8\xb7\x5e\x4a\x02\x0c\xb1\x8c\x78\x15\x79\xd0\xe1\x9f\x56\x7d\x21\x94\xdd\xe2\xc6\x17\xf4\x36\x1a\xc7\xdd\x04\x75\x8d\xf4\x25\x01\x88\xde\x30\xa3\xd0\x39\x9f\x4b\x73\x5c\xe4\x8b\xa2\x35\x89\x6f\xbd\x09\xcf\x4f\xf1\x85\x71\xd1\xc1\xb2\xed\x76\xb3\xdd\x4e\x82\xd5\x47\x82\x35\x4c\x84\x3f\x4c\xdf\xef\xfa\xd6\xc6\x7e\x3e\x82\xb7\x80\x11\x65\x76\x74\xe0\x91\xf9\xf5\xc9\x4d\xa1\xf9\x40\xb1\x8e\xe0\x2b\xa2\x3b\x1f\x44\x73\x38\x44\xaf\x42\x89\x7d\x95\xe5\xf8\xc2\xb0\x5d\xad\x27\xcd\xe1\x32\x00\xcc\x7f\x94\xd8\xe2\xbc\x8b\x08\x7b\xc0\x01\xa7\xb5\x15\x90\x9d\x89\x7d\x11\xae\x36\xf9\x51\xf0\xbb\xd6\xa4\xc7\xeb\xd8\xe7\xb7\x04\x83\x56\x93\x31\xb6\xaa\x90\x6f\x0a\x54\x6f\x6a\xd0\x80\x22\x7d\x68\x10\x3c\x4e\x5c\x7f\xa8\x43\x7d\x52\xd4\xbb\x3f\x76\x57\xe0\xe6\x8f\x74\xa4\xeb\xe1\x79\x11\x14\xe9\xbb\x4c\xc5\x11\x18\x46\xe5\x55\xfe\x08\x57\x2b\x8d\x84\x83\xba\xc7\x32\xb2\x57\xba\xaf\xf2\x67\xdf\x9e\x52\xcf\xb3\xdf\xcd\x71\x93\x7c\x92\x8d\x65\x66\x4c\xe8\xbb\x2e\x7e\xb3\xdd\xac\x67\xe2\xb7\x7f\x9d\x0f\x75\x32\x8c\xc4\x49\x2d\xd7\x7b\x7d\x24\x3e\x04\xfd\x6a\x07\xe3\x4e\x49\xf9\x59\x41\x95\x04\xc6\x55\x7d\x9d\x2b\xcf\xba\x6e\x27\xf5\xfc\x94\x7b\xdd\x8f\xc3\x5b\xdb\x76\x9a\x63\xa1\x26\x82\x99\xb0\x5b\x4f\xcc\x9b\x3f\x17\x40\x65\x1b\xda\xde\x2e\xa4\xff\x09\x57\x68\x73\xbe\x8f\x0b\xeb\x89\xaf\xf9\x55\x0d\xe9\xce\xc7\xe5\x33\xf3\x8d\x71\xfc\x77\x0c\xee\xce\xf2\xbc\x3c\x68\x71\xc1\xea\xa2\xef\x80\x56\xae\xf7\x0d\xe0\xe0\xc0\x1b\x5e\xed\xc6\x49\x03\x1f\xf9\x51\x17\x06\x66\x5e\x7c\xab\xcf\x2c\x24\xd
9\x17\x5c\x47\x13\x60\xcb\xaf\xcd\x73\xf9\x77\x49\x81\x83\xa2\x0c\xbe\xeb\x79\x96\xfb\x6d\xfb\xcf\xdd\x2a\x1e\x9b\x05\x1e\x9c\xcd\x2f\x7c\x85\x14\x96\x3d\xa1\x58\xcf\x44\x0b\x12\x1a\x6f\xfe\x3a\xa2\x26\x86\xc8\x4f\x98\x28\x37\xb5\x3f\xeb\xa2\xce\x56\x07\xdf\xe9\x22\x3f\x26\x90\xfa\xbc\xe5\x2b\xd6\xcb\xed\xa4\x4e\xcc\x28\x6e\xeb\xa2\xd8\xe3\xa4\xb9\xff\x73\x83\xe2\xbc\x2c\xd7\x61\xe2\xe0\x8b\x7e\xab\xdf\xd7\x56\x85\x8d\x56\x86\xb3\x10\x11\xde\xd6\x8a\x40\x3d\xea\xae\xbc\xd0\x58\x25\x94\x10\xf9\xb0\xc4\xfe\x34\x4b\xab\xc4\x50\x65\xd2\x60\x27\xf3\xbf\xe9\xe4\x14\x78\x4b\x02\x6c\x60\x34\x06\xc5\x8a\x2b\xfd\x30\xbf\xa4\xd7\xc7\x7a\x6e\x8e\xf9\x4b\x7a\xb7\x9c\x96\x88\xcc\xaf\xe5\x38\x26\x0f\x83\x33\xfd\x4b\x31\xe3\x63\x5d\x6b\xca\xfe\x9a\xf5\x61\xf1\xaa\x60\xcf\xbf\x14\xc3\x59\x51\xb8\xce\x5f\x4d\xb0\x8c\xd8\xf3\x94\xf8\x7f\x6d\xa6\x7f\xdb\xfc\xdb\xe6\xff\xc3\xe6\x01\x15\x0b\x50\x75\x81\xb1\x89\x3d\xd2\xfe\xe4\xda\xae\x67\x58\x0a\xfe\x0a\x25\xe9\x3f\x7e\xfb\xb7\x3f\xfe\xf1\xcf\x7f\xfc\x7b\xb2\xe4\x71\x96\x2e\x7b\x9f\xfc\x67\x57\x03\x5d\xfd\x7b\x92\x17\xe3\x92\xff\x77\x3a\x0e\x5b\x3e\x6c\xbf\xff\x06\xfe\x17\x04\xc5\xd0\x6f\x7f\x4c\x71\x96\xd5\x43\xf9\x3b\xf4\x2f\xf8\x74\xfd\x91\x8e\xdd\xb8\xfc\xfe\xaf\x69\x9a\xfe\xf1\xcf\xff\x09\x00\x00\xff\xff\xaf\xa5\x1a\xb1\x3d\x07\x02\x00") func pagesAssetsStylesContainersCssBytes() ([]byte, error) { return bindataRead( diff --git a/vendor/github.com/google/cadvisor/pages/templates.go b/vendor/github.com/google/cadvisor/pages/templates.go index 45f1c184002..b4821d4ae44 100644 --- a/vendor/github.com/google/cadvisor/pages/templates.go +++ b/vendor/github.com/google/cadvisor/pages/templates.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/BUILD index 78d32f02804..aafb2ae03ed 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/BUILD @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "console.go", "container.go", "error.go", "factory.go", @@ -12,13 +11,8 @@ go_library( "stats.go", "sync.go", ] + select({ - "@io_bazel_rules_go//go/platform:freebsd": [ - "console_freebsd.go", - "stats_freebsd.go", - ], "@io_bazel_rules_go//go/platform:linux": [ "capabilities_linux.go", - "compat_1.5_linux.go", "console_linux.go", "container_linux.go", "criu_opts_linux.go", @@ -30,23 +24,11 @@ go_library( "process_linux.go", "restored_process.go", "rootfs_linux.go", - "setgroups_linux.go", "setns_init_linux.go", "standard_init_linux.go", "state_linux.go", "stats_linux.go", ], - "@io_bazel_rules_go//go/platform:solaris": [ - "console_solaris.go", - "container_solaris.go", - "stats_solaris.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "console_windows.go", - "container_windows.go", - "criu_opts_windows.go", - "stats_windows.go", - ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer", @@ -57,18 +39,19 @@ go_library( "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/docker/pkg/mount:go_default_library", - "//vendor/github.com/docker/docker/pkg/symlink:go_default_library", + "//vendor/github.com/containerd/console:go_default_library", + "//vendor/github.com/cyphar/filepath-securejoin:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/mrunalp/fileutils:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/apparmor:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs/validate:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/criurpc:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/intelrdt:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/keys:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/mount:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/seccomp:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", @@ -98,7 +81,9 @@ filegroup( "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/configs:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/criurpc:all-srcs", + "//vendor/github.com/opencontainers/runc/libcontainer/intelrdt:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/keys:all-srcs", + "//vendor/github.com/opencontainers/runc/libcontainer/mount:all-srcs", 
"//vendor/github.com/opencontainers/runc/libcontainer/seccomp:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/stacktrace:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/system:all-srcs", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md b/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md index e5894c6429d..4363b6f9f4e 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md +++ b/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md @@ -154,6 +154,90 @@ that no processes or threads escape the cgroups. This sync is done via a pipe ( specified in the runtime section below ) that the container's init process will block waiting for the parent to finish setup. +### IntelRdt + +Intel platforms with new Xeon CPU support Intel Resource Director Technology +(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which +currently supports L3 cache resource allocation. + +This feature provides a way for the software to restrict cache allocation to a +defined 'subset' of L3 cache which may be overlapping with other 'subsets'. +The different subsets are identified by class of service (CLOS) and each CLOS +has a capacity bitmask (CBM). + +It can be used to handle L3 cache resource allocation for containers if +hardware and kernel support Intel RDT/CAT. + +In Linux 4.10 kernel or newer, the interface is defined and exposed via +"resource control" filesystem, which is a "cgroup-like" interface. + +Comparing with cgroups, it has similar process management lifecycle and +interfaces in a container. But unlike cgroups' hierarchy, it has single level +filesystem layout. + +Intel RDT "resource control" filesystem hierarchy: +``` +mount -t resctrl resctrl /sys/fs/resctrl +tree /sys/fs/resctrl +/sys/fs/resctrl/ +|-- info +| |-- L3 +| |-- cbm_mask +| |-- min_cbm_bits +| |-- num_closids +|-- cpus +|-- schemata +|-- tasks +|-- + |-- cpus + |-- schemata + |-- tasks + +``` + +For runc, we can make use of `tasks` and `schemata` configuration for L3 cache +resource constraints. + +The file `tasks` has a list of tasks that belongs to this group (e.g., +" group). Tasks can be added to a group by writing the task ID +to the "tasks" file (which will automatically remove them from the previous +group to which they belonged). New tasks created by fork(2) and clone(2) are +added to the same group as their parent. If a pid is not in any sub group, it +is in root group. + +The file `schemata` has allocation masks/values for L3 cache on each socket, +which contains L3 cache id and capacity bitmask (CBM). +``` + Format: "L3:=;=;..." +``` +For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0` +Which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0. + +The valid L3 cache CBM is a *contiguous bits set* and number of bits that can +be set is less than the max bit. The max bits in the CBM is varied among +supported Intel Xeon platforms. In Intel RDT "resource control" filesystem +layout, the CBM in a group should be a subset of the CBM in root. Kernel will +check if it is valid when writing. e.g., 0xfffff in root indicates the max bits +of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM +values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc. 
+ +For more information about Intel RDT/CAT kernel interface: +https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt + +An example for runc: +``` +Consider a two-socket machine with two L3 caches where the default CBM is +0xfffff and the max CBM length is 20 bits. With this configuration, tasks +inside the container only have access to the "upper" 80% of L3 cache id 0 and +the "lower" 50% L3 cache id 1: + +"linux": { + "intelRdt": { + "l3CacheSchema": "L3:0=ffff0;1=3ff" + } +} +``` + ### Security The standard set of Linux capabilities that are set in a container @@ -306,7 +390,7 @@ a container. | Exec | Execute a new process inside of the container ( requires setns ) | | Set | Setup configs of the container after it's created | -### Execute a new process inside of a running container. +### Execute a new process inside of a running container User can execute a new process inside of a running container. Any binaries to be executed must be accessible within the container's rootfs. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD index db8484c7c8c..5e06bf7d2e5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD @@ -38,7 +38,6 @@ go_library( ], "//conditions:default": [], }), - cgo = True, importpath = "github.com/opencontainers/runc/libcontainer/apparmor", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go index 82ed1a68a69..7fff0627fa1 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go @@ -2,15 +2,10 @@ package apparmor -// #cgo LDFLAGS: -lapparmor -// #include -// #include -import "C" import ( "fmt" "io/ioutil" "os" - "unsafe" ) // IsEnabled returns true if apparmor is enabled for the host. @@ -24,16 +19,36 @@ func IsEnabled() bool { return false } +func setprocattr(attr, value string) error { + // Under AppArmor you can only change your own attr, so use /proc/self/ + // instead of /proc// like libapparmor does + path := fmt.Sprintf("/proc/self/attr/%s", attr) + + f, err := os.OpenFile(path, os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + + _, err = fmt.Fprintf(f, "%s", value) + return err +} + +// changeOnExec reimplements aa_change_onexec from libapparmor in Go +func changeOnExec(name string) error { + value := "exec " + name + if err := setprocattr("exec", value); err != nil { + return fmt.Errorf("apparmor failed to apply profile: %s", err) + } + return nil +} + // ApplyProfile will apply the profile with the specified name to the process after // the next exec. 
func ApplyProfile(name string) error { if name == "" { return nil } - cName := C.CString(name) - defer C.free(unsafe.Pointer(cName)) - if _, err := C.aa_change_onexec(cName); err != nil { - return fmt.Errorf("apparmor failed to apply profile: %s", err) - } - return nil + + return changeOnExec(name) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD index 95dd26783e7..b6fdcc6e0d3 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD @@ -63,7 +63,6 @@ filegroup( srcs = [ ":package-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:all-srcs", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go index 22d82acb4e2..43bdccf3e9d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go @@ -145,8 +145,17 @@ func (m *Manager) Apply(pid int) (err error) { m.Paths[sys.Name()] = p if err := sys.Apply(d); err != nil { + if os.IsPermission(err) && m.Cgroups.Path == "" { + // If we didn't set a cgroup path, then let's defer the error here + // until we know whether we have set limits or not. + // If we hadn't set limits, then it's ok that we couldn't join this cgroup, because + // it will have the same limits as its parent. + delete(m.Paths, sys.Name()) + continue + } return err } + } return nil } @@ -198,6 +207,10 @@ func (m *Manager) Set(container *configs.Config) error { for _, sys := range subsystems { path := paths[sys.Name()] if err := sys.Set(path, container.Cgroups); err != nil { + if path == "" { + // cgroup never applied + return fmt.Errorf("cannot set limits on the %s cgroup, as the container has not joined it", sys.Name()) + } return err } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go index e70dfe3b950..4b19f8a970d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go @@ -29,11 +29,15 @@ func (s *FreezerGroup) Apply(d *cgroupData) error { func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error { switch cgroup.Resources.Freezer { case configs.Frozen, configs.Thawed: - if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil { - return err - } - for { + // In case this loop does not exit because it doesn't get the expected + // state, let's write again this state, hoping it's going to be properly + // set this time. Otherwise, this loop could run infinitely, waiting for + // a state change that would never happen. 
+ if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil { + return err + } + state, err := readFile(path, "freezer.state") if err != nil { return err @@ -41,6 +45,7 @@ func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error { if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) { break } + time.Sleep(1 * time.Millisecond) } case configs.Undefined: diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go deleted file mode 100644 index b1efbfd9997..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build linux - -package rootless - -import ( - "fmt" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/configs/validate" -) - -// TODO: This is copied from libcontainer/cgroups/fs, which duplicates this code -// needlessly. We should probably export this list. - -var subsystems = []subsystem{ - &fs.CpusetGroup{}, - &fs.DevicesGroup{}, - &fs.MemoryGroup{}, - &fs.CpuGroup{}, - &fs.CpuacctGroup{}, - &fs.PidsGroup{}, - &fs.BlkioGroup{}, - &fs.HugetlbGroup{}, - &fs.NetClsGroup{}, - &fs.NetPrioGroup{}, - &fs.PerfEventGroup{}, - &fs.FreezerGroup{}, - &fs.NameGroup{GroupName: "name=systemd"}, -} - -type subsystem interface { - // Name returns the name of the subsystem. - Name() string - - // Returns the stats, as 'stats', corresponding to the cgroup under 'path'. - GetStats(path string, stats *cgroups.Stats) error -} - -// The noop cgroup manager is used for rootless containers, because we currently -// cannot manage cgroups if we are in a rootless setup. This manager is chosen -// by factory if we are in rootless mode. We error out if any cgroup options are -// set in the config -- this may change in the future with upcoming kernel features -// like the cgroup namespace. - -type Manager struct { - Cgroups *configs.Cgroup - Paths map[string]string -} - -func (m *Manager) Apply(pid int) error { - // If there are no cgroup settings, there's nothing to do. - if m.Cgroups == nil { - return nil - } - - // We can't set paths. - // TODO(cyphar): Implement the case where the runner of a rootless container - // owns their own cgroup, which would allow us to set up a - // cgroup for each path. - if m.Cgroups.Paths != nil { - return fmt.Errorf("cannot change cgroup path in rootless container") - } - - // We load the paths into the manager. - paths := make(map[string]string) - for _, sys := range subsystems { - name := sys.Name() - - path, err := cgroups.GetOwnCgroupPath(name) - if err != nil { - // Ignore paths we couldn't resolve. - continue - } - - paths[name] = path - } - - m.Paths = paths - return nil -} - -func (m *Manager) GetPaths() map[string]string { - return m.Paths -} - -func (m *Manager) Set(container *configs.Config) error { - // We have to re-do the validation here, since someone might decide to - // update a rootless container. 
- return validate.New().Validate(container) -} - -func (m *Manager) GetPids() ([]int, error) { - dir, err := cgroups.GetOwnCgroupPath("devices") - if err != nil { - return nil, err - } - return cgroups.GetPids(dir) -} - -func (m *Manager) GetAllPids() ([]int, error) { - dir, err := cgroups.GetOwnCgroupPath("devices") - if err != nil { - return nil, err - } - return cgroups.GetAllPids(dir) -} - -func (m *Manager) GetStats() (*cgroups.Stats, error) { - // TODO(cyphar): We can make this work if we figure out a way to allow usage - // of cgroups with a rootless container. While this doesn't - // actually require write access to a cgroup directory, the - // statistics are not useful if they can be affected by - // non-container processes. - return nil, fmt.Errorf("cannot get cgroup stats in rootless container") -} - -func (m *Manager) Freeze(state configs.FreezerState) error { - // TODO(cyphar): We can make this work if we figure out a way to allow usage - // of cgroups with a rootless container. - return fmt.Errorf("cannot use freezer cgroup in rootless container") -} - -func (m *Manager) Destroy() error { - // We don't have to do anything here because we didn't do any setup. - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go index 7de9ae6050b..a65d8e4432d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -1,4 +1,4 @@ -// +build !linux +// +build !linux static_build package systemd @@ -43,7 +43,7 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) { } func (m *Manager) Set(container *configs.Config) error { - return nil, fmt.Errorf("Systemd not supported") + return fmt.Errorf("Systemd not supported") } func (m *Manager) Freeze(state configs.FreezerState) error { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go index de89ccbedaa..45bd3acce71 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,!static_build package systemd @@ -271,6 +271,13 @@ func (m *Manager) Apply(pid int) error { // cpu.cfs_quota_us and cpu.cfs_period_us are controlled by systemd. if c.Resources.CpuQuota != 0 && c.Resources.CpuPeriod != 0 { cpuQuotaPerSecUSec := uint64(c.Resources.CpuQuota*1000000) / c.Resources.CpuPeriod + // systemd converts CPUQuotaPerSecUSec (microseconds per CPU second) to CPUQuota + // (integer percentage of CPU) internally. This means that if a fractional percent of + // CPU is indicated by Resources.CpuQuota, we need to round up to the nearest + // 10ms (1% of a second) such that child cgroups can set the cpu.cfs_quota_us they expect. 
+ if cpuQuotaPerSecUSec%10000 != 0 { + cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000 + } properties = append(properties, newProp("CPUQuotaPerSecUSec", cpuQuotaPerSecUSec)) } @@ -288,10 +295,13 @@ func (m *Manager) Apply(pid int) error { } } - if _, err := theConn.StartTransientUnit(unitName, "replace", properties, nil); err != nil && !isUnitExists(err) { + statusChan := make(chan string) + if _, err := theConn.StartTransientUnit(unitName, "replace", properties, statusChan); err != nil && !isUnitExists(err) { return err } + <-statusChan + if err := joinCgroups(c, pid); err != nil { return err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go deleted file mode 100644 index c7bdf1f60a0..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux,!go1.5 - -package libcontainer - -import "syscall" - -// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building -// with earlier versions -func enableSetgroups(sys *syscall.SysProcAttr) { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD index 01bd8e8c03b..67df01f41ca 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD @@ -7,28 +7,25 @@ go_library( "config.go", "device.go", "hugepage_limit.go", + "intelrdt.go", "interface_priority_map.go", "mount.go", "namespaces.go", "network.go", ] + select({ "@io_bazel_rules_go//go/platform:android": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:darwin": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:dragonfly": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "device_defaults.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], @@ -40,27 +37,22 @@ go_library( "namespaces_syscall.go", ], "@io_bazel_rules_go//go/platform:nacl": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:netbsd": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:openbsd": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:plan9": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:solaris": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go deleted file mode 100644 index 95e2830a436..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows,!linux,!freebsd - -package configs - -type Cgroup struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go 
b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 269fffff357..3cae4fd8d96 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -187,6 +187,10 @@ type Config struct { // Rootless specifies whether the container is a rootless container. Rootless bool `json:"rootless"` + + // IntelRdt specifies settings for Intel RDT/CAT group that the container is placed into + // to limit the resources (e.g., L3 cache) the container has available + IntelRdt *IntelRdt `json:"intel_rdt,omitempty"` } type Hooks struct { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go index 4d348d217ec..e4f423c523f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go @@ -1,4 +1,4 @@ -// +build linux freebsd +// +build linux package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go new file mode 100644 index 00000000000..36bd5f96a11 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go @@ -0,0 +1,7 @@ +package configs + +type IntelRdt struct { + // The schema for L3 cache id and capacity bitmask (CBM) + // Format: "L3:=;=;..." + L3CacheSchema string `json:"l3_cache_schema,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD index 282deb77d09..f564a88af5b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD @@ -10,6 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/intelrdt:go_default_library", "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library", ], ) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go index 0cebfaf801a..7a9f33b7114 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go @@ -21,13 +21,6 @@ func (v *ConfigValidator) rootless(config *configs.Config) error { if err := rootlessMount(config); err != nil { return err } - // Currently, cgroups cannot effectively be used in rootless containers. - // The new cgroup namespace doesn't really help us either because it doesn't - // have nice interactions with the user namespace (we're working with upstream - // to fix this). - if err := rootlessCgroup(config); err != nil { - return err - } // XXX: We currently can't verify the user config at all, because // configs.Config doesn't store the user-related configs. 
So this @@ -36,37 +29,27 @@ func (v *ConfigValidator) rootless(config *configs.Config) error { return nil } -func rootlessMappings(config *configs.Config) error { - rootuid, err := config.HostRootUID() - if err != nil { - return fmt.Errorf("failed to get root uid from uidMappings: %v", err) +func hasIDMapping(id int, mappings []configs.IDMap) bool { + for _, m := range mappings { + if id >= m.ContainerID && id < m.ContainerID+m.Size { + return true + } } + return false +} + +func rootlessMappings(config *configs.Config) error { if euid := geteuid(); euid != 0 { if !config.Namespaces.Contains(configs.NEWUSER) { return fmt.Errorf("rootless containers require user namespaces") } - if rootuid != euid { - return fmt.Errorf("rootless containers cannot map container root to a different host user") - } } - rootgid, err := config.HostRootGID() - if err != nil { - return fmt.Errorf("failed to get root gid from gidMappings: %v", err) + if len(config.UidMappings) == 0 { + return fmt.Errorf("rootless containers requires at least one UID mapping") } - - // Similar to the above test, we need to make sure that we aren't trying to - // map to a group ID that we don't have the right to be. - if rootgid != getegid() { - return fmt.Errorf("rootless containers cannot map container root to a different host group") - } - - // We can only map one user and group inside a container (our own). - if len(config.UidMappings) != 1 || config.UidMappings[0].Size != 1 { - return fmt.Errorf("rootless containers cannot map more than one user") - } - if len(config.GidMappings) != 1 || config.GidMappings[0].Size != 1 { - return fmt.Errorf("rootless containers cannot map more than one group") + if len(config.GidMappings) == 0 { + return fmt.Errorf("rootless containers requires at least one UID mapping") } return nil @@ -104,11 +87,28 @@ func rootlessMount(config *configs.Config) error { // Check that the options list doesn't contain any uid= or gid= entries // that don't resolve to root. for _, opt := range strings.Split(mount.Data, ",") { - if strings.HasPrefix(opt, "uid=") && opt != "uid=0" { - return fmt.Errorf("cannot specify uid= mount options in rootless containers where argument isn't 0") + if strings.HasPrefix(opt, "uid=") { + var uid int + n, err := fmt.Sscanf(opt, "uid=%d", &uid) + if n != 1 || err != nil { + // Ignore unknown mount options. + continue + } + if !hasIDMapping(uid, config.UidMappings) { + return fmt.Errorf("cannot specify uid= mount options for unmapped uid in rootless containers") + } } - if strings.HasPrefix(opt, "gid=") && opt != "gid=0" { - return fmt.Errorf("cannot specify gid= mount options in rootless containers where argument isn't 0") + + if strings.HasPrefix(opt, "gid=") { + var gid int + n, err := fmt.Sscanf(opt, "gid=%d", &gid) + if n != 1 || err != nil { + // Ignore unknown mount options. 
+ continue + } + if !hasIDMapping(gid, config.GidMappings) { + return fmt.Errorf("cannot specify gid= mount options for unmapped gid in rootless containers") + } } } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go index 8284345442c..cbbba9a03a2 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/intelrdt" selinux "github.com/opencontainers/selinux/go-selinux" ) @@ -40,6 +41,9 @@ func (v *ConfigValidator) Validate(config *configs.Config) error { if err := v.sysctl(config); err != nil { return err } + if err := v.intelrdt(config); err != nil { + return err + } if config.Rootless { if err := v.rootless(config); err != nil { return err @@ -153,6 +157,19 @@ func (v *ConfigValidator) sysctl(config *configs.Config) error { return nil } +func (v *ConfigValidator) intelrdt(config *configs.Config) error { + if config.IntelRdt != nil { + if !intelrdt.IsEnabled() { + return fmt.Errorf("intelRdt is specified in config, but Intel RDT feature is not supported or enabled") + } + if config.IntelRdt.L3CacheSchema == "" { + return fmt.Errorf("intelRdt is specified in config, but intelRdt.l3CacheSchema is empty") + } + } + + return nil +} + func isSymbolicLink(path string) (bool, error) { fi, err := os.Lstat(path) if err != nil { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console.go b/vendor/github.com/opencontainers/runc/libcontainer/console.go deleted file mode 100644 index 917acc702f3..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console.go +++ /dev/null @@ -1,17 +0,0 @@ -package libcontainer - -import ( - "io" - "os" -) - -// Console represents a pseudo TTY. -type Console interface { - io.ReadWriteCloser - - // Path returns the filesystem path to the slave side of the pty. - Path() string - - // Fd returns the fd for the master of the pty. - File() *os.File -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go deleted file mode 100644 index b7166a31f06..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build freebsd - -package libcontainer - -import ( - "errors" -) - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. 
-func newConsole() (Console, error) { - return nil, errors.New("libcontainer console is not supported on FreeBSD") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go index f70de384812..9997e93ed4f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go @@ -1,71 +1,14 @@ package libcontainer import ( - "fmt" "os" - "unsafe" "golang.org/x/sys/unix" ) -func ConsoleFromFile(f *os.File) Console { - return &linuxConsole{ - master: f, - } -} - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func newConsole() (Console, error) { - master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - console, err := ptsname(master) - if err != nil { - return nil, err - } - if err := unlockpt(master); err != nil { - return nil, err - } - return &linuxConsole{ - slavePath: console, - master: master, - }, nil -} - -// linuxConsole is a linux pseudo TTY for use within a container. -type linuxConsole struct { - master *os.File - slavePath string -} - -func (c *linuxConsole) File() *os.File { - return c.master -} - -func (c *linuxConsole) Path() string { - return c.slavePath -} - -func (c *linuxConsole) Read(b []byte) (int, error) { - return c.master.Read(b) -} - -func (c *linuxConsole) Write(b []byte) (int, error) { - return c.master.Write(b) -} - -func (c *linuxConsole) Close() error { - if m := c.master; m != nil { - return m.Close() - } - return nil -} - // mount initializes the console inside the rootfs mounting with the specified mount label // and applying the correct ownership of the console. -func (c *linuxConsole) mount() error { +func mountConsole(slavePath string) error { oldMask := unix.Umask(0000) defer unix.Umask(oldMask) f, err := os.Create("/dev/console") @@ -75,17 +18,20 @@ func (c *linuxConsole) mount() error { if f != nil { f.Close() } - return unix.Mount(c.slavePath, "/dev/console", "bind", unix.MS_BIND, "") + return unix.Mount(slavePath, "/dev/console", "bind", unix.MS_BIND, "") } // dupStdio opens the slavePath for the console and dups the fds to the current // processes stdio, fd 0,1,2. -func (c *linuxConsole) dupStdio() error { - slave, err := c.open(unix.O_RDWR) +func dupStdio(slavePath string) error { + fd, err := unix.Open(slavePath, unix.O_RDWR, 0) if err != nil { - return err + return &os.PathError{ + Op: "open", + Path: slavePath, + Err: err, + } } - fd := int(slave.Fd()) for _, i := range []int{0, 1, 2} { if err := unix.Dup3(fd, i, 0); err != nil { return err @@ -93,60 +39,3 @@ func (c *linuxConsole) dupStdio() error { } return nil } - -// open is a clone of os.OpenFile without the O_CLOEXEC used to open the pty slave. -func (c *linuxConsole) open(flag int) (*os.File, error) { - r, e := unix.Open(c.slavePath, flag, 0) - if e != nil { - return nil, &os.PathError{ - Op: "open", - Path: c.slavePath, - Err: e, - } - } - return os.NewFile(uintptr(r), c.slavePath), nil -} - -func ioctl(fd uintptr, flag, data uintptr) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { - return err - } - return nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. 
-// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} - -// SaneTerminal sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts normally. In particular, a not-very-well-known default of -// Linux unix98 ptys is that they have +onlcr by default. While this isn't a -// problem for terminal emulators, because we relay data from the terminal we -// also relay that funky line discipline. -func SaneTerminal(terminal *os.File) error { - termios, err := unix.IoctlGetTermios(int(terminal.Fd()), unix.TCGETS) - if err != nil { - return fmt.Errorf("ioctl(tty, tcgets): %s", err.Error()) - } - - // Set -onlcr so we don't have to deal with \r. - termios.Oflag &^= unix.ONLCR - - if err := unix.IoctlSetTermios(int(terminal.Fd()), unix.TCSETS, termios); err != nil { - return fmt.Errorf("ioctl(tty, tcsets): %s", err.Error()) - } - - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go deleted file mode 100644 index e5ca54599c2..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package libcontainer - -import ( - "errors" -) - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func newConsole() (Console, error) { - return nil, errors.New("libcontainer console is not supported on Solaris") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go deleted file mode 100644 index c61e866a5d5..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package libcontainer - -// newConsole returns an initialized console that can be used within a container -func newConsole() (Console, error) { - return &windowsConsole{}, nil -} - -// windowsConsole is a Windows pseudo TTY for use within a container. 
-type windowsConsole struct { -} - -func (c *windowsConsole) Fd() uintptr { - return 0 -} - -func (c *windowsConsole) Path() string { - return "" -} - -func (c *windowsConsole) Read(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Write(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Close() error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go index 9e1b74d77af..1ac74b1bf82 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go @@ -21,6 +21,7 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/criurpc" + "github.com/opencontainers/runc/libcontainer/intelrdt" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/utils" @@ -38,10 +39,14 @@ type linuxContainer struct { root string config *configs.Config cgroupManager cgroups.Manager + intelRdtManager intelrdt.Manager + initPath string initArgs []string initProcess parentProcess initProcessStartTime uint64 criuPath string + newuidmapPath string + newgidmapPath string m sync.Mutex criuVersion int state containerState @@ -67,6 +72,9 @@ type State struct { // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore ExternalDescriptors []string `json:"external_descriptors,omitempty"` + + // Intel RDT "resource control" filesystem path + IntelRdtPath string `json:"intel_rdt_path"` } // Container is a libcontainer container object. @@ -163,6 +171,11 @@ func (c *linuxContainer) Stats() (*Stats, error) { if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { return stats, newSystemErrorWithCause(err, "getting container stats from cgroups") } + if c.intelRdtManager != nil { + if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil { + return stats, newSystemErrorWithCause(err, "getting container's Intel RDT stats") + } + } for _, iface := range c.config.Networks { switch iface.Type { case "veth": @@ -193,6 +206,15 @@ func (c *linuxContainer) Set(config configs.Config) error { } return err } + if c.intelRdtManager != nil { + if err := c.intelRdtManager.Set(&config); err != nil { + // Set configs back + if err2 := c.intelRdtManager.Set(c.config); err2 != nil { + logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2) + } + return err + } + } // After config setting succeed, update config and states c.config = &config _, err = c.updateState(nil) @@ -268,7 +290,7 @@ func (c *linuxContainer) start(process *Process, isInit bool) error { } if err := parent.start(); err != nil { // terminate the process to ensure that it properly is reaped. 
- if err := parent.terminate(); err != nil { + if err := ignoreTerminateErrors(parent.terminate()); err != nil { logrus.Warn(err) } return newSystemErrorWithCause(err, "starting container process") @@ -294,7 +316,7 @@ func (c *linuxContainer) start(process *Process, isInit bool) error { } for i, hook := range c.config.Hooks.Poststart { if err := hook.Run(s); err != nil { - if err := parent.terminate(); err != nil { + if err := ignoreTerminateErrors(parent.terminate()); err != nil { logrus.Warn(err) } return newSystemErrorWithCausef(err, "running poststart hook %d", i) @@ -392,7 +414,8 @@ func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProces } func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) { - cmd := exec.Command(c.initArgs[0], c.initArgs[1:]...) + cmd := exec.Command(c.initPath, c.initArgs[1:]...) + cmd.Args[0] = c.initArgs[0] cmd.Stdin = p.Stdin cmd.Stdout = p.Stdout cmd.Stderr = p.Stderr @@ -434,15 +457,16 @@ func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, c return nil, err } return &initProcess{ - cmd: cmd, - childPipe: childPipe, - parentPipe: parentPipe, - manager: c.cgroupManager, - config: c.newInitConfig(p), - container: c, - process: p, - bootstrapData: data, - sharePidns: sharePidns, + cmd: cmd, + childPipe: childPipe, + parentPipe: parentPipe, + manager: c.cgroupManager, + intelRdtManager: c.intelRdtManager, + config: c.newInitConfig(p), + container: c, + process: p, + bootstrapData: data, + sharePidns: sharePidns, }, nil } @@ -461,6 +485,7 @@ func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, return &setnsProcess{ cmd: cmd, cgroupPaths: c.cgroupManager.GetPaths(), + intelRdtPath: state.IntelRdtPath, childPipe: childPipe, parentPipe: parentPipe, config: c.newInitConfig(p), @@ -499,6 +524,8 @@ func (c *linuxContainer) newInitConfig(process *Process) *initConfig { cfg.Rlimits = process.Rlimits } cfg.CreateConsole = process.ConsoleSocket != nil + cfg.ConsoleWidth = process.ConsoleWidth + cfg.ConsoleHeight = process.ConsoleHeight return cfg } @@ -600,9 +627,24 @@ func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc. logrus.Debugf("Feature check says: %s", criuFeatures) missingFeatures := false - if *criuFeat.MemTrack && !*criuFeatures.MemTrack { - missingFeatures = true - logrus.Debugf("CRIU does not support MemTrack") + // The outer if checks if the fields actually exist + if (criuFeat.MemTrack != nil) && + (criuFeatures.MemTrack != nil) { + // The inner if checks if they are set to true + if *criuFeat.MemTrack && !*criuFeatures.MemTrack { + missingFeatures = true + logrus.Debugf("CRIU does not support MemTrack") + } + } + + // This needs to be repeated for every new feature check. + // Is there a way to put this in a function. Reflection? 
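+	// Same two-step pattern as the MemTrack check above: first make sure
+	// both the requested and the reported feature structs actually carry
+	// the LazyPages field (presumably absent with older CRIU builds), and
+	// only then compare the boolean values themselves.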
+ if (criuFeat.LazyPages != nil) && + (criuFeatures.LazyPages != nil) { + if *criuFeat.LazyPages && !*criuFeatures.LazyPages { + missingFeatures = true + logrus.Debugf("CRIU does not support LazyPages") + } } if missingFeatures { @@ -632,9 +674,9 @@ func parseCriuVersion(path string) (int, error) { return 0, fmt.Errorf("Unable to parse the CRIU version: %s", path) } - n, err := fmt.Sscanf(string(version), "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 + n, err := fmt.Sscanf(version, "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 if err != nil { - n, err = fmt.Sscanf(string(version), "GitID: v%d.%d", &x, &y) // 1.6 + n, err = fmt.Sscanf(version, "GitID: v%d.%d", &x, &y) // 1.6 y++ } else { z++ @@ -758,6 +800,25 @@ func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error { } req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) } + return nil +} + +func waitForCriuLazyServer(r *os.File, status string) error { + + data := make([]byte, 1) + _, err := r.Read(data) + if err != nil { + return err + } + fd, err := os.OpenFile(status, os.O_TRUNC|os.O_WRONLY, os.ModeAppend) + if err != nil { + return err + } + _, err = fd.Write(data) + if err != nil { + return err + } + fd.Close() return nil } @@ -825,6 +886,7 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { EmptyNs: proto.Uint32(criuOpts.EmptyNs), OrphanPtsMaster: proto.Bool(true), AutoDedup: proto.Bool(criuOpts.AutoDedup), + LazyPages: proto.Bool(criuOpts.LazyPages), } fcg := c.cgroupManager.GetPaths()["freezer"] @@ -875,6 +937,24 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { Opts: &rpcOpts, } + if criuOpts.LazyPages { + // lazy migration requested; check if criu supports it + feat := criurpc.CriuFeatures{ + LazyPages: proto.Bool(true), + } + + if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil { + return err + } + + statusRead, statusWrite, err := os.Pipe() + if err != nil { + return err + } + rpcOpts.StatusFd = proto.Int32(int32(statusWrite.Fd())) + go waitForCriuLazyServer(statusRead, criuOpts.StatusFd) + } + //no need to dump these information in pre-dump if !criuOpts.PreDump { for _, m := range c.config.Mounts { @@ -1027,6 +1107,7 @@ func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { EmptyNs: proto.Uint32(criuOpts.EmptyNs), OrphanPtsMaster: proto.Bool(true), AutoDedup: proto.Bool(criuOpts.AutoDedup), + LazyPages: proto.Bool(criuOpts.LazyPages), }, } @@ -1404,7 +1485,7 @@ func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Proc defer master.Close() // While we can access console.master, using the API is a good idea. 
- if err := utils.SendFd(process.ConsoleSocket, master); err != nil { + if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil { return err } } @@ -1519,6 +1600,10 @@ func (c *linuxContainer) currentState() (*State, error) { startTime, _ = c.initProcess.startTime() externalDescriptors = c.initProcess.externalDescriptors() } + intelRdtPath, err := intelrdt.GetIntelRdtPath(c.ID()) + if err != nil { + intelRdtPath = "" + } state := &State{ BaseState: BaseState{ ID: c.ID(), @@ -1529,6 +1614,7 @@ func (c *linuxContainer) currentState() (*State, error) { }, Rootless: c.config.Rootless, CgroupPaths: c.cgroupManager.GetPaths(), + IntelRdtPath: intelRdtPath, NamespacePaths: make(map[configs.NamespaceType]string), ExternalDescriptors: externalDescriptors, } @@ -1627,6 +1713,12 @@ func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.Na if !joinExistingUser { // write uid mappings if len(c.config.UidMappings) > 0 { + if c.config.Rootless && c.newuidmapPath != "" { + r.AddData(&Bytemsg{ + Type: UidmapPathAttr, + Value: []byte(c.newuidmapPath), + }) + } b, err := encodeIDMapping(c.config.UidMappings) if err != nil { return nil, err @@ -1647,6 +1739,12 @@ func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.Na Type: GidmapAttr, Value: b, }) + if c.config.Rootless && c.newgidmapPath != "" { + r.AddData(&Bytemsg{ + Type: GidmapPathAttr, + Value: []byte(c.newgidmapPath), + }) + } // The following only applies if we are root. if !c.config.Rootless { // check if we have CAP_SETGID to setgroup properly @@ -1678,3 +1776,18 @@ func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.Na return bytes.NewReader(r.Serialize()), nil } + +// ignoreTerminateErrors returns nil if the given err matches an error known +// to indicate that the terminate occurred successfully or err was nil, otherwise +// err is returned unaltered. +func ignoreTerminateErrors(err error) error { + if err == nil { + return nil + } + s := err.Error() + switch { + case strings.Contains(s, "process already finished"), strings.Contains(s, "Wait was already called"): + return nil + } + return err +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go deleted file mode 100644 index bb84ff7402b..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. -type Container interface { - BaseContainer - - // Methods below here are platform specific - -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go deleted file mode 100644 index bb84ff7402b..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here -} - -// A libcontainer container object. 
-// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. -type Container interface { - BaseContainer - - // Methods below here are platform specific - -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go index 8f142c9fa5c..a2e344fc4b6 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go @@ -23,7 +23,7 @@ type VethPairName struct { type CriuOpts struct { ImagesDirectory string // directory for storing image files WorkDirectory string // directory to cd and write logs/pidfiles/stats to - ParentImage string // direcotry for storing parent image files in pre-dump and dump + ParentImage string // directory for storing parent image files in pre-dump and dump LeaveRunning bool // leave container in running state after checkpoint TcpEstablished bool // checkpoint/restore established TCP connections ExternalUnixConnections bool // allow external unix connections @@ -35,4 +35,6 @@ type CriuOpts struct { ManageCgroupsMode cgMode // dump or restore cgroup mode EmptyNs uint32 // don't c/r properties for namespace from this mask AutoDedup bool // auto deduplication for incremental dumps + LazyPages bool // restore memory pages lazily using userfaultfd + StatusFd string // fd for feedback when lazy server is ready } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go deleted file mode 100644 index bc9207703a1..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package libcontainer - -// TODO Windows: This can ultimately be entirely factored out as criu is -// a Unix concept not relevant on Windows. -type CriuOpts struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go index 947bdea1ceb..7d53d5e04d8 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go @@ -11,13 +11,13 @@ import ( "runtime/debug" "strconv" - "github.com/docker/docker/pkg/mount" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/rootless" "github.com/opencontainers/runc/libcontainer/cgroups/systemd" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/configs/validate" + "github.com/opencontainers/runc/libcontainer/intelrdt" + "github.com/opencontainers/runc/libcontainer/mount" "github.com/opencontainers/runc/libcontainer/utils" "golang.org/x/sys/unix" @@ -72,15 +72,15 @@ func Cgroupfs(l *LinuxFactory) error { return nil } -// RootlessCgroups is an options func to configure a LinuxFactory to -// return containers that use the "rootless" cgroup manager, which will -// fail to do any operations not possible to do with an unprivileged user. -// It should only be used in conjunction with rootless containers. 
-func RootlessCgroups(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &rootless.Manager{ - Cgroups: config, - Paths: paths, +// IntelRdtfs is an options func to configure a LinuxFactory to return +// containers that use the Intel RDT "resource control" filesystem to +// create and manage Intel Xeon platform shared resources (e.g., L3 cache). +func IntelRdtFs(l *LinuxFactory) error { + l.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager { + return &intelrdt.IntelRdtManager{ + Config: config, + Id: id, + Path: path, } } return nil @@ -119,12 +119,16 @@ func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { } l := &LinuxFactory{ Root: root, - InitArgs: []string{"/proc/self/exe", "init"}, + InitPath: "/proc/self/exe", + InitArgs: []string{os.Args[0], "init"}, Validator: validate.New(), CriuPath: "criu", } Cgroupfs(l) for _, opt := range options { + if opt == nil { + continue + } if err := opt(l); err != nil { return nil, err } @@ -137,6 +141,10 @@ type LinuxFactory struct { // Root directory for the factory to store state. Root string + // InitPath is the path for calling the init responsibilities for spawning + // a container. + InitPath string + // InitArgs are arguments for calling the init responsibilities for spawning // a container. InitArgs []string @@ -145,11 +153,19 @@ type LinuxFactory struct { // containers. CriuPath string + // New{u,g}uidmapPath is the path to the binaries used for mapping with + // rootless containers. + NewuidmapPath string + NewgidmapPath string + // Validator provides validation to container configurations. Validator validate.Validator // NewCgroupsManager returns an initialized cgroups manager for a single container. NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager + + // NewIntelRdtManager returns an initialized Intel RDT manager for a single container. + NewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager } func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) { @@ -174,17 +190,20 @@ func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, err if err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil { return nil, newGenericError(err, SystemError) } - if config.Rootless { - RootlessCgroups(l) - } c := &linuxContainer{ id: id, root: containerRoot, config: config, + initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, + newuidmapPath: l.NewuidmapPath, + newgidmapPath: l.NewgidmapPath, cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), } + if intelrdt.IsEnabled() { + c.intelRdtManager = l.NewIntelRdtManager(config, id, "") + } c.state = &stoppedState{c: c} return c, nil } @@ -203,17 +222,16 @@ func (l *LinuxFactory) Load(id string) (Container, error) { processStartTime: state.InitProcessStartTime, fds: state.ExternalDescriptors, } - // We have to use the RootlessManager. 
- if state.Rootless { - RootlessCgroups(l) - } c := &linuxContainer{ initProcess: r, initProcessStartTime: state.InitProcessStartTime, id: id, config: &state.Config, + initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, + newuidmapPath: l.NewuidmapPath, + newgidmapPath: l.NewgidmapPath, cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), root: containerRoot, created: state.Created, @@ -222,6 +240,9 @@ func (l *LinuxFactory) Load(id string) (Container, error) { if err := c.refreshState(); err != nil { return nil, err } + if intelrdt.IsEnabled() { + c.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath) + } return c, nil } @@ -323,3 +344,21 @@ func (l *LinuxFactory) validateID(id string) error { return nil } + +// NewuidmapPath returns an option func to configure a LinuxFactory with the +// provided .. +func NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error { + return func(l *LinuxFactory) error { + l.NewuidmapPath = newuidmapPath + return nil + } +} + +// NewgidmapPath returns an option func to configure a LinuxFactory with the +// provided .. +func NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error { + return func(l *LinuxFactory) error { + l.NewgidmapPath = newgidmapPath + return nil + } +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go index 2020bb7a5aa..2770be30718 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go @@ -12,15 +12,16 @@ import ( "syscall" // only for Errno "unsafe" + "golang.org/x/sys/unix" + + "github.com/containerd/console" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/opencontainers/runc/libcontainer/utils" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" - "golang.org/x/sys/unix" ) type initType string @@ -61,6 +62,8 @@ type initConfig struct { ContainerId string `json:"containerid"` Rlimits []configs.Rlimit `json:"rlimits"` CreateConsole bool `json:"create_console"` + ConsoleWidth uint16 `json:"console_width"` + ConsoleHeight uint16 `json:"console_height"` Rootless bool `json:"rootless"` } @@ -170,29 +173,38 @@ func setupConsole(socket *os.File, config *initConfig, mount bool) error { // however, that setupUser (specifically fixStdioPermissions) *will* change // the UID owner of the console to be the user the process will run as (so // they can actually control their console). - console, err := newConsole() + + pty, slavePath, err := console.NewPty() if err != nil { return err } - // After we return from here, we don't need the console anymore. - defer console.Close() - linuxConsole, ok := console.(*linuxConsole) - if !ok { - return fmt.Errorf("failed to cast console to *linuxConsole") + if config.ConsoleHeight != 0 && config.ConsoleWidth != 0 { + err = pty.Resize(console.WinSize{ + Height: config.ConsoleHeight, + Width: config.ConsoleWidth, + }) + + if err != nil { + return err + } } + + // After we return from here, we don't need the console anymore. + defer pty.Close() + // Mount the console inside our rootfs. 
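+	// mountConsole (see console_linux.go above) bind-mounts slavePath over
+	// /dev/console inside the rootfs, so the container sees its pty slave
+	// as the console device.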
if mount { - if err := linuxConsole.mount(); err != nil { + if err := mountConsole(slavePath); err != nil { return err } } // While we can access console.master, using the API is a good idea. - if err := utils.SendFd(socket, linuxConsole.File()); err != nil { + if err := utils.SendFd(socket, pty.Name(), pty.Fd()); err != nil { return err } // Now, dup over all the things. - return linuxConsole.dupStdio() + return dupStdio(slavePath) } // syncParentReady sends to the given pipe a JSON payload which indicates that @@ -261,25 +273,27 @@ func setupUser(config *initConfig) error { } } + // Rather than just erroring out later in setuid(2) and setgid(2), check + // that the user is mapped here. + if _, err := config.Config.HostUID(execUser.Uid); err != nil { + return fmt.Errorf("cannot set uid to unmapped user in user namespace") + } + if _, err := config.Config.HostGID(execUser.Gid); err != nil { + return fmt.Errorf("cannot set gid to unmapped user in user namespace") + } + if config.Rootless { - if execUser.Uid != 0 { - return fmt.Errorf("cannot run as a non-root user in a rootless container") - } - - if execUser.Gid != 0 { - return fmt.Errorf("cannot run as a non-root group in a rootless container") - } - - // We cannot set any additional groups in a rootless container and thus we - // bail if the user asked us to do so. TODO: We currently can't do this - // earlier, but if libcontainer.Process.User was typesafe this might work. + // We cannot set any additional groups in a rootless container and thus + // we bail if the user asked us to do so. TODO: We currently can't do + // this check earlier, but if libcontainer.Process.User was typesafe + // this might work. if len(addGroups) > 0 { return fmt.Errorf("cannot set any additional groups in a rootless container") } } - // before we change to the container's user make sure that the processes STDIO - // is correctly owned by the user that we are switching to. + // Before we change to the container's user make sure that the processes + // STDIO is correctly owned by the user that we are switching to. if err := fixStdioPermissions(config, execUser); err != nil { return err } @@ -298,7 +312,6 @@ func setupUser(config *initConfig) error { if err := system.Setgid(execUser.Gid); err != nil { return err } - if err := system.Setuid(execUser.Uid); err != nil { return err } @@ -335,14 +348,6 @@ func fixStdioPermissions(config *initConfig, u *user.ExecUser) error { continue } - // Skip chown if s.Gid is actually an unmapped gid in the host. While - // this is a bit dodgy if it just so happens that the console _is_ - // owned by overflow_gid, there's no way for us to disambiguate this as - // a userspace program. - if _, err := config.Config.HostGID(int(s.Gid)); err != nil { - continue - } - // We only change the uid owner (as it is possible for the mount to // prefer a different gid, and there's no reason for us to change it). // The reason why we don't just leave the default uid=X mount setup is @@ -350,6 +355,15 @@ func fixStdioPermissions(config *initConfig, u *user.ExecUser) error { // this code, you couldn't effectively run as a non-root user inside a // container and also have a console set up. if err := unix.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil { + // If we've hit an EINVAL then s.Gid isn't mapped in the user + // namespace. If we've hit an EPERM then the inode's current owner + // is not mapped in our user namespace (in particular, + // privileged_wrt_inode_uidgid() has failed). 
In either case, we + // are in a configuration where it's better for us to just not + // touch the stdio rather than bail at this point. + if err == unix.EINVAL || err == unix.EPERM { + continue + } return err } } @@ -480,6 +494,16 @@ func signalAllProcesses(m cgroups.Manager, s os.Signal) error { logrus.Warn(err) } + subreaper, err := system.GetSubreaper() + if err != nil { + // The error here means that PR_GET_CHILD_SUBREAPER is not + // supported because this code might run on a kernel older + // than 3.4. We don't want to throw an error in that case, + // and we simplify things, considering there is no subreaper + // set. + subreaper = 0 + } + for _, p := range procs { if s != unix.SIGKILL { if ok, err := isWaitable(p.Pid); err != nil { @@ -493,9 +517,16 @@ func signalAllProcesses(m cgroups.Manager, s os.Signal) error { } } - if _, err := p.Wait(); err != nil { - if !isNoChildren(err) { - logrus.Warn("wait: ", err) + // In case a subreaper has been setup, this code must not + // wait for the process. Otherwise, we cannot be sure the + // current process will be reaped by the subreaper, while + // the subreaper might be waiting for this process in order + // to retrieve its exit code. + if subreaper == 0 { + if _, err := p.Wait(); err != nil { + if !isNoChildren(err) { + logrus.Warn("wait: ", err) + } } } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/BUILD similarity index 65% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD rename to vendor/github.com/opencontainers/runc/libcontainer/intelrdt/BUILD index 82406c88c1a..4c530fdd02f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/BUILD @@ -4,18 +4,16 @@ go_library( name = "go_default_library", srcs = select({ "@io_bazel_rules_go//go/platform:linux": [ - "rootless.go", + "intelrdt.go", + "stats.go", ], "//conditions:default": [], }), - importpath = "github.com/opencontainers/runc/libcontainer/cgroups/rootless", + importpath = "github.com/opencontainers/runc/libcontainer/intelrdt", visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/configs/validate:go_default_library", ], "//conditions:default": [], }), diff --git a/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go new file mode 100644 index 00000000000..487c630af61 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go @@ -0,0 +1,553 @@ +// +build linux + +package intelrdt + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + "github.com/opencontainers/runc/libcontainer/configs" +) + +/* + * About Intel RDT/CAT feature: + * Intel platforms with new Xeon CPU support Resource Director Technology (RDT). + * Intel Cache Allocation Technology (CAT) is a sub-feature of RDT. Currently L3 + * Cache is the only resource that is supported in RDT. 
+ * + * This feature provides a way for the software to restrict cache allocation to a + * defined 'subset' of L3 cache which may be overlapping with other 'subsets'. + * The different subsets are identified by class of service (CLOS) and each CLOS + * has a capacity bitmask (CBM). + * + * For more information about Intel RDT/CAT can be found in the section 17.17 + * of Intel Software Developer Manual. + * + * About Intel RDT/CAT kernel interface: + * In Linux 4.10 kernel or newer, the interface is defined and exposed via + * "resource control" filesystem, which is a "cgroup-like" interface. + * + * Comparing with cgroups, it has similar process management lifecycle and + * interfaces in a container. But unlike cgroups' hierarchy, it has single level + * filesystem layout. + * + * Intel RDT "resource control" filesystem hierarchy: + * mount -t resctrl resctrl /sys/fs/resctrl + * tree /sys/fs/resctrl + * /sys/fs/resctrl/ + * |-- info + * | |-- L3 + * | |-- cbm_mask + * | |-- min_cbm_bits + * | |-- num_closids + * |-- cpus + * |-- schemata + * |-- tasks + * |-- + * |-- cpus + * |-- schemata + * |-- tasks + * + * For runc, we can make use of `tasks` and `schemata` configuration for L3 cache + * resource constraints. + * + * The file `tasks` has a list of tasks that belongs to this group (e.g., + * " group). Tasks can be added to a group by writing the task ID + * to the "tasks" file (which will automatically remove them from the previous + * group to which they belonged). New tasks created by fork(2) and clone(2) are + * added to the same group as their parent. If a pid is not in any sub group, it is + * in root group. + * + * The file `schemata` has allocation bitmasks/values for L3 cache on each socket, + * which contains L3 cache id and capacity bitmask (CBM). + * Format: "L3:=;=;..." + * For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0` + * which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0. + * + * The valid L3 cache CBM is a *contiguous bits set* and number of bits that can + * be set is less than the max bit. The max bits in the CBM is varied among + * supported Intel Xeon platforms. In Intel RDT "resource control" filesystem + * layout, the CBM in a group should be a subset of the CBM in root. Kernel will + * check if it is valid when writing. e.g., 0xfffff in root indicates the max bits + * of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM + * values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc. + * + * For more information about Intel RDT/CAT kernel interface: + * https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt + * + * An example for runc: + * Consider a two-socket machine with two L3 caches where the default CBM is + * 0xfffff and the max CBM length is 20 bits. With this configuration, tasks + * inside the container only have access to the "upper" 80% of L3 cache id 0 and + * the "lower" 50% L3 cache id 1: + * + * "linux": { + * "intelRdt": { + * "l3CacheSchema": "L3:0=ffff0;1=3ff" + * } + * } + */ + +type Manager interface { + // Applies Intel RDT configuration to the process with the specified pid + Apply(pid int) error + + // Returns statistics for Intel RDT + GetStats() (*Stats, error) + + // Destroys the Intel RDT 'container_id' group + Destroy() error + + // Returns Intel RDT path to save in a state file and to be able to + // restore the object later + GetPath() string + + // Set Intel RDT "resource control" filesystem as configured. 
+ Set(container *configs.Config) error +} + +// This implements interface Manager +type IntelRdtManager struct { + mu sync.Mutex + Config *configs.Config + Id string + Path string +} + +const ( + IntelRdtTasks = "tasks" +) + +var ( + // The absolute root path of the Intel RDT "resource control" filesystem + intelRdtRoot string + intelRdtRootLock sync.Mutex + + // The flag to indicate if Intel RDT is supported + isEnabled bool +) + +type intelRdtData struct { + root string + config *configs.Config + pid int +} + +// Check if Intel RDT is enabled in init() +func init() { + // 1. Check if hardware and kernel support Intel RDT/CAT feature + // "cat_l3" flag is set if supported + isFlagSet, err := parseCpuInfoFile("/proc/cpuinfo") + if !isFlagSet || err != nil { + isEnabled = false + return + } + + // 2. Check if Intel RDT "resource control" filesystem is mounted + // The user guarantees to mount the filesystem + isEnabled = isIntelRdtMounted() +} + +// Return the mount point path of Intel RDT "resource control" filesysem +func findIntelRdtMountpointDir() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + fields := strings.Split(text, " ") + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + numPostFields := len(postSeparatorFields) + + // This is an error as we can't detect if the mount is for "Intel RDT" + if numPostFields == 0 { + return "", fmt.Errorf("Found no fields post '-' in %q", text) + } + + if postSeparatorFields[0] == "resctrl" { + // Check that the mount is properly formated. + if numPostFields < 3 { + return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + return fields[4], nil + } + } + if err := s.Err(); err != nil { + return "", err + } + + return "", NewNotFoundError("Intel RDT") +} + +// Gets the root path of Intel RDT "resource control" filesystem +func getIntelRdtRoot() (string, error) { + intelRdtRootLock.Lock() + defer intelRdtRootLock.Unlock() + + if intelRdtRoot != "" { + return intelRdtRoot, nil + } + + root, err := findIntelRdtMountpointDir() + if err != nil { + return "", err + } + + if _, err := os.Stat(root); err != nil { + return "", err + } + + intelRdtRoot = root + return intelRdtRoot, nil +} + +func isIntelRdtMounted() bool { + _, err := getIntelRdtRoot() + if err != nil { + return false + } + + return true +} + +func parseCpuInfoFile(path string) (bool, error) { + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return false, err + } + + text := s.Text() + flags := strings.Split(text, " ") + + // "cat_l3" flag is set if Intel RDT/CAT is supported + for _, flag := range flags { + if flag == "cat_l3" { + return true, nil + } + } + } + return false, nil +} + +func parseUint(s string, base, bitSize int) (uint64, error) { + value, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. 
Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { + return 0, nil + } + + return value, err + } + + return value, nil +} + +// Gets a single uint64 value from the specified file. +func getIntelRdtParamUint(path, file string) (uint64, error) { + fileName := filepath.Join(path, file) + contents, err := ioutil.ReadFile(fileName) + if err != nil { + return 0, err + } + + res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64) + if err != nil { + return res, fmt.Errorf("unable to parse %q as a uint from file %q", string(contents), fileName) + } + return res, nil +} + +// Gets a string value from the specified file +func getIntelRdtParamString(path, file string) (string, error) { + contents, err := ioutil.ReadFile(filepath.Join(path, file)) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(contents)), nil +} + +func readTasksFile(dir string) ([]int, error) { + f, err := os.Open(filepath.Join(dir, IntelRdtTasks)) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + +func writeFile(dir, file, data string) error { + if dir == "" { + return fmt.Errorf("no such directory for %s", file) + } + if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data+"\n"), 0700); err != nil { + return fmt.Errorf("failed to write %v to %v: %v", data, file, err) + } + return nil +} + +func getIntelRdtData(c *configs.Config, pid int) (*intelRdtData, error) { + rootPath, err := getIntelRdtRoot() + if err != nil { + return nil, err + } + return &intelRdtData{ + root: rootPath, + config: c, + pid: pid, + }, nil +} + +// Get the read-only L3 cache information +func getL3CacheInfo() (*L3CacheInfo, error) { + l3CacheInfo := &L3CacheInfo{} + + rootPath, err := getIntelRdtRoot() + if err != nil { + return l3CacheInfo, err + } + + path := filepath.Join(rootPath, "info", "L3") + cbmMask, err := getIntelRdtParamString(path, "cbm_mask") + if err != nil { + return l3CacheInfo, err + } + minCbmBits, err := getIntelRdtParamUint(path, "min_cbm_bits") + if err != nil { + return l3CacheInfo, err + } + numClosids, err := getIntelRdtParamUint(path, "num_closids") + if err != nil { + return l3CacheInfo, err + } + + l3CacheInfo.CbmMask = cbmMask + l3CacheInfo.MinCbmBits = minCbmBits + l3CacheInfo.NumClosids = numClosids + + return l3CacheInfo, nil +} + +// WriteIntelRdtTasks writes the specified pid into the "tasks" file +func WriteIntelRdtTasks(dir string, pid int) error { + if dir == "" { + return fmt.Errorf("no such directory for %s", IntelRdtTasks) + } + + // Dont attach any pid if -1 is specified as a pid + if pid != -1 { + if err := ioutil.WriteFile(filepath.Join(dir, IntelRdtTasks), []byte(strconv.Itoa(pid)), 0700); err != nil { + return fmt.Errorf("failed to write %v to %v: %v", pid, IntelRdtTasks, err) + } + } + return nil +} + +// Check if Intel RDT is enabled +func IsEnabled() bool { + return isEnabled +} + +// Get the 'container_id' path in Intel RDT "resource control" filesystem +func GetIntelRdtPath(id string) (string, error) { + rootPath, err := getIntelRdtRoot() + if err != nil { + return "", err + } + + path := filepath.Join(rootPath, id) + return path, nil +} + +// Applies Intel RDT configuration to the 
process with the specified pid +func (m *IntelRdtManager) Apply(pid int) (err error) { + // If intelRdt is not specified in config, we do nothing + if m.Config.IntelRdt == nil { + return nil + } + d, err := getIntelRdtData(m.Config, pid) + if err != nil && !IsNotFound(err) { + return err + } + + m.mu.Lock() + defer m.mu.Unlock() + path, err := d.join(m.Id) + if err != nil { + return err + } + + m.Path = path + return nil +} + +// Destroys the Intel RDT 'container_id' group +func (m *IntelRdtManager) Destroy() error { + m.mu.Lock() + defer m.mu.Unlock() + if err := os.RemoveAll(m.Path); err != nil { + return err + } + m.Path = "" + return nil +} + +// Returns Intel RDT path to save in a state file and to be able to +// restore the object later +func (m *IntelRdtManager) GetPath() string { + if m.Path == "" { + m.Path, _ = GetIntelRdtPath(m.Id) + } + return m.Path +} + +// Returns statistics for Intel RDT +func (m *IntelRdtManager) GetStats() (*Stats, error) { + // If intelRdt is not specified in config + if m.Config.IntelRdt == nil { + return nil, nil + } + + m.mu.Lock() + defer m.mu.Unlock() + stats := NewStats() + + // The read-only L3 cache information + l3CacheInfo, err := getL3CacheInfo() + if err != nil { + return nil, err + } + stats.L3CacheInfo = l3CacheInfo + + // The read-only L3 cache schema in root + rootPath, err := getIntelRdtRoot() + if err != nil { + return nil, err + } + tmpRootStrings, err := getIntelRdtParamString(rootPath, "schemata") + if err != nil { + return nil, err + } + // L3 cache schema is in the first line + schemaRootStrings := strings.Split(tmpRootStrings, "\n") + stats.L3CacheSchemaRoot = schemaRootStrings[0] + + // The L3 cache schema in 'container_id' group + tmpStrings, err := getIntelRdtParamString(m.GetPath(), "schemata") + if err != nil { + return nil, err + } + // L3 cache schema is in the first line + schemaStrings := strings.Split(tmpStrings, "\n") + stats.L3CacheSchema = schemaStrings[0] + + return stats, nil +} + +// Set Intel RDT "resource control" filesystem as configured. +func (m *IntelRdtManager) Set(container *configs.Config) error { + path := m.GetPath() + + // About L3 cache schema file: + // The schema has allocation masks/values for L3 cache on each socket, + // which contains L3 cache id and capacity bitmask (CBM). + // Format: "L3:=;=;..." + // For example, on a two-socket machine, L3's schema line could be: + // L3:0=ff;1=c0 + // Which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0. + // + // About L3 cache CBM validity: + // The valid L3 cache CBM is a *contiguous bits set* and number of + // bits that can be set is less than the max bit. The max bits in the + // CBM is varied among supported Intel Xeon platforms. In Intel RDT + // "resource control" filesystem layout, the CBM in a group should + // be a subset of the CBM in root. Kernel will check if it is valid + // when writing. + // e.g., 0xfffff in root indicates the max bits of CBM is 20 bits, + // which mapping to entire L3 cache capacity. Some valid CBM values + // to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc. 
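+	// For example, with the sample config from the package comment above
+	// ("l3CacheSchema": "L3:0=ffff0;1=3ff"), this ends up as
+	// writeFile(path, "schemata", "L3:0=ffff0;1=3ff"), i.e. a single
+	// schema line written to the group's schemata file.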
+ if container.IntelRdt != nil { + l3CacheSchema := container.IntelRdt.L3CacheSchema + if l3CacheSchema != "" { + if err := writeFile(path, "schemata", l3CacheSchema); err != nil { + return err + } + } + } + + return nil +} + +func (raw *intelRdtData) join(id string) (string, error) { + path := filepath.Join(raw.root, id) + if err := os.MkdirAll(path, 0755); err != nil { + return "", err + } + + if err := WriteIntelRdtTasks(path, raw.pid); err != nil { + return "", err + } + return path, nil +} + +type NotFoundError struct { + ResourceControl string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("mountpoint for %s not found", e.ResourceControl) +} + +func NewNotFoundError(res string) error { + return &NotFoundError{ + ResourceControl: res, + } +} + +func IsNotFound(err error) bool { + if err == nil { + return false + } + _, ok := err.(*NotFoundError) + return ok +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go new file mode 100644 index 00000000000..095c0a380cd --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go @@ -0,0 +1,24 @@ +// +build linux + +package intelrdt + +type L3CacheInfo struct { + CbmMask string `json:"cbm_mask,omitempty"` + MinCbmBits uint64 `json:"min_cbm_bits,omitempty"` + NumClosids uint64 `json:"num_closids,omitempty"` +} + +type Stats struct { + // The read-only L3 cache information + L3CacheInfo *L3CacheInfo `json:"l3_cache_info,omitempty"` + + // The read-only L3 cache schema in root + L3CacheSchemaRoot string `json:"l3_cache_schema_root,omitempty"` + + // The L3 cache schema in 'container_id' group + L3CacheSchema string `json:"l3_cache_schema,omitempty"` +} + +func NewStats() *Stats { + return &Stats{} +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go b/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go index 82ffa7a8849..ce8b4e6b040 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go @@ -29,7 +29,7 @@ func ModKeyringPerm(ringId KeySerial, mask, setbits uint32) error { return err } - res := strings.Split(string(dest), ";") + res := strings.Split(dest, ";") if len(res) < 5 { return fmt.Errorf("Destination buffer for key description is too small") } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go index 8829b71ad85..ab453cde912 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go @@ -18,6 +18,8 @@ const ( SetgroupAttr uint16 = 27285 OomScoreAdjAttr uint16 = 27286 RootlessAttr uint16 = 27287 + UidmapPathAttr uint16 = 27288 + GidmapPathAttr uint16 = 27289 ) type Int32msg struct { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD new file mode 100644 index 00000000000..211d28b0f6b --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "mount.go", + "mountinfo.go", + ] + select({ + "@io_bazel_rules_go//go/platform:linux": [ + "mount_linux.go", + ], + "//conditions:default": [], + }), + importpath = 
"github.com/opencontainers/runc/libcontainer/mount", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go new file mode 100644 index 00000000000..e8965e081bb --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go @@ -0,0 +1,23 @@ +package mount + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted looks at /proc/self/mountinfo to determine of the specified +// mountpoint has been mounted +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go new file mode 100644 index 00000000000..1e5191928de --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go @@ -0,0 +1,82 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go b/vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go new file mode 100644 index 00000000000..e3fc3535e93 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
+ VfsOpts string +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process.go b/vendor/github.com/opencontainers/runc/libcontainer/process.go index f1ad0814912..86bf7387f8c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/process.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/process.go @@ -47,6 +47,10 @@ type Process struct { // ExtraFiles specifies additional open files to be inherited by the container ExtraFiles []*os.File + // Initial sizings for the console + ConsoleWidth uint16 + ConsoleHeight uint16 + // Capabilities specify the capabilities to keep when executing the process inside the container // All capabilities not specified will be dropped from the processes capability mask Capabilities *configs.Capabilities diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go index 50f9af574c4..149b1126652 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go @@ -15,6 +15,7 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/intelrdt" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/utils" @@ -49,6 +50,7 @@ type setnsProcess struct { parentPipe *os.File childPipe *os.File cgroupPaths map[string]string + intelRdtPath string config *initConfig fds []string process *Process @@ -83,12 +85,20 @@ func (p *setnsProcess) start() (err error) { if err = p.execSetns(); err != nil { return newSystemErrorWithCause(err, "executing setns process") } - // We can't join cgroups if we're in a rootless container. - if !p.config.Rootless && len(p.cgroupPaths) > 0 { + if len(p.cgroupPaths) > 0 { if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil { return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid()) } } + if p.intelRdtPath != "" { + // if Intel RDT "resource control" filesystem path exists + _, err := os.Stat(p.intelRdtPath) + if err == nil { + if err := intelrdt.WriteIntelRdtTasks(p.intelRdtPath, p.pid()); err != nil { + return newSystemErrorWithCausef(err, "adding pid %d to Intel RDT resource control filesystem", p.pid()) + } + } + } // set rlimits, this has to be done here because we lose permissions // to raise the limits once we enter a user-namespace if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { @@ -193,16 +203,17 @@ func (p *setnsProcess) setExternalDescriptors(newFds []string) { } type initProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - config *initConfig - manager cgroups.Manager - container *linuxContainer - fds []string - process *Process - bootstrapData io.Reader - sharePidns bool + cmd *exec.Cmd + parentPipe *os.File + childPipe *os.File + config *initConfig + manager cgroups.Manager + intelRdtManager intelrdt.Manager + container *linuxContainer + fds []string + process *Process + bootstrapData io.Reader + sharePidns bool } func (p *initProcess) pid() int { @@ -261,12 +272,35 @@ func (p *initProcess) start() error { p.process.ops = nil return newSystemErrorWithCause(err, "starting init process command") } + // Do this before syncing with child so that no children can escape the + // cgroup. We don't need to worry about not doing this and not being root + // because we'd be using the rootless cgroup manager in that case. 
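+	// The Intel RDT group below is applied right after the cgroup,
+	// presumably for the same reason: the init pid should sit in its
+	// resctrl group before it starts doing real work.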
+ if err := p.manager.Apply(p.pid()); err != nil { + return newSystemErrorWithCause(err, "applying cgroup configuration for process") + } + if p.intelRdtManager != nil { + if err := p.intelRdtManager.Apply(p.pid()); err != nil { + return newSystemErrorWithCause(err, "applying Intel RDT configuration for process") + } + } + defer func() { + if err != nil { + // TODO: should not be the responsibility to call here + p.manager.Destroy() + if p.intelRdtManager != nil { + p.intelRdtManager.Destroy() + } + } + }() + if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { return newSystemErrorWithCause(err, "copying bootstrap data to pipe") } + if err := p.execSetns(); err != nil { return newSystemErrorWithCause(err, "running exec setns process for init") } + // Save the standard descriptor names before the container process // can potentially move them (e.g., via dup2()). If we don't do this now, // we won't know at checkpoint time which file descriptor to look up. @@ -275,18 +309,6 @@ func (p *initProcess) start() error { return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid()) } p.setExternalDescriptors(fds) - // Do this before syncing with child so that no children can escape the - // cgroup. We don't need to worry about not doing this and not being root - // because we'd be using the rootless cgroup manager in that case. - if err := p.manager.Apply(p.pid()); err != nil { - return newSystemErrorWithCause(err, "applying cgroup configuration for process") - } - defer func() { - if err != nil { - // TODO: should not be the responsibility to call here - p.manager.Destroy() - } - }() if err := p.createNetworkInterfaces(); err != nil { return newSystemErrorWithCause(err, "creating network interfaces") } @@ -312,6 +334,11 @@ func (p *initProcess) start() error { if err := p.manager.Set(p.config.Config); err != nil { return newSystemErrorWithCause(err, "setting cgroup config for ready process") } + if p.intelRdtManager != nil { + if err := p.intelRdtManager.Set(p.config.Config); err != nil { + return newSystemErrorWithCause(err, "setting Intel RDT config for ready process") + } + } if p.config.Config.Hooks != nil { s := configs.HookState{ @@ -337,6 +364,11 @@ func (p *initProcess) start() error { if err := p.manager.Set(p.config.Config); err != nil { return newSystemErrorWithCause(err, "setting cgroup config for procHooks process") } + if p.intelRdtManager != nil { + if err := p.intelRdtManager.Set(p.config.Config); err != nil { + return newSystemErrorWithCause(err, "setting Intel RDT config for procHooks process") + } + } if p.config.Config.Hooks != nil { s := configs.HookState{ Version: p.container.config.Version, diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go index e2e734a8566..eb9e0253b9a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -13,11 +13,11 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/symlink" + "github.com/cyphar/filepath-securejoin" "github.com/mrunalp/fileutils" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/mount" "github.com/opencontainers/runc/libcontainer/system" libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" "github.com/opencontainers/selinux/go-selinux/label" @@ 
-40,7 +40,8 @@ func needsSetupDev(config *configs.Config) bool { // prepareRootfs sets up the devices, mount points, and filesystems for use // inside a new mount namespace. It doesn't set anything as ro. You must call // finalizeRootfs after this function to finish setting up the rootfs. -func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { +func prepareRootfs(pipe io.ReadWriter, iConfig *initConfig) (err error) { + config := iConfig.Config if err := prepareRoot(config); err != nil { return newSystemErrorWithCause(err, "preparing rootfs") } @@ -80,6 +81,7 @@ func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { // The hooks are run after the mounts are setup, but before we switch to the new // root, so that the old root is still available in the hooks for any mount // manipulations. + // Note that iConfig.Cwd is not guaranteed to exist here. if err := syncParentHooks(pipe); err != nil { return err } @@ -111,6 +113,14 @@ func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { } } + if cwd := iConfig.Cwd; cwd != "" { + // Note that spec.Process.Cwd can contain unclean value like "../../../../foo/bar...". + // However, we are safe to call MkDirAll directly because we are in the jail here. + if err := os.MkdirAll(cwd, 0755); err != nil { + return err + } + } + return nil } @@ -230,7 +240,7 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { // any previous mounts can invalidate the next mount's destination. // this can happen when a user specifies mounts within other mounts to cause breakouts or other // evil stuff to try to escape the container's rootfs. - if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { + if dest, err = securejoin.SecureJoin(rootfs, m.Destination); err != nil { return err } if err := checkMountDestination(rootfs, dest); err != nil { @@ -318,7 +328,7 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { // this can happen when a user specifies mounts within other mounts to cause breakouts or other // evil stuff to try to escape the container's rootfs. var err error - if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { + if dest, err = securejoin.SecureJoin(rootfs, m.Destination); err != nil { return err } if err := checkMountDestination(rootfs, dest); err != nil { @@ -668,9 +678,12 @@ func pivotRoot(rootfs string) error { return err } - // Make oldroot rprivate to make sure our unmounts don't propagate to the - // host (and thus bork the machine). - if err := unix.Mount("", ".", "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + // Make oldroot rslave to make sure our unmounts don't propagate to the + // host (and thus bork the machine). We don't use rprivate because this is + // known to cause issues due to races where we still have a reference to a + // mount while a process in the host namespace are trying to operate on + // something they think has no mounts (devicemapper in particular). + if err := unix.Mount("", ".", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil { return err } // Preform the unmount. MNT_DETACH allows us to unmount /proc/self/cwd. @@ -733,7 +746,14 @@ func remountReadonly(m *configs.Mount) error { flags = m.Flags ) for i := 0; i < 5; i++ { - if err := unix.Mount("", dest, "", uintptr(flags|unix.MS_REMOUNT|unix.MS_RDONLY), ""); err != nil { + // There is a special case in the kernel for + // MS_REMOUNT | MS_BIND, which allows us to change only the + // flags even as an unprivileged user (i.e. 
user namespace) + // assuming we don't drop any security related flags (nodev, + // nosuid, etc.). So, let's use that case so that we can do + // this re-mount without failing in a userns. + flags |= unix.MS_REMOUNT | unix.MS_BIND | unix.MS_RDONLY + if err := unix.Mount("", dest, "", uintptr(flags), ""); err != nil { switch err { case unix.EBUSY: time.Sleep(100 * time.Millisecond) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go index 2523cbf9901..d99f3fe640c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go @@ -22,6 +22,11 @@ var ( actErrno = libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM)) ) +const ( + // Linux system calls can have at most 6 arguments + syscallMaxArguments int = 6 +) + // Filters given syscalls in a container, preventing them from being used // Started in the container init process, and carried over to all child processes // Setns calls, however, require a separate invocation, as they are not children @@ -45,11 +50,11 @@ func InitSeccomp(config *configs.Seccomp) error { for _, arch := range config.Architectures { scmpArch, err := libseccomp.GetArchFromString(arch) if err != nil { - return err + return fmt.Errorf("error validating Seccomp architecture: %s", err) } if err := filter.AddArch(scmpArch); err != nil { - return err + return fmt.Errorf("error adding architecture to seccomp filter: %s", err) } } @@ -170,29 +175,55 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error { // Convert the call's action to the libseccomp equivalent callAct, err := getAction(call.Action) if err != nil { - return err + return fmt.Errorf("action in seccomp profile is invalid: %s", err) } // Unconditional match - just add the rule if len(call.Args) == 0 { if err = filter.AddRule(callNum, callAct); err != nil { - return err + return fmt.Errorf("error adding seccomp filter rule for syscall %s: %s", call.Name, err) } } else { - // Conditional match - convert the per-arg rules into library format + // If two or more arguments have the same condition, + // Revert to old behavior, adding each condition as a separate rule + argCounts := make([]uint, syscallMaxArguments) conditions := []libseccomp.ScmpCondition{} for _, cond := range call.Args { newCond, err := getCondition(cond) if err != nil { - return err + return fmt.Errorf("error creating seccomp syscall condition for syscall %s: %s", call.Name, err) } + argCounts[cond.Index] += 1 + conditions = append(conditions, newCond) } - if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil { - return err + hasMultipleArgs := false + for _, count := range argCounts { + if count > 1 { + hasMultipleArgs = true + break + } + } + + if hasMultipleArgs { + // Revert to old behavior + // Add each condition attached to a separate rule + for _, cond := range conditions { + condArr := []libseccomp.ScmpCondition{cond} + + if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil { + return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err) + } + } + } else { + // No conditions share same argument + // Use new, proper behavior + if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil { + return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err) + } } } diff --git 
a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go deleted file mode 100644 index c7bdb605aa8..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux,go1.5 - -package libcontainer - -import "syscall" - -// Set the GidMappingsEnableSetgroups member to true, so the process's -// setgroups proc entry wont be set to 'deny' if GidMappings are set -func enableSetgroups(sys *syscall.SysProcAttr) { - sys.GidMappingsEnableSetgroups = true -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go index 35b84219c5d..096c601e767 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go @@ -47,7 +47,10 @@ func (l *linuxSetnsInit) Init() error { return err } } - if l.config.Config.Seccomp != nil { + // Without NoNewPrivileges seccomp is a privileged operation, so we need to + // do this before dropping capabilities; otherwise do it as late as possible + // just before execve so as few syscalls take place after it as possible. + if l.config.Config.Seccomp != nil && !l.config.NoNewPrivileges { if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { return err } @@ -61,5 +64,13 @@ func (l *linuxSetnsInit) Init() error { if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { return err } + // Set seccomp as close to execve as possible, so as few syscalls take + // place afterward (reducing the amount of syscalls that users need to + // enable in their seccomp profiles). + if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges { + if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { + return newSystemErrorWithCause(err, "init seccomp") + } + } return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go index fbcf3a6ac02..8a544ed5be7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go @@ -30,15 +30,15 @@ func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) { var newperms uint32 if l.config.Config.Namespaces.Contains(configs.NEWUSER) { - // with user ns we need 'other' search permissions + // With user ns we need 'other' search permissions. newperms = 0x8 } else { - // without user ns we need 'UID' search permissions + // Without user ns we need 'UID' search permissions. newperms = 0x80000 } - // create a unique per session container name that we can - // join in setns; however, other containers can also join it + // Create a unique per session container name that we can join in setns; + // However, other containers can also join it. return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms } @@ -46,12 +46,12 @@ func (l *linuxStandardInit) Init() error { if !l.config.Config.NoNewKeyring { ringname, keepperms, newperms := l.getSessionRingParams() - // do not inherit the parent's session keyring + // Do not inherit the parent's session keyring. 
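// A minimal sketch of the seccomp ordering rule applied in setns_init above,
// using placeholder closures (loadSeccomp, dropCaps, execUser are illustrative,
// not runc APIs): without no_new_privs, installing a seccomp filter is a
// privileged operation and must happen while capabilities are still held;
// with no_new_privs it is permitted for an unprivileged process, so it is
// deferred until just before execve to minimize the syscalls that run after
// the filter is in place.
func seccompOrderingSketch(noNewPrivileges bool, loadSeccomp, dropCaps, execUser func() error) error {
	if !noNewPrivileges {
		if err := loadSeccomp(); err != nil {
			return err
		}
	}
	if err := dropCaps(); err != nil {
		return err
	}
	if noNewPrivileges {
		// As late as possible: only execve follows the filter load.
		if err := loadSeccomp(); err != nil {
			return err
		}
	}
	return execUser()
}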
sessKeyId, err := keys.JoinSessionKeyring(ringname) if err != nil { return err } - // make session keyring searcheable + // Make session keyring searcheable. if err := keys.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil { return err } @@ -68,7 +68,7 @@ func (l *linuxStandardInit) Init() error { // prepareRootfs() can be executed only for a new mount namespace. if l.config.Config.Namespaces.Contains(configs.NEWNS) { - if err := prepareRootfs(l.pipe, l.config.Config); err != nil { + if err := prepareRootfs(l.pipe, l.config); err != nil { return err } } @@ -150,19 +150,20 @@ func (l *linuxStandardInit) Init() error { if err := pdeath.Restore(); err != nil { return err } - // compare the parent from the initial start of the init process and make sure that it did not change. - // if the parent changes that means it died and we were reparented to something else so we should - // just kill ourself and not cause problems for someone else. + // Compare the parent from the initial start of the init process and make + // sure that it did not change. if the parent changes that means it died + // and we were reparented to something else so we should just kill ourself + // and not cause problems for someone else. if unix.Getppid() != l.parentPid { return unix.Kill(unix.Getpid(), unix.SIGKILL) } - // check for the arg before waiting to make sure it exists and it is returned - // as a create time error. + // Check for the arg before waiting to make sure it exists and it is + // returned as a create time error. name, err := exec.LookPath(l.config.Args[0]) if err != nil { return err } - // close the pipe to signal that we have completed our init. + // Close the pipe to signal that we have completed our init. l.pipe.Close() // Wait for the FIFO to be opened on the other side before exec-ing the // user process. We open it through /proc/self/fd/$fd, because the fd that @@ -170,19 +171,26 @@ func (l *linuxStandardInit) Init() error { // re-open an O_PATH fd through /proc. fd, err := unix.Open(fmt.Sprintf("/proc/self/fd/%d", l.fifoFd), unix.O_WRONLY|unix.O_CLOEXEC, 0) if err != nil { - return newSystemErrorWithCause(err, "openat exec fifo") + return newSystemErrorWithCause(err, "open exec fifo") } if _, err := unix.Write(fd, []byte("0")); err != nil { return newSystemErrorWithCause(err, "write 0 exec fifo") } + // Close the O_PATH fifofd fd before exec because the kernel resets + // dumpable in the wrong order. This has been fixed in newer kernels, but + // we keep this to ensure CVE-2016-9962 doesn't re-emerge on older kernels. + // N.B. the core issue itself (passing dirfds to the host filesystem) has + // since been resolved. + // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 + unix.Close(l.fifoFd) + // Set seccomp as close to execve as possible, so as few syscalls take + // place afterward (reducing the amount of syscalls that users need to + // enable in their seccomp profiles). 
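// The fifo write above works around the fact that the descriptor inherited
// from the parent is an O_PATH fd, which cannot be written to directly; it is
// upgraded by re-opening it through /proc/self/fd/<fd>. A standalone sketch of
// that upgrade step (reopenWritable is illustrative, not a runc helper;
// assumes "fmt" and "golang.org/x/sys/unix" are imported):
func reopenWritable(pathFd int) (int, error) {
	return unix.Open(fmt.Sprintf("/proc/self/fd/%d", pathFd), unix.O_WRONLY|unix.O_CLOEXEC, 0)
}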
if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges { if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { return newSystemErrorWithCause(err, "init seccomp") } } - // close the statedir fd before exec because the kernel resets dumpable in the wrong order - // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 - unix.Close(l.fifoFd) if err := syscall.Exec(name, l.config.Args[0:], os.Environ()); err != nil { return newSystemErrorWithCause(err, "exec user process") } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go index 44fa6b43a8d..1f8c5e71e41 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go @@ -45,6 +45,11 @@ func destroy(c *linuxContainer) error { } } err := c.cgroupManager.Destroy() + if c.intelRdtManager != nil { + if ierr := c.intelRdtManager.Destroy(); err == nil { + err = ierr + } + } if rerr := os.RemoveAll(c.root); err == nil { err = rerr } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go deleted file mode 100644 index f8d1d689cee..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go index c629dc67de9..29fd641e9dd 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go @@ -1,8 +1,10 @@ package libcontainer import "github.com/opencontainers/runc/libcontainer/cgroups" +import "github.com/opencontainers/runc/libcontainer/intelrdt" type Stats struct { - Interfaces []*NetworkInterface - CgroupStats *cgroups.Stats + Interfaces []*NetworkInterface + CgroupStats *cgroups.Stats + IntelRdtStats *intelrdt.Stats } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go deleted file mode 100644 index da78c1c2e15..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package libcontainer - -// Solaris - TODO - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go deleted file mode 100644 index f8d1d689cee..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD index 7ba719fd195..0e7fc01da98 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD @@ -18,7 +18,6 @@ go_library( "unsupported.go", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "sysconfig.go", "sysconfig_notcgo.go", "unsupported.go", ], @@ -55,17 +54,29 @@ go_library( "//conditions:default": [], }) + select({ 
"@io_bazel_rules_go//go/platform:linux_386": [ - "syscall_linux_386.go", + "syscall_linux_32.go", ], "@io_bazel_rules_go//go/platform:linux_amd64": [ "syscall_linux_64.go", ], "@io_bazel_rules_go//go/platform:linux_arm": [ - "syscall_linux_arm.go", + "syscall_linux_32.go", ], "@io_bazel_rules_go//go/platform:linux_arm64": [ "syscall_linux_64.go", ], + "@io_bazel_rules_go//go/platform:linux_mips": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64le": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mipsle": [ + "syscall_linux_64.go", + ], "@io_bazel_rules_go//go/platform:linux_ppc64": [ "syscall_linux_64.go", ], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 4837085a7fd..5f124cd8bbc 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -134,3 +134,14 @@ func RunningInUserNS() bool { func SetSubreaper(i int) error { return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) } + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + + if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { + return -1, err + } + + return int(i), nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go similarity index 93% rename from vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go rename to vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go index 31ff3deb135..c5ca5d86235 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go @@ -1,4 +1,5 @@ -// +build linux,arm +// +build linux +// +build 386 arm package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go deleted file mode 100644 index 3f7235ed154..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,386 - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go index d7891a2ffa0..11c3faafbf0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go @@ -1,4 +1,5 @@ -// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x +// +build linux +// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go index b3a07cba3ef..b8434f10500 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go @@ -1,4 +1,4 @@ -// +build cgo,linux cgo,freebsd +// +build cgo,linux package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD index d70f0eabb1d..966c1074fb7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD @@ -6,9 +6,6 @@ go_library( "lookup.go", "user.go", ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "lookup_unsupported.go", - ], "@io_bazel_rules_go//go/platform:darwin": [ "lookup_unix.go", ], @@ -21,24 +18,15 @@ go_library( "@io_bazel_rules_go//go/platform:linux": [ "lookup_unix.go", ], - "@io_bazel_rules_go//go/platform:nacl": [ - "lookup_unsupported.go", - ], "@io_bazel_rules_go//go/platform:netbsd": [ "lookup_unix.go", ], "@io_bazel_rules_go//go/platform:openbsd": [ "lookup_unix.go", ], - "@io_bazel_rules_go//go/platform:plan9": [ - "lookup_unsupported.go", - ], "@io_bazel_rules_go//go/platform:solaris": [ "lookup_unix.go", ], - "@io_bazel_rules_go//go/platform:windows": [ - "lookup_unsupported.go", - ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer/user", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 4a8d00acbd5..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import ( - "io" - "syscall" -) - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. 
If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go index 2cbb6491a70..c8a9364d54d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go @@ -84,12 +84,10 @@ func RecvFd(socket *os.File) (*os.File, error) { // addition, the file.Name() of the given file will also be sent as // non-auxiliary data in the same payload (allowing to send contextual // information for a file descriptor). -func SendFd(socket, file *os.File) error { - name := []byte(file.Name()) +func SendFd(socket *os.File, name string, fd uintptr) error { if len(name) >= MaxNameLen { - return fmt.Errorf("sendfd: filename too long: %s", file.Name()) + return fmt.Errorf("sendfd: filename too long: %s", name) } - oob := unix.UnixRights(int(file.Fd())) - - return unix.Sendmsg(int(socket.Fd()), name, oob, nil, 0) + oob := unix.UnixRights(int(fd)) + return unix.Sendmsg(int(socket.Fd()), []byte(name), oob, nil, 0) } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/README b/vendor/k8s.io/kube-openapi/pkg/generators/README index 35660a40da7..feb19b401a9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/README +++ b/vendor/k8s.io/kube-openapi/pkg/generators/README @@ -11,5 +11,36 @@ escape or quote the value string. Extensions can be used to pass more informatio documentation generators. For example a type might have a friendly name to be displayed in documentation or being used in a client's fluent interface. +# Custom OpenAPI type definitions + +Custom types which otherwise don't map directly to OpenAPI can override their +OpenAPI definition by implementing a function named "OpenAPIDefinition" with +the following signature: + + import openapi "k8s.io/kube-openapi/pkg/common" + + // ... + + type Time struct { + time.Time + } + + func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } + } + +Alternatively, the type can avoid the "openapi" import by defining the following +methods. The following example produces the same OpenAPI definition as the +example above: + + func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + func (_ Time) OpenAPISchemaFormat() string { return "date-time" } TODO(mehdy): Make k8s:openapi-gen a parameter to the generator now that OpenAPI has its own repo. diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 5efb3f45c6f..d9b0980abb4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -118,35 +118,13 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat `)...) - outputPath := arguments.OutputPackagePath - - if err := context.AddDir(outputPath); err != nil { - glog.Fatalf("Failed to load output package: %v", err) - } - - // Compute the canonical output path to allow retrieval of the - // package for a vendored output path. 
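// Complementing the SendFd change in utils/cmsg.go above, the receive side
// parses the SCM_RIGHTS control message back out of the socket. A minimal
// sketch under the assumption that "fmt", "os" and "golang.org/x/sys/unix"
// are available (recvFd is illustrative; the real counterpart is RecvFd in
// the same file):
func recvFd(socket *os.File) (fd int, name string, err error) {
	buf := make([]byte, 4096)
	oob := make([]byte, unix.CmsgSpace(4)) // room for one int32 file descriptor
	n, oobn, _, _, err := unix.Recvmsg(int(socket.Fd()), buf, oob, 0)
	if err != nil {
		return -1, "", err
	}
	scms, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil || len(scms) != 1 {
		return -1, "", fmt.Errorf("expected one control message, got %d (%v)", len(scms), err)
	}
	fds, err := unix.ParseUnixRights(&scms[0])
	if err != nil || len(fds) != 1 {
		return -1, "", fmt.Errorf("expected one fd, got %d (%v)", len(fds), err)
	}
	return fds[0], string(buf[:n]), nil
}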
- const vendorPath = "/vendor/" - canonicalOutputPath := outputPath - if strings.Contains(outputPath, vendorPath) { - canonicalOutputPath = outputPath[strings.Index(outputPath, vendorPath)+len(vendorPath):] - } - - // The package for outputPath is mapped to the canonical path - pkg := context.Universe[canonicalOutputPath] - if pkg == nil { - glog.Fatalf("Got nil output package: %v", err) - } return generator.Packages{ &generator.DefaultPackage{ - PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0], - // Use the supplied output path rather than the canonical - // one to allow generation into the path of a - // vendored package. - PackagePath: outputPath, + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, HeaderText: header, GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, pkg, context)} + return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, arguments.OutputPackagePath, context)} }, FilterFunc: func(c *generator.Context, t *types.Type) bool { // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen @@ -175,12 +153,12 @@ const ( type openAPIGen struct { generator.DefaultGen // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. - targetPackage *types.Package + targetPackage string imports namer.ImportTracker context *generator.Context } -func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context *generator.Context) generator.Generator { +func NewOpenAPIGen(sanitizedName string, targetPackage string, context *generator.Context) generator.Generator { return &openAPIGen{ DefaultGen: generator.DefaultGen{ OptionalName: sanitizedName, @@ -194,7 +172,7 @@ func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context * func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { // Have the raw namer for this file track what it imports. 
return namer.NameSystems{ - "raw": namer.NewRawNamer(g.targetPackage.Path, g.imports), + "raw": namer.NewRawNamer(g.targetPackage, g.imports), } } @@ -207,10 +185,10 @@ func (g *openAPIGen) Filter(c *generator.Context, t *types.Type) bool { } func (g *openAPIGen) isOtherPackage(pkg string) bool { - if pkg == g.targetPackage.Path { + if pkg == g.targetPackage { return false } - if strings.HasSuffix(pkg, "\""+g.targetPackage.Path+"\"") { + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { return false } return true @@ -300,23 +278,37 @@ func newOpenAPITypeWriter(sw *generator.SnippetWriter) openAPITypeWriter { } } +func methodReturnsValue(mt *types.Type, pkg, name string) bool { + if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { + return false + } + r := mt.Signature.Results[0] + return r.Name.Name == name && r.Name.Package == pkg +} + func hasOpenAPIDefinitionMethod(t *types.Type) bool { for mn, mt := range t.Methods { if mn != "OpenAPIDefinition" { continue } - if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { - return false - } - r := mt.Signature.Results[0] - if r.Name.Name != "OpenAPIDefinition" || r.Name.Package != openAPICommonPackagePath { - return false - } - return true + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") } return false } +func hasOpenAPIDefinitionMethods(t *types.Type) bool { + var hasSchemaTypeMethod, hasOpenAPISchemaFormat bool + for mn, mt := range t.Methods { + switch mn { + case "OpenAPISchemaType": + hasSchemaTypeMethod = methodReturnsValue(mt, "", "[]string") + case "OpenAPISchemaFormat": + hasOpenAPISchemaFormat = methodReturnsValue(mt, "", "string") + } + } + return hasSchemaTypeMethod && hasOpenAPISchemaFormat +} + // typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name. func typeShortName(t *types.Type) string { return filepath.Base(t.Name.Package) + "." + t.Name.Name @@ -360,6 +352,28 @@ func (g openAPITypeWriter) generate(t *types.Type) error { g.Do("$.type|raw${}.OpenAPIDefinition(),\n", args) return nil } + if hasOpenAPIDefinitionMethods(t) { + // Since this generated snippet is part of a map: + // + // map[string]common.OpenAPIDefinition: { + // "TYPE_NAME": { + // Schema: spec.Schema{ ... }, + // }, + // } + // + // For compliance with gofmt -s it's important we elide the + // struct type. The type is implied by the map and will be + // removed otherwise. + g.Do("{\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n"+ + "Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "},\n", args) + return nil + } g.Do("{\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) g.generateDescription(t.CommentLines) g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5f607c76701..61dbf4fc0e4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -210,11 +210,18 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error }, nil } +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + // ParseSchema creates a walkable Schema from an openapi schema. 
While // this function is public, it doesn't leak through the interface. func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) == 1 { - t := s.GetType().GetValue()[0] + objectTypes := s.GetType().GetValue() + if len(objectTypes) == 1 { + t := objectTypes[0] switch t { case object: return d.parseMap(s, path) @@ -229,6 +236,9 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err if s.GetProperties() != nil { return d.parseKind(s, path) } + if len(objectTypes) == 0 || (len(objectTypes) == 1 && objectTypes[0] == "") { + return d.parseArbitrary(s, path) + } return d.parsePrimitive(s, path) } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 02ab06d6d53..b48e62c3bf9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -58,6 +58,14 @@ type SchemaVisitor interface { VisitReference(Reference) } +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatability, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + // Schema is the base definition of an openapi type. type Schema interface { // Giving a visitor here will let you visit the actual type. @@ -242,6 +250,23 @@ func (p *Primitive) GetName() string { return fmt.Sprintf("%s (%s)", p.Type, p.Format) } +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + // Reference implementation depends on the type of document. 
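// The Arbitrary node above is only delivered to visitors that opt in via
// SchemaVisitorArbitrary, which is checked with a runtime type assertion so
// existing SchemaVisitor implementations keep compiling and simply skip the
// new node. A hypothetical opt-in visitor (countingVisitor is illustrative,
// not part of kube-openapi):
type countingVisitor struct {
	kinds, arbitrary int
}

func (c *countingVisitor) VisitArray(a *Array)         {}
func (c *countingVisitor) VisitMap(m *Map)             {}
func (c *countingVisitor) VisitPrimitive(p *Primitive) {}
func (c *countingVisitor) VisitKind(k *Kind)           { c.kinds++ }
func (c *countingVisitor) VisitReference(r Reference)  { r.SubSchema().Accept(c) }
func (c *countingVisitor) VisitArbitrary(a *Arbitrary) { c.arbitrary++ }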
type Reference interface { Schema diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go index 0be7a5302f1..bbbdd4f61c9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -127,6 +127,9 @@ func (item *mapItem) VisitKind(schema *proto.Kind) { } } +func (item *mapItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *mapItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) @@ -163,11 +166,14 @@ func (item *arrayItem) VisitArray(schema *proto.Array) { } func (item *arrayItem) VisitMap(schema *proto.Map) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) } func (item *arrayItem) VisitKind(schema *proto.Kind) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitArbitrary(schema *proto.Arbitrary) { } func (item *arrayItem) VisitReference(schema proto.Reference) { @@ -226,6 +232,9 @@ func (item *primitiveItem) VisitKind(schema *proto.Kind) { item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) } +func (item *primitiveItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *primitiveItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) From 65046435e772f7e0e521fced59737bca11980bcb Mon Sep 17 00:00:00 2001 From: Mik Vyatskov Date: Fri, 12 Jan 2018 20:26:37 +0100 Subject: [PATCH 142/264] Adjust the Stackdriver Logging length test Signed-off-by: Mik Vyatskov --- test/e2e/instrumentation/logging/stackdrvier/basic.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/instrumentation/logging/stackdrvier/basic.go b/test/e2e/instrumentation/logging/stackdrvier/basic.go index 7bc5e682b5f..7b0f6ff593a 100644 --- a/test/e2e/instrumentation/logging/stackdrvier/basic.go +++ b/test/e2e/instrumentation/logging/stackdrvier/basic.go @@ -108,7 +108,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd }) ginkgo.By("Checking that too long lines are trimmed", func() { - maxLength := 100000 + maxLength := 100 * 1024 cmd := []string{ "/bin/sh", "-c", From c1554d08d8e5af81eb3f10868be9a5652b85d089 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Fri, 12 Jan 2018 12:27:34 -0800 Subject: [PATCH 143/264] Install gazelle from bazelbuild/bazel-gazelle instead of rules_go --- hack/update-bazel.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/update-bazel.sh b/hack/update-bazel.sh index e569d94e9c7..1690c32f8f5 100755 --- a/hack/update-bazel.sh +++ b/hack/update-bazel.sh @@ -31,8 +31,8 @@ kube::util::go_install_from_commit \ github.com/kubernetes/repo-infra/kazel \ ae4e9a3906ace4ba657b7a09242610c6266e832c kube::util::go_install_from_commit \ - github.com/bazelbuild/rules_go/go/tools/gazelle/gazelle \ - 737df20c53499fd84b67f04c6ca9ccdee2e77089 + github.com/bazelbuild/bazel-gazelle/cmd/gazelle \ + 31ce76e3acc34a22434d1a783bb9b3cae790d108 # 0.8.0 touch "${KUBE_ROOT}/vendor/BUILD" From 0d5eb00a39293bad4237bd4556f67ff959e13c49 Mon Sep 17 00:00:00 
2001 From: Jeff Grafton Date: Fri, 12 Jan 2018 12:27:49 -0800 Subject: [PATCH 144/264] Revert "Rewrite go_install_from_commit to handle pkgs that aren't in HEAD" This reverts commit e55938940d2d95e9cb1ff1def63cc54d7725f774. --- hack/lib/util.sh | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/hack/lib/util.sh b/hack/lib/util.sh index 0035ced815f..f699a295267 100755 --- a/hack/lib/util.sh +++ b/hack/lib/util.sh @@ -476,15 +476,10 @@ kube::util::go_install_from_commit() { kube::util::ensure-temp-dir mkdir -p "${KUBE_TEMP}/go/src" - # TODO(spiffxp): remove this brittle workaround for go getting a package that doesn't exist at HEAD - repo=$(echo ${pkg} | cut -d/ -f1-3) - git clone "https://${repo}" "${KUBE_TEMP}/go/src/${repo}" - # GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}" + GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}" ( - cd "${KUBE_TEMP}/go/src/${repo}" - git fetch # TODO(spiffxp): workaround + cd "${KUBE_TEMP}/go/src/${pkg}" git checkout -q "${commit}" - GOPATH="${KUBE_TEMP}/go" go get -d "${pkg}" #TODO(spiffxp): workaround GOPATH="${KUBE_TEMP}/go" go install "${pkg}" ) PATH="${KUBE_TEMP}/go/bin:${PATH}" From d8f6febc7d2ea5c045dd93ff9ec7a06f11cdf2a5 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Fri, 12 Jan 2018 10:08:23 -0800 Subject: [PATCH 145/264] Use the bazel version check function from bazel-skylib --- build/root/WORKSPACE | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index c6ba9c71059..a3bf016e7f1 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -12,6 +12,13 @@ http_archive( urls = ["https://github.com/kubernetes/repo-infra/archive/1fb0a3ff0cc5308a6d8e2f3f9c57d1f2f940354e.tar.gz"], ) +http_archive( + name = "bazel_skylib", + sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d", + strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b", + urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"], +) + ETCD_VERSION = "3.1.10" new_http_archive( @@ -39,9 +46,9 @@ http_archive( urls = ["https://github.com/bazelbuild/rules_docker/archive/8bbe2a8abd382641e65ff7127a3700a8530f02ce.tar.gz"], ) -load("@io_kubernetes_build//defs:bazel_version.bzl", "check_version") +load("@bazel_skylib//:lib.bzl", "versions") -check_version("0.8.0") +versions.check(minimum_bazel_version = "0.8.0") load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains", "go_download_sdk") load("@io_bazel_rules_docker//docker:docker.bzl", "docker_repositories", "docker_pull") From b96c383ef7982f526db190ebba76730fd6c818df Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Sat, 13 Jan 2018 05:47:49 +0800 Subject: [PATCH 146/264] Check grpc server ready properly --- pkg/kubelet/cm/deviceplugin/device_plugin_stub.go | 2 +- pkg/kubelet/cm/deviceplugin/endpoint_test.go | 12 ++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index 9969e99989b..5e39dd00853 100644 --- a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go +++ b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -70,7 +70,7 @@ func (m *Stub) Start() error { // Wait till grpc server is ready. 
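// The readiness poll below waits for the gRPC server to expose at least one
// registered service; the fix changes the threshold from "> 1" to "> 0" so a
// stub that registers a single service is also considered ready. The same
// poll could be factored into a timeout-based helper, sketched here
// (waitUntil is illustrative, not part of the device plugin code; assumes
// "fmt" and "time"):
func waitUntil(timeout, interval time.Duration, ready func() bool) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if ready() {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("condition not met after %v", timeout)
}

// Example: waitUntil(10*time.Second, time.Second,
//	func() bool { return len(m.server.GetServiceInfo()) > 0 })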
for i := 0; i < 10; i++ { services := m.server.GetServiceInfo() - if len(services) > 1 { + if len(services) > 0 { break } time.Sleep(1 * time.Second) diff --git a/pkg/kubelet/cm/deviceplugin/endpoint_test.go b/pkg/kubelet/cm/deviceplugin/endpoint_test.go index f4634db85f4..6005310181a 100644 --- a/pkg/kubelet/cm/deviceplugin/endpoint_test.go +++ b/pkg/kubelet/cm/deviceplugin/endpoint_test.go @@ -90,20 +90,12 @@ func TestRun(t *testing.T) { go e.run() // Wait for the first callback to be issued. - select { - case <-callbackChan: - break - } + <-callbackChan p.Update(updated) // Wait for the second callback to be issued. - select { - case <-callbackChan: - break - } - - time.Sleep(time.Second) + <-callbackChan e.mutex.Lock() defer e.mutex.Unlock() From 7064f4856ab1addbc612fdc7f5aa2957207232fd Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Wed, 10 Jan 2018 16:47:04 -0800 Subject: [PATCH 147/264] Remove salt support for providers that no longer exist. --- cluster/get-kube.sh | 8 - cluster/saltbase/salt/base.sls | 14 -- cluster/saltbase/salt/cni/init.sls | 15 -- cluster/saltbase/salt/docker/init.sls | 42 ----- cluster/saltbase/salt/generate-cert/init.sls | 15 +- .../salt/generate-cert/make-ca-cert.sh | 18 --- cluster/saltbase/salt/helpers/init.sls | 14 -- .../salt/helpers/safe_format_and_mount | 144 ------------------ cluster/saltbase/salt/kube-addons/init.sls | 2 +- cluster/saltbase/salt/kube-apiserver/init.sls | 2 +- .../kube-apiserver/kube-apiserver.manifest | 18 +-- .../kube-controller-manager.manifest | 16 +- .../saltbase/salt/kube-node-unpacker/init.sls | 4 - .../salt/kube-proxy/kube-proxy.manifest | 2 +- cluster/saltbase/salt/kubelet/default | 7 +- cluster/saltbase/salt/top.sls | 8 +- 16 files changed, 15 insertions(+), 314 deletions(-) delete mode 100644 cluster/saltbase/salt/helpers/init.sls delete mode 100644 cluster/saltbase/salt/helpers/safe_format_and_mount diff --git a/cluster/get-kube.sh b/cluster/get-kube.sh index f0492d45ac6..b733bd2187e 100755 --- a/cluster/get-kube.sh +++ b/cluster/get-kube.sh @@ -24,14 +24,6 @@ # Set KUBERNETES_PROVIDER to choose between different providers: # Google Compute Engine [default] # * export KUBERNETES_PROVIDER=gce; wget -q -O - https://get.k8s.io | bash -# Libvirt (with CoreOS as a guest operating system) -# * export KUBERNETES_PROVIDER=libvirt-coreos; wget -q -O - https://get.k8s.io | bash -# Vagrant (local virtual machines) -# * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash -# VMWare Photon Controller -# * export KUBERNETES_PROVIDER=photon-controller; wget -q -O - https://get.k8s.io | bash -# OpenStack-Heat -# * export KUBERNETES_PROVIDER=openstack-heat; wget -q -O - https://get.k8s.io | bash # # Set KUBERNETES_RELEASE to choose a specific release instead of the current # stable release, (e.g. 'v1.3.7'). 
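The endpoint test cleanup above relies on a small Go equivalence: a select statement with a single receive case and no default blocks exactly like a bare channel receive, so the two forms below are interchangeable (sketch only, callbackChan as in the test):

    // Both forms block until one value arrives on the channel.
    select {
    case <-callbackChan:
    }

    <-callbackChan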
diff --git a/cluster/saltbase/salt/base.sls b/cluster/saltbase/salt/base.sls index 91639730dd0..c9ccdf5a84c 100644 --- a/cluster/saltbase/salt/base.sls +++ b/cluster/saltbase/salt/base.sls @@ -24,20 +24,6 @@ pkg-core: - git {% endif %} -# Fix ARP cache issues on AWS by setting net.ipv4.neigh.default.gc_thresh1=0 -# See issue #23395 -{% if grains.get('cloud') == 'aws' %} -# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089 -# (we also have to give it a different id from the same fix elsewhere) -99-salt-conf-with-a-different-id: - file.touch: - - name: /etc/sysctl.d/99-salt.conf - -net.ipv4.neigh.default.gc_thresh1: - sysctl.present: - - value: 0 -{% endif %} - /usr/local/share/doc/kubernetes: file.directory: - user: root diff --git a/cluster/saltbase/salt/cni/init.sls b/cluster/saltbase/salt/cni/init.sls index a1d1060d6bd..14f26927fef 100644 --- a/cluster/saltbase/salt/cni/init.sls +++ b/cluster/saltbase/salt/cni/init.sls @@ -24,18 +24,3 @@ cni-tar: - source_hash: md5=9534876FAE7DBE813CDAB404DC1F9219 - archive_format: tar - if_missing: /home/kubernetes/bin - -{% if grains['cloud'] is defined and grains.cloud in [ 'vagrant' ] %} -# Install local CNI network plugins in a Vagrant environment -cmd-local-cni-plugins: - cmd.run: - - name: | - cp -v /vagrant/cluster/network-plugins/cni/bin/* /home/kubernetes/bin/. - chmod +x /home/kubernetes/bin/* -cmd-local-cni-config: - cmd.run: - - name: | - cp -v /vagrant/cluster/network-plugins/cni/config/* /etc/cni/net.d/. - chown root:root /etc/cni/net.d/* - chmod 744 /etc/cni/net.d/* -{% endif -%} diff --git a/cluster/saltbase/salt/docker/init.sls b/cluster/saltbase/salt/docker/init.sls index ed1b9186d5a..23ab4cfba1b 100644 --- a/cluster/saltbase/salt/docker/init.sls +++ b/cluster/saltbase/salt/docker/init.sls @@ -344,37 +344,6 @@ net.ipv4.ip_forward: {% set override_deb_sha1='' %} {% set override_docker_ver='' %} -{% elif grains.get('cloud', '') == 'aws' - and grains.get('os_family', '') == 'Debian' - and grains.get('oscodename', '') == 'jessie' -%} -# TODO: Get from google storage? -{% set docker_pkg_name='docker-engine' %} -{% set override_docker_ver='1.11.2-0~jessie' %} -{% set override_deb='docker-engine_1.11.2-0~jessie_amd64.deb' %} -{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~jessie_amd64.deb' %} -{% set override_deb_sha1='c312f1f6fa0b34df4589bb812e4f7af8e28fd51d' %} - -# Ubuntu presents as os_family=Debian, osfullname=Ubuntu -{% elif grains.get('cloud', '') == 'aws' - and grains.get('os_family', '') == 'Debian' - and grains.get('oscodename', '') == 'trusty' -%} -# TODO: Get from google storage? -{% set docker_pkg_name='docker-engine' %} -{% set override_docker_ver='1.11.2-0~trusty' %} -{% set override_deb='docker-engine_1.11.2-0~trusty_amd64.deb' %} -{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~trusty_amd64.deb' %} -{% set override_deb_sha1='022dee31e68c6d572eaac750915786e4a6729d2a' %} - -{% elif grains.get('cloud', '') == 'aws' - and grains.get('os_family', '') == 'Debian' - and grains.get('oscodename', '') == 'wily' -%} -# TODO: Get from google storage? 
-{% set docker_pkg_name='docker-engine' %} -{% set override_docker_ver='1.11.2-0~wily' %} -{% set override_deb='docker-engine_1.11.2-0~wily_amd64.deb' %} -{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~wily_amd64.deb' %} -{% set override_deb_sha1='3e02f51fe18aa777eeb1676c3d9a75e5ea6d96c9' %} - {% else %} {% set docker_pkg_name='lxc-docker-1.7.1' %} {% set override_docker_ver='1.7.1' %} @@ -502,19 +471,8 @@ fix-systemd-docker-healthcheck-service: {% endif %} docker: -# Starting Docker is racy on aws for some reason. To be honest, since Monit -# is managing Docker restart we should probably just delete this whole thing -# but the kubernetes components use salt 'require' to set up a dag, and that -# complicated and scary to unwind. -# On AWS, we use a trick now... We don't start the docker service through Salt. -# Kubelet or our health checker will start it. But we use service.enabled, -# so we still have a `service: docker` node for our DAG. -{% if grains.cloud is defined and grains.cloud == 'aws' %} - service.enabled: -{% else %} service.running: - enable: True -{% endif %} # If we put a watch on this, salt will try to start the service. # We put the watch on the fixer instead {% if not pillar.get('is_systemd') %} diff --git a/cluster/saltbase/salt/generate-cert/init.sls b/cluster/saltbase/salt/generate-cert/init.sls index 436e5af7f71..4543239afe1 100644 --- a/cluster/saltbase/salt/generate-cert/init.sls +++ b/cluster/saltbase/salt/generate-cert/init.sls @@ -1,17 +1,6 @@ {% set master_extra_sans=grains.get('master_extra_sans', '') %} -{% if grains.cloud is defined %} - {% if grains.cloud == 'gce' %} - {% set cert_ip='_use_gce_external_ip_' %} - {% endif %} - {% if grains.cloud == 'aws' %} - {% set cert_ip='_use_aws_external_ip_' %} - {% endif %} - {% if grains.cloud == 'azure-legacy' %} - {% set cert_ip='_use_azure_dns_name_' %} - {% endif %} - {% if grains.cloud == 'photon-controller' %} - {% set cert_ip=grains.ip_interfaces.eth0[0] %} - {% endif %} +{% if grains.cloud is defined and grains.cloud == 'gce' %} + {% set cert_ip='_use_gce_external_ip_' %} {% endif %} # If there is a pillar defined, override any defaults. diff --git a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh index f4e23a81f9e..41531209ed5 100755 --- a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh +++ b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh @@ -38,24 +38,6 @@ if [ "$cert_ip" == "_use_gce_external_ip_" ]; then cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip) fi -if [ "$cert_ip" == "_use_aws_external_ip_" ]; then - # If there's no public IP assigned (e.g. this host is running on an internal subnet in a VPC), then - # curl will happily spit out the contents of AWS's 404 page and an exit code of zero. - # - # The string containing the 404 page trips up one of easyrsa's calls to openssl later; whichever - # one creates the CA certificate, because the 404 page is > 64 characters. - if cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/public-ipv4); then - : - else - cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/local-ipv4) - fi -fi - -if [ "$cert_ip" == "_use_azure_dns_name_" ]; then - cert_ip=$(uname -n | awk -F. 
'{ print $2 }').cloudapp.net - use_cn=true -fi - sans="IP:${cert_ip}" if [[ -n "${extra_sans}" ]]; then sans="${sans},${extra_sans}" diff --git a/cluster/saltbase/salt/helpers/init.sls b/cluster/saltbase/salt/helpers/init.sls deleted file mode 100644 index 7e5ad435d6e..00000000000 --- a/cluster/saltbase/salt/helpers/init.sls +++ /dev/null @@ -1,14 +0,0 @@ -{% if grains['cloud'] is defined and grains['cloud'] == 'aws' %} -/usr/share/google: - file.directory: - - user: root - - group: root - - dir_mode: 755 - -/usr/share/google/safe_format_and_mount: - file.managed: - - source: salt://helpers/safe_format_and_mount - - user: root - - group: root - - mode: 755 -{% endif %} diff --git a/cluster/saltbase/salt/helpers/safe_format_and_mount b/cluster/saltbase/salt/helpers/safe_format_and_mount deleted file mode 100644 index 53cfe6cc815..00000000000 --- a/cluster/saltbase/salt/helpers/safe_format_and_mount +++ /dev/null @@ -1,144 +0,0 @@ -#! /bin/bash -# Copyright 2013 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Mount a disk, formatting it if necessary. If the disk looks like it may -# have been formatted before, we will not format it. -# -# This script uses blkid and file to search for magic "formatted" bytes -# at the beginning of the disk. Furthermore, it attempts to use fsck to -# repair the filesystem before formatting it. - -FSCK=fsck.ext4 -MOUNT_OPTIONS="discard,defaults" -MKFS="mkfs.ext4 -F" -if [ -e /etc/redhat-release ]; then - if grep -q '7\..' /etc/redhat-release; then - FSCK=fsck.xfs - MKFS=mkfs.xfs - fi -fi - -LOGTAG=safe_format_and_mount -LOGFACILITY=user - -function log() { - local readonly severity=$1; shift; - logger -t ${LOGTAG} -p ${LOGFACILITY}.${severity} -s "$@" -} - -function log_command() { - local readonly log_file=$(mktemp) - local readonly retcode - log info "Running: $*" - $* > ${log_file} 2>&1 - retcode=$? - # only return the last 1000 lines of the logfile, just in case it's HUGE. - tail -1000 ${log_file} | logger -t ${LOGTAG} -p ${LOGFACILITY}.info -s - rm -f ${log_file} - return ${retcode} -} - -function help() { - cat >&2 < -EOF - exit 0 -} - -while getopts ":hf:o:m:" opt; do - case $opt in - h) help;; - f) FSCK=$OPTARG;; - o) MOUNT_OPTIONS=$OPTARG;; - m) MKFS=$OPTARG;; - -) break;; - \?) log error "Invalid option: -${OPTARG}"; exit 1;; - :) log "Option -${OPTARG} requires an argument."; exit 1;; - esac -done - -shift $(($OPTIND - 1)) -readonly DISK=$1 -readonly MOUNTPOINT=$2 - -[[ -z ${DISK} ]] && help -[[ -z ${MOUNTPOINT} ]] && help - -function disk_looks_unformatted() { - blkid ${DISK} - if [[ $? == 0 ]]; then - return 0 - fi - - local readonly file_type=$(file --special-files ${DISK}) - case ${file_type} in - *filesystem*) - return 0;; - esac - - return 1 -} - -function format_disk() { - log_command ${MKFS} ${DISK} -} - -function try_repair_disk() { - log_command ${FSCK} -a ${DISK} - local readonly fsck_return=$? 
- if [[ ${fsck_return} -ge 8 ]]; then - log error "Fsck could not correct errors on ${DISK}" - return 1 - fi - if [[ ${fsck_return} -gt 0 ]]; then - log warning "Fsck corrected errors on ${DISK}" - fi - return 0 -} - -function try_mount() { - local mount_retcode - try_repair_disk - - log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT} - mount_retcode=$? - if [[ ${mount_retcode} == 0 ]]; then - return 0 - fi - - # Check to see if it looks like a filesystem before formatting it. - disk_looks_unformatted ${DISK} - if [[ $? == 0 ]]; then - log error "Disk ${DISK} looks formatted but won't mount. Giving up." - return ${mount_retcode} - fi - - # The disk looks like it's not been formatted before. - format_disk - if [[ $? != 0 ]]; then - log error "Format of ${DISK} failed." - fi - - log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT} - mount_retcode=$? - if [[ ${mount_retcode} == 0 ]]; then - return 0 - fi - log error "Tried everything we could, but could not mount ${DISK}." - return ${mount_retcode} -} - -try_mount -exit $? diff --git a/cluster/saltbase/salt/kube-addons/init.sls b/cluster/saltbase/salt/kube-addons/init.sls index 3171cb6ca61..923a711a6b8 100644 --- a/cluster/saltbase/salt/kube-addons/init.sls +++ b/cluster/saltbase/salt/kube-addons/init.sls @@ -204,7 +204,7 @@ addon-dir-create: - group: root - mode: 755 -{% if pillar.get('enable_default_storage_class', '').lower() == 'true' and grains['cloud'] is defined and grains['cloud'] in ['aws', 'gce', 'openstack'] %} +{% if pillar.get('enable_default_storage_class', '').lower() == 'true' and grains['cloud'] is defined and grains['cloud'] == 'gce' %} /etc/kubernetes/addons/storage-class/default.yaml: file.managed: - source: salt://kube-addons/storage-class/{{ grains['cloud'] }}/default.yaml diff --git a/cluster/saltbase/salt/kube-apiserver/init.sls b/cluster/saltbase/salt/kube-apiserver/init.sls index f22067b877c..261fd53ef35 100644 --- a/cluster/saltbase/salt/kube-apiserver/init.sls +++ b/cluster/saltbase/salt/kube-apiserver/init.sls @@ -1,4 +1,4 @@ -{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} # TODO: generate and distribute tokens on other cloud providers. 
/srv/kubernetes/known_tokens.csv: file.managed: diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest index 878b10f43bf..34e75ac7a89 100644 --- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest +++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest @@ -14,24 +14,14 @@ {% set srv_sshproxy_path = "/srv/sshproxy" -%} {% if grains.cloud is defined -%} - {% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%} - {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} - {% endif -%} + {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} - {% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%} - {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} - {% endif -%} - - {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%} + {% if grains.cloud == 'gce' and grains.cloud_config is defined -%} {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} {% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%} {% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%} {% endif -%} - {% if grains.cloud in ['openstack'] -%} - {% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%} - {% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%} - {% endif -%} {% endif -%} {% set advertise_address = "" -%} @@ -99,7 +89,7 @@ {% set client_ca_file = "" -%} {% set secure_port = "6443" -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} {% set secure_port = "443" -%} {% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%} {% endif -%} @@ -113,7 +103,7 @@ {% set basic_auth_file = "" -%} {% set authz_mode = "" -%} {% set abac_policy_file = "" -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} {% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%} {% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%} {% set authz_mode = " --authorization-mode=ABAC" -%} diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest index c287b29652e..74353d07f6f 100644 --- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest +++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest @@ -40,30 +40,20 @@ {% flex_vol_plugin_dir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec" -%} {% if grains.cloud is defined -%} - {% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%} - {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} - {% endif -%} + {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} {% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%} - {% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%} - {% set 
cloud_config = "--cloud-config=" + grains.cloud_config -%} - {% endif -%} - - {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%} + {% if grains.cloud == 'gce' and grains.cloud_config is defined -%} {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} {% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%} {% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%} {% endif -%} - {% if grains.cloud in ['openstack'] -%} - {% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%} - {% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%} - {% endif -%} {% endif -%} {% set root_ca_file = "" -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %} +{% if grains.cloud is defined and grains.cloud == 'gce' %} {% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%} {% endif -%} diff --git a/cluster/saltbase/salt/kube-node-unpacker/init.sls b/cluster/saltbase/salt/kube-node-unpacker/init.sls index 03495d7fe18..7c9d47eccc0 100644 --- a/cluster/saltbase/salt/kube-node-unpacker/init.sls +++ b/cluster/saltbase/salt/kube-node-unpacker/init.sls @@ -24,10 +24,6 @@ kube-proxy-tar: {% set is_helium = '0' %} # Super annoying, the salt version on GCE is old enough that 'salt.cmd.run' # isn't supported -{% if grains.cloud is defined and grains.cloud == 'aws' %} - # Salt has terrible problems with systemd on AWS too - {% set is_helium = '0' %} -{% endif %} # Salt Helium doesn't support systemd modules for service running {% if pillar.get('is_systemd') and is_helium == '0' %} diff --git a/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest b/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest index d35692a3fd4..6e9af81b78f 100644 --- a/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest +++ b/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest @@ -8,7 +8,7 @@ {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%} {% set api_servers = "--master=https://" + ips[0][0] -%} {% endif -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy' ] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} {% set api_servers_with_port = api_servers -%} {% else -%} {% set api_servers_with_port = api_servers + ":6443" -%} diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index 27511061e84..c2aff4694a7 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -22,7 +22,7 @@ {% set debugging_handlers = "--enable-debugging-handlers=true" -%} {% if grains['roles'][0] == 'kubernetes-master' -%} - {% if grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] -%} + {% if grains.cloud == 'gce' -%} # Unless given a specific directive, disable registration for the kubelet # running on the master. 
{% if kubeconfig != "" -%} @@ -37,14 +37,11 @@ {% endif -%} {% set cloud_provider = "" -%} -{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%} +{% if grains.cloud is defined -%} {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} {% endif -%} {% set cloud_config = "" -%} -{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%} - {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} -{% endif -%} {% set config = "--pod-manifest-path=/etc/kubernetes/manifests" -%} diff --git a/cluster/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls index e517778a77e..51c3a347cca 100644 --- a/cluster/saltbase/salt/top.sls +++ b/cluster/saltbase/salt/top.sls @@ -3,9 +3,6 @@ base: - base - debian-auto-upgrades - salt-helpers -{% if grains.get('cloud') == 'aws' %} - - ntp -{% endif %} {% if pillar.get('e2e_storage_test_environment', '').lower() == 'true' %} - e2e {% endif %} @@ -20,7 +17,6 @@ base: {% elif pillar.get('network_provider', '').lower() == 'cni' %} - cni {% endif %} - - helpers - kube-client-tools - kube-node-unpacker - kubelet @@ -60,11 +56,9 @@ base: - kube-client-tools - kube-master-addons - kube-admission-controls -{% if grains['cloud'] is defined and grains['cloud'] != 'vagrant' %} - logrotate -{% endif %} - kube-addons -{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'photon-controller', 'openstack', 'azure-legacy'] %} +{% if grains['cloud'] is defined and grains['cloud'] == 'gce' %} - docker - kubelet {% endif %} From 63826000c57c84ec086215e465dcae1e703f1cb4 Mon Sep 17 00:00:00 2001 From: Balu Dontu Date: Fri, 12 Jan 2018 16:40:08 -0800 Subject: [PATCH 148/264] Remove vmUUID check in VSphere cloud provider --- .../providers/vsphere/vsphere.go | 8 ------ .../providers/vsphere/vsphere_util.go | 25 ------------------- 2 files changed, 33 deletions(-) diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 5707dd3d06c..42400682cb7 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -375,14 +375,6 @@ func newControllerNode(cfg VSphereConfig) (*VSphere, error) { if cfg.Global.VCenterPort == "" { cfg.Global.VCenterPort = "443" } - if cfg.Global.VMUUID == "" { - // This needs root privileges on the host, and will fail otherwise. - cfg.Global.VMUUID, err = getvmUUID() - if err != nil { - glog.Errorf("Failed to get VM UUID. err: %+v", err) - return nil, err - } - } vsphereInstanceMap, err := populateVsphereInstanceMap(&cfg) if err != nil { return nil, err diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 0f4edb155ae..45a35c71d41 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -19,7 +19,6 @@ package vsphere import ( "context" "errors" - "io/ioutil" "os" "regexp" "runtime" @@ -128,30 +127,6 @@ func GetgovmomiClient(conn *vclib.VSphereConnection) (*govmomi.Client, error) { return client, err } -// getvmUUID gets the BIOS UUID via the sys interface. 
This UUID is known by vsphere -func getvmUUID() (string, error) { - id, err := ioutil.ReadFile(UUIDPath) - if err != nil { - return "", fmt.Errorf("error retrieving vm uuid: %s", err) - } - uuidFromFile := string(id[:]) - //strip leading and trailing white space and new line char - uuid := strings.TrimSpace(uuidFromFile) - // check the uuid starts with "VMware-" - if !strings.HasPrefix(uuid, UUIDPrefix) { - return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile) - } - // Strip the prefix and while spaces and - - uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1) - uuid = strings.Replace(uuid, "-", "", -1) - if len(uuid) != 32 { - return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile) - } - // need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f" - uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32]) - return uuid, nil -} - // Returns the accessible datastores for the given node VM. func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx) From 16ff0c2dda5672dd0078226507bbbcd959d61246 Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Wed, 10 Jan 2018 14:19:45 +0530 Subject: [PATCH 149/264] Improved readability for messages being logged --- pkg/scheduler/schedulercache/cache.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/scheduler/schedulercache/cache.go b/pkg/scheduler/schedulercache/cache.go index f891707d505..04ccc88eef2 100644 --- a/pkg/scheduler/schedulercache/cache.go +++ b/pkg/scheduler/schedulercache/cache.go @@ -131,7 +131,7 @@ func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { cache.mu.Lock() defer cache.mu.Unlock() if _, ok := cache.podStates[key]; ok { - return fmt.Errorf("pod %v state wasn't initial but get assumed", key) + return fmt.Errorf("pod %v is not in the cache, so can't be assumed", key) } cache.addPod(pod) @@ -178,7 +178,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { currState, ok := cache.podStates[key] if ok && currState.pod.Spec.NodeName != pod.Spec.NodeName { - return fmt.Errorf("pod %v state was assumed on a different node", key) + return fmt.Errorf("pod %v was assumed on %v but assigned to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) } switch { @@ -191,7 +191,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { delete(cache.assumedPods, key) delete(cache.podStates, key) default: - return fmt.Errorf("pod %v state wasn't assumed but get forgotten", key) + return fmt.Errorf("pod %v wasn't assumed so cannot be forgotten", key) } return nil } @@ -241,7 +241,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { case ok && cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { // The pod was added to a different node than it was assumed to. - glog.Warningf("Pod %v assumed to a different node than added to.", key) + glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) // Clean this up. cache.removePod(currState.pod) cache.addPod(pod) @@ -257,7 +257,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { } cache.podStates[key] = ps default: - return fmt.Errorf("pod was already in added state. 
Pod key: %v", key) + return fmt.Errorf("pod %v was already in added state.", key) } return nil } @@ -284,7 +284,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { return err } default: - return fmt.Errorf("pod %v state wasn't added but get updated", key) + return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", key) } return nil } @@ -304,7 +304,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { // before Remove event, in which case the state would change from Assumed to Added. case ok && !cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { - glog.Errorf("Pod %v removed from a different node than previously added to.", key) + glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") } err := cache.removePod(currState.pod) @@ -313,7 +313,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { } delete(cache.podStates, key) default: - return fmt.Errorf("pod state wasn't added but get removed. Pod key: %v", key) + return fmt.Errorf("pod %v is not found in scheduler cache, so cannot be removed from it", key) } return nil } @@ -345,7 +345,7 @@ func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { podState, ok := cache.podStates[key] if !ok { - return nil, fmt.Errorf("pod %v does not exist", key) + return nil, fmt.Errorf("pod %v does not exist in scheduler cache", key) } return podState.pod, nil From 8aebf3554c7534300f08a7646748a1afe91b1812 Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Sat, 13 Jan 2018 10:21:06 +0530 Subject: [PATCH 150/264] Added metrics for preemption victims, pods preempted and duration of preemption --- pkg/scheduler/metrics/metrics.go | 24 ++++++++++++++++++++++++ pkg/scheduler/scheduler.go | 8 ++++++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go index c0a87f319ae..7d329ba5ac0 100644 --- a/pkg/scheduler/metrics/metrics.go +++ b/pkg/scheduler/metrics/metrics.go @@ -59,6 +59,14 @@ var ( Buckets: prometheus.ExponentialBuckets(1000, 2, 15), }, ) + SchedulingAlgorithmPremptionEvaluationDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Subsystem: schedulerSubsystem, + Name: "scheduling_algorithm_preemption_evaluation", + Help: "Scheduling algorithm preemption evaluation duration", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + ) BindingLatency = prometheus.NewHistogram( prometheus.HistogramOpts{ Subsystem: schedulerSubsystem, @@ -67,6 +75,18 @@ var ( Buckets: prometheus.ExponentialBuckets(1000, 2, 15), }, ) + PreemptionVictims = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: schedulerSubsystem, + Name: "pod_preemption_victims", + Help: "Number of selected preemption victims", + }) + PreemptionAttempts = prometheus.NewCounter( + prometheus.CounterOpts{ + Subsystem: schedulerSubsystem, + Name: "total_preemption_attempts", + Help: "Total preemption attempts in the cluster till now", + }) ) var registerMetrics sync.Once @@ -78,8 +98,12 @@ func Register() { prometheus.MustRegister(E2eSchedulingLatency) prometheus.MustRegister(SchedulingAlgorithmLatency) prometheus.MustRegister(BindingLatency) + prometheus.MustRegister(SchedulingAlgorithmPredicateEvaluationDuration) prometheus.MustRegister(SchedulingAlgorithmPriorityEvaluationDuration) + 
prometheus.MustRegister(SchedulingAlgorithmPremptionEvaluationDuration) + prometheus.MustRegister(PreemptionVictims) + prometheus.MustRegister(PreemptionAttempts) }) } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 9fae7d117f7..fee845f34e8 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -216,7 +216,9 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e glog.Errorf("Error getting the updated preemptor pod object: %v", err) return "", err } + node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr) + metrics.PreemptionVictims.Set(float64(len(victims))) if err != nil { glog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name) return "", err @@ -440,18 +442,20 @@ func (sched *Scheduler) scheduleOne() { // Synchronously attempt to find a fit for the pod. start := time.Now() suggestedHost, err := sched.schedule(pod) - metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) if err != nil { // schedule() may have failed because the pod would not fit on any host, so we try to // preempt, with the expectation that the next time the pod is tried for scheduling it // will fit due to the preemption. It is also possible that a different pod will schedule // into the resources that were preempted, but this is harmless. if fitError, ok := err.(*core.FitError); ok { + preemptionStartTime := time.Now() sched.preempt(pod, fitError) + metrics.PreemptionAttempts.Inc() + metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) } return } - + metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. // This allows us to keep scheduling without waiting on binding to occur. 
assumedPod := pod.DeepCopy() From b8526cd0777af6717d986942ccb6de6d23eca1bb Mon Sep 17 00:00:00 2001 From: Wang Guoliang Date: Thu, 21 Dec 2017 23:03:46 +0800 Subject: [PATCH 151/264] -Add scheduler optimization options, short circuit all predicates if one predicate fails --- ...scheduler-policy-config-with-extender.json | 3 +- examples/scheduler-policy-config.json | 3 +- pkg/scheduler/api/types.go | 6 ++ pkg/scheduler/api/v1/types.go | 6 ++ pkg/scheduler/core/extender_test.go | 2 +- pkg/scheduler/core/generic_scheduler.go | 64 +++++++++++-------- pkg/scheduler/core/generic_scheduler_test.go | 45 +++++++++---- pkg/scheduler/factory/factory.go | 11 +++- pkg/scheduler/scheduler_test.go | 6 +- 9 files changed, 98 insertions(+), 48 deletions(-) diff --git a/examples/scheduler-policy-config-with-extender.json b/examples/scheduler-policy-config-with-extender.json index 996e6efc828..cd566fb314c 100644 --- a/examples/scheduler-policy-config-with-extender.json +++ b/examples/scheduler-policy-config-with-extender.json @@ -26,5 +26,6 @@ "nodeCacheCapable": false } ], -"hardPodAffinitySymmetricWeight" : 10 +"hardPodAffinitySymmetricWeight" : 10, +"alwaysCheckAllPredicates" : false } diff --git a/examples/scheduler-policy-config.json b/examples/scheduler-policy-config.json index b0fecffab23..048299e5e36 100644 --- a/examples/scheduler-policy-config.json +++ b/examples/scheduler-policy-config.json @@ -15,5 +15,6 @@ {"name" : "ServiceSpreadingPriority", "weight" : 1}, {"name" : "EqualPriority", "weight" : 1} ], -"hardPodAffinitySymmetricWeight" : 10 +"hardPodAffinitySymmetricWeight" : 10, +"alwaysCheckAllPredicates" : false } diff --git a/pkg/scheduler/api/types.go b/pkg/scheduler/api/types.go index 080fc386db5..28b095f3348 100644 --- a/pkg/scheduler/api/types.go +++ b/pkg/scheduler/api/types.go @@ -47,6 +47,12 @@ type Policy struct { // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. HardPodAffinitySymmetricWeight int32 + + // When AlwaysCheckAllPredicates is set to true, scheduler checks all + // the configured predicates even after one or more of them fails. + // When the flag is set to false, scheduler skips checking the rest + // of the predicates after it finds one predicate that failed. + AlwaysCheckAllPredicates bool } type PredicatePolicy struct { diff --git a/pkg/scheduler/api/v1/types.go b/pkg/scheduler/api/v1/types.go index 3f6684a5f3c..14e2f06b1e1 100644 --- a/pkg/scheduler/api/v1/types.go +++ b/pkg/scheduler/api/v1/types.go @@ -39,6 +39,12 @@ type Policy struct { // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + + // When AlwaysCheckAllPredicates is set to true, scheduler checks all + // the configured predicates even after one or more of them fails. + // When the flag is set to false, scheduler skips checking the rest + // of the predicates after it finds one predicate that failed. 
+ AlwaysCheckAllPredicates bool `json:"alwaysCheckAllPredicates"` } type PredicatePolicy struct { diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index 23551a2415c..09e136d38b6 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -317,7 +317,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { } queue := NewSchedulingQueue() scheduler := NewGenericScheduler( - cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}) + cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) podIgnored := &v1.Pod{} machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if test.expectsErr { diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index f147d534958..2009b7af895 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -90,16 +90,17 @@ func (f *FitError) Error() string { } type genericScheduler struct { - cache schedulercache.Cache - equivalenceCache *EquivalenceCache - schedulingQueue SchedulingQueue - predicates map[string]algorithm.FitPredicate - priorityMetaProducer algorithm.MetadataProducer - predicateMetaProducer algorithm.PredicateMetadataProducer - prioritizers []algorithm.PriorityConfig - extenders []algorithm.SchedulerExtender - lastNodeIndexLock sync.Mutex - lastNodeIndex uint64 + cache schedulercache.Cache + equivalenceCache *EquivalenceCache + schedulingQueue SchedulingQueue + predicates map[string]algorithm.FitPredicate + priorityMetaProducer algorithm.MetadataProducer + predicateMetaProducer algorithm.PredicateMetadataProducer + prioritizers []algorithm.PriorityConfig + extenders []algorithm.SchedulerExtender + lastNodeIndexLock sync.Mutex + lastNodeIndex uint64 + alwaysCheckAllPredicates bool cachedNodeInfoMap map[string]*schedulercache.NodeInfo volumeBinder *volumebinder.VolumeBinder @@ -133,7 +134,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister trace.Step("Computing predicates") startPredicateEvalTime := time.Now() - filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache, g.schedulingQueue) + filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache, g.schedulingQueue, g.alwaysCheckAllPredicates) if err != nil { return "", err } @@ -295,6 +296,7 @@ func findNodesThatFit( metadataProducer algorithm.PredicateMetadataProducer, ecache *EquivalenceCache, schedulingQueue SchedulingQueue, + alwaysCheckAllPredicates bool, ) ([]*v1.Node, FailedPredicateMap, error) { var filtered []*v1.Node failedPredicateMap := FailedPredicateMap{} @@ -313,7 +315,7 @@ func findNodesThatFit( meta := metadataProducer(pod, nodeNameToInfo) checkNode := func(i int) { nodeName := nodes[i].Name - fits, failedPredicates, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs, ecache, schedulingQueue) + fits, failedPredicates, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs, ecache, schedulingQueue, 
alwaysCheckAllPredicates) if err != nil { predicateResultLock.Lock() errs[err.Error()]++ @@ -402,6 +404,7 @@ func podFitsOnNode( predicateFuncs map[string]algorithm.FitPredicate, ecache *EquivalenceCache, queue SchedulingQueue, + alwaysCheckAllPredicates bool, ) (bool, []algorithm.PredicateFailureReason, error) { var ( equivalenceHash uint64 @@ -457,8 +460,6 @@ func podFitsOnNode( fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivalenceHash) } - // TODO(bsalamat): When one predicate fails and fit is false, why do we continue - // checking other predicates? if !eCacheAvailable || invalid { // we need to execute predicate functions since equivalence cache does not work fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) @@ -479,6 +480,11 @@ func podFitsOnNode( if !fit { // eCache is available and valid, and predicates result is unfit, record the fail reasons failedPredicates = append(failedPredicates, reasons...) + // if alwaysCheckAllPredicates is false, short circuit all predicates when one predicate fails. + if !alwaysCheckAllPredicates { + glog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate evaluation is short circuited and there are chances of other predicates failing as well.") + break + } } } } @@ -917,7 +923,7 @@ func selectVictimsOnNode( // that we should check is if the "pod" is failing to schedule due to pod affinity // failure. // TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance. - if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue); !fits { + if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false); !fits { if err != nil { glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) } @@ -931,7 +937,7 @@ func selectVictimsOnNode( violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs) reprievePod := func(p *v1.Pod) bool { addPod(p) - fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue) + fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false) if !fits { removePod(p) victims = append(victims, p) @@ -1045,18 +1051,20 @@ func NewGenericScheduler( priorityMetaProducer algorithm.MetadataProducer, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, - pvcLister corelisters.PersistentVolumeClaimLister) algorithm.ScheduleAlgorithm { + pvcLister corelisters.PersistentVolumeClaimLister, + alwaysCheckAllPredicates bool) algorithm.ScheduleAlgorithm { return &genericScheduler{ - cache: cache, - equivalenceCache: eCache, - schedulingQueue: podQueue, - predicates: predicates, - predicateMetaProducer: predicateMetaProducer, - prioritizers: prioritizers, - priorityMetaProducer: priorityMetaProducer, - extenders: extenders, - cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), - volumeBinder: volumeBinder, - pvcLister: pvcLister, + cache: cache, + equivalenceCache: eCache, + schedulingQueue: podQueue, + predicates: predicates, + predicateMetaProducer: predicateMetaProducer, + prioritizers: prioritizers, + priorityMetaProducer: priorityMetaProducer, + extenders: extenders, + cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), + volumeBinder: volumeBinder, + pvcLister: pvcLister, + alwaysCheckAllPredicates: alwaysCheckAllPredicates, } } diff --git a/pkg/scheduler/core/generic_scheduler_test.go 
b/pkg/scheduler/core/generic_scheduler_test.go index cdfc6b20fe5..55fede23c4a 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -187,16 +187,17 @@ func TestSelectHost(t *testing.T) { func TestGenericScheduler(t *testing.T) { predicates.SetPredicatesOrdering(order) tests := []struct { - name string - predicates map[string]algorithm.FitPredicate - prioritizers []algorithm.PriorityConfig - nodes []string - pvcs []*v1.PersistentVolumeClaim - pod *v1.Pod - pods []*v1.Pod - expectedHosts sets.String - expectsErr bool - wErr error + name string + predicates map[string]algorithm.FitPredicate + prioritizers []algorithm.PriorityConfig + alwaysCheckAllPredicates bool + nodes []string + pvcs []*v1.PersistentVolumeClaim + pod *v1.Pod + pods []*v1.Pod + expectedHosts sets.String + expectsErr bool + wErr error }{ { predicates: map[string]algorithm.FitPredicate{"false": falsePredicate}, @@ -377,6 +378,22 @@ func TestGenericScheduler(t *testing.T) { expectsErr: true, wErr: fmt.Errorf("persistentvolumeclaim \"existingPVC\" is being deleted"), }, + { + // alwaysCheckAllPredicates is true + predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate, "false": falsePredicate}, + prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, + alwaysCheckAllPredicates: true, + nodes: []string{"1"}, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + name: "test alwaysCheckAllPredicates is true", + wErr: &FitError{ + Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + NumAllNodes: 1, + FailedPredicates: FailedPredicateMap{ + "1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate, algorithmpredicates.ErrFakePredicate}, + }, + }, + }, } for _, test := range tests { cache := schedulercache.New(time.Duration(0), wait.NeverStop) @@ -393,7 +410,7 @@ func TestGenericScheduler(t *testing.T) { pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs) scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister) + cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister, test.alwaysCheckAllPredicates) machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if !reflect.DeepEqual(err, test.wErr) { @@ -414,7 +431,7 @@ func TestFindFitAllError(t *testing.T) { "2": schedulercache.NewNodeInfo(), "1": schedulercache.NewNodeInfo(), } - _, predicateMap, err := findNodesThatFit(&v1.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyPredicateMetadataProducer, nil, nil) + _, predicateMap, err := findNodesThatFit(&v1.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyPredicateMetadataProducer, nil, nil, false) if err != nil { t.Errorf("unexpected error: %v", err) @@ -449,7 +466,7 @@ func TestFindFitSomeError(t *testing.T) { nodeNameToInfo[name].SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}}) } - _, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyPredicateMetadataProducer, nil, nil) + _, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, 
algorithm.EmptyPredicateMetadataProducer, nil, nil, false) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1276,7 +1293,7 @@ func TestPreempt(t *testing.T) { extenders = append(extenders, extender) } scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}) + cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) // Call Preempt and check the expected results. node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap})) if err != nil { diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index 04b7a585913..fca4f46e3dd 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -130,6 +130,9 @@ type configFactory struct { // Handles volume binding decisions volumeBinder *volumebinder.VolumeBinder + + // always check all predicates even if the middle of one predicate fails. + alwaysCheckAllPredicates bool } // NewConfigFactory initializes the default implementation of a Configurator To encourage eventual privatization of the struct type, we only @@ -880,6 +883,12 @@ func (f *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler if policy.HardPodAffinitySymmetricWeight != 0 { f.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight } + // When AlwaysCheckAllPredicates is set to true, scheduler checks all the configured + // predicates even after one or more of them fails. + if policy.AlwaysCheckAllPredicates { + f.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates + } + return f.CreateFromKeys(predicateKeys, priorityKeys, extenders) } @@ -933,7 +942,7 @@ func (f *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, glog.Info("Created equivalence class cache") } - algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, f.pVCLister) + algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, f.pVCLister, f.alwaysCheckAllPredicates) podBackoff := util.CreateDefaultPodBackoff() return &scheduler.Config{ diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 73d4abcc280..ed36792156b 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -533,7 +533,8 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache. 
algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, - schedulertesting.FakePersistentVolumeClaimLister{}) + schedulertesting.FakePersistentVolumeClaimLister{}, + false) bindingChan := make(chan *v1.Binding, 1) errChan := make(chan error, 1) configurator := &FakeConfigurator{ @@ -577,7 +578,8 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, - schedulertesting.FakePersistentVolumeClaimLister{}) + schedulertesting.FakePersistentVolumeClaimLister{}, + false) bindingChan := make(chan *v1.Binding, 2) configurator := &FakeConfigurator{ Config: &Config{ From 22592c8cdae6a4e013f26d5e1634f7e1cb5e760c Mon Sep 17 00:00:00 2001 From: zhengchuan hu Date: Thu, 11 Jan 2018 01:15:10 +0800 Subject: [PATCH 152/264] fix typeos in cloud-controller-manager --- pkg/cloudprovider/plugins.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/plugins.go b/pkg/cloudprovider/plugins.go index 888532717aa..739c0961339 100644 --- a/pkg/cloudprovider/plugins.go +++ b/pkg/cloudprovider/plugins.go @@ -64,7 +64,7 @@ func IsCloudProvider(name string) bool { // the name is unknown. The error return is only used if the named provider // was known but failed to initialize. The config parameter specifies the // io.Reader handler of the configuration file for the cloud provider, or nil -// for no configuation. +// for no configuration. func GetCloudProvider(name string, config io.Reader) (Interface, error) { providersMutex.Lock() defer providersMutex.Unlock() From 9ac650c437206c018d98f892ff35d63ce80f6039 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 08:08:48 -0800 Subject: [PATCH 153/264] cluster: remove kube-push --- cluster/gce/util.sh | 60 ------------------------- cluster/kube-push.sh | 96 ---------------------------------------- cluster/skeleton/util.sh | 5 --- 3 files changed, 161 deletions(-) delete mode 100755 cluster/kube-push.sh diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 5c4f48d12cd..0bbd740d864 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -2065,66 +2065,6 @@ function prepare-push() { fi } -# Push binaries to kubernetes master -function push-master() { - echo "Updating master metadata ..." - write-master-env - prepare-startup-script - add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_TEMP}/configure-vm.sh" - - echo "Pushing to master (log at ${OUTPUT}/push-${KUBE_MASTER}.log) ..." - cat ${KUBE_TEMP}/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${KUBE_MASTER}".log -} - -# Push binaries to kubernetes node -function push-node() { - node=${1} - - echo "Updating node ${node} metadata... " - prepare-startup-script - add-instance-metadata-from-file "${node}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_TEMP}/configure-vm.sh" - - echo "Start upgrading node ${node} (log at ${OUTPUT}/push-${node}.log) ..." - cat ${KUBE_TEMP}/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${node}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${node}".log -} - -# Push binaries to kubernetes cluster -function kube-push() { - # Disable this until it's fixed. 
- # See https://github.com/kubernetes/kubernetes/issues/17397 - echo "./cluster/kube-push.sh is currently not supported in GCE." - echo "Please use ./cluster/gce/upgrade.sh." - exit 1 - - prepare-push true - - push-master - - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - push-node "${NODE_NAMES[$i]}" & - done - - kube::util::wait-for-jobs || { - echo -e "${color_red}Some commands failed.${color_norm}" >&2 - } - - # TODO(zmerlynn): Re-create instance-template with the new - # node-kube-env. This isn't important until the node-ip-range issue - # is solved (because that's blocking automatic dynamic nodes from - # working). The node-kube-env has to be composed with the KUBELET_TOKEN - # and KUBE_PROXY_TOKEN. Ideally we would have - # http://issue.k8s.io/3168 - # implemented before then, though, so avoiding this mess until then. - - echo - echo "Kubernetes cluster is running. The master is running at:" - echo - echo " https://${KUBE_MASTER_IP}" - echo - echo "The user name and password to use is located in ~/.kube/config" - echo -} - # ----------------------------------------------------------------------------- # Cluster specific test helpers used from hack/e2e.go diff --git a/cluster/kube-push.sh b/cluster/kube-push.sh deleted file mode 100755 index aa84f902fa4..00000000000 --- a/cluster/kube-push.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Push a new release to the cluster. -# -# This will find the release tar, cause it to be downloaded, unpacked, installed -# and enacted. - -set -o errexit -set -o nounset -set -o pipefail - -echo "kube-push.sh is currently broken; see https://github.com/kubernetes/kubernetes/issues/17397" -exit 1 - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. - -if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then - source "${KUBE_ROOT}/cluster/env.sh" -fi - -source "${KUBE_ROOT}/cluster/kube-util.sh" - -function usage() { - echo "${0} [-m|-n ] " - echo " Updates Kubernetes binaries. Can be done for all components (by default), master(-m) or specified node(-n)." - echo " If the version is not specified will try to use local binaries." - echo " Warning: upgrading single node is experimental" -} - -push_to_master=false -push_to_node=false - -while getopts "mn:h" opt; do - case ${opt} in - m) - push_to_master=true;; - n) - push_to_node=true - node_id="$OPTARG";; - h) - usage - exit 0;; - \?) 
- echo "Invalid option: -$OPTARG" >&2 - usage - exit 1;; - esac -done -shift $((OPTIND-1)) - -if [[ "${push_to_master}" == "true" ]] && [[ "${push_to_node}" == "true" ]]; then - echo "Only one of options -m -n should be specified" - usage - exit 1 -fi - -verify-prereqs -verify-kube-binaries -KUBE_VERSION=${1-} - -if [[ "${push_to_master}" == "false" ]] && [[ "${push_to_node}" == "false" ]]; then - echo "Updating cluster using provider: $KUBERNETES_PROVIDER" - kube-push -fi - -if [[ "${push_to_master}" == "true" ]]; then - echo "Updating master to version ${KUBE_VERSION:-"dev"}" - prepare-push false - push-master -fi - -if [[ "${push_to_node}" == "true" ]]; then - echo "Updating node $node_id to version ${KUBE_VERSION:-"dev"}" - prepare-push true - push-node $node_id -fi - -echo "Validating cluster post-push..." - -"${KUBE_ROOT}/cluster/validate-cluster.sh" - -echo "Done" diff --git a/cluster/skeleton/util.sh b/cluster/skeleton/util.sh index 28d82d07d77..0cd4756101a 100644 --- a/cluster/skeleton/util.sh +++ b/cluster/skeleton/util.sh @@ -55,11 +55,6 @@ function kube-down { echo "Skeleton Provider: kube-down not implemented" 1>&2 } -# Update a kubernetes cluster -function kube-push { - echo "Skeleton Provider: kube-push not implemented" 1>&2 -} - # Prepare update a kubernetes component function prepare-push { echo "Skeleton Provider: prepare-push not implemented" 1>&2 From 6387c7b5b31c78af4d5dc5c7b036f7a69f04b7ac Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 08:11:57 -0800 Subject: [PATCH 154/264] cluster: remove support for cvm from gce kube-up --- cluster/gce/BUILD | 7 +- cluster/gce/configure-vm.sh | 932 ------------------------------ cluster/gce/debian/node-helper.sh | 32 - cluster/gce/util.sh | 11 +- 4 files changed, 4 insertions(+), 978 deletions(-) delete mode 100755 cluster/gce/configure-vm.sh delete mode 100755 cluster/gce/debian/node-helper.sh diff --git a/cluster/gce/BUILD b/cluster/gce/BUILD index 00a2b7663ec..e297c36ef6f 100644 --- a/cluster/gce/BUILD +++ b/cluster/gce/BUILD @@ -38,13 +38,12 @@ filegroup( tags = ["automanaged"], ) -# Having the configure-vm.sh script and and trusty code from the GCE cluster -# deploy hosted with the release is useful for GKE. -# This list should match the list in kubernetes/release/lib/releaselib.sh. +# Having the COS code from the GCE cluster deploy hosted with the release is +# useful for GKE. This list should match the list in +# kubernetes/release/lib/releaselib.sh. release_filegroup( name = "gcs-release-artifacts", srcs = [ - "configure-vm.sh", "gci/configure.sh", "gci/master.yaml", "gci/node.yaml", diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh deleted file mode 100755 index c1b66bab0e3..00000000000 --- a/cluster/gce/configure-vm.sh +++ /dev/null @@ -1,932 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# If we have any arguments at all, this is a push and not just setup. 
-is_push=$@ - -function ensure-basic-networking() { - # Deal with GCE networking bring-up race. (We rely on DNS for a lot, - # and it's just not worth doing a whole lot of startup work if this - # isn't ready yet.) - until getent hosts metadata.google.internal &>/dev/null; do - echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...' - sleep 3 - done - until getent hosts $(hostname -f || echo _error_) &>/dev/null; do - echo 'Waiting for functional DNS (trying to resolve my own FQDN)...' - sleep 3 - done - until getent hosts $(hostname -i || echo _error_) &>/dev/null; do - echo 'Waiting for functional DNS (trying to resolve my own IP)...' - sleep 3 - done - - echo "Networking functional on $(hostname) ($(hostname -i))" -} - -# A hookpoint for installing any needed packages -ensure-packages() { - : -} - -function create-node-pki { - echo "Creating node pki files" - - local -r pki_dir="/etc/kubernetes/pki" - mkdir -p "${pki_dir}" - - if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then - CA_CERT_BUNDLE="${CA_CERT}" - fi - - CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt" - echo "${CA_CERT_BUNDLE}" | base64 --decode > "${CA_CERT_BUNDLE_PATH}" - - if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then - KUBELET_CERT_PATH="${pki_dir}/kubelet.crt" - echo "${KUBELET_CERT}" | base64 --decode > "${KUBELET_CERT_PATH}" - - KUBELET_KEY_PATH="${pki_dir}/kubelet.key" - echo "${KUBELET_KEY}" | base64 --decode > "${KUBELET_KEY_PATH}" - fi -} - -# A hookpoint for setting up local devices -ensure-local-disks() { - for ssd in /dev/disk/by-id/google-local-ssd-*; do - if [ -e "$ssd" ]; then - ssdnum=`echo $ssd | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'` - echo "Formatting and mounting local SSD $ssd to /mnt/disks/ssd$ssdnum" - mkdir -p /mnt/disks/ssd$ssdnum - /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${ssd}" /mnt/disks/ssd$ssdnum &>/var/log/local-ssd-$ssdnum-mount.log || \ - { echo "Local SSD $ssdnum mount failed, review /var/log/local-ssd-$ssdnum-mount.log"; return 1; } - else - echo "No local SSD disks found." - fi - done -} - -function config-ip-firewall { - echo "Configuring IP firewall rules" - - # Do not consider loopback addresses as martian source or destination while - # routing. This enables the use of 127/8 for local routing purposes. - sysctl -w net.ipv4.conf.all.route_localnet=1 - - # We need to add rules to accept all TCP/UDP/ICMP packets. - if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then - echo "Add rules to accept all inbound TCP/UDP/ICMP packets" - iptables -A INPUT -p TCP -j ACCEPT - iptables -A INPUT -p UDP -j ACCEPT - iptables -A INPUT -p ICMP -j ACCEPT - fi - if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then - echo "Add rules to accept all forwarded TCP/UDP/ICMP packets" - iptables -A FORWARD -p TCP -j ACCEPT - iptables -A FORWARD -p UDP -j ACCEPT - iptables -A FORWARD -p ICMP -j ACCEPT - fi - - # Flush iptables nat table - iptables -t nat -F || true - - if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then - echo "Add rules for ip masquerade" - iptables -t nat -N IP-MASQ - iptables -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! 
--dst-type LOCAL -j IP-MASQ - iptables -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE - fi - - if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then - echo "Add rule for metadata concealment" - iptables -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988 - fi -} - -function ensure-install-dir() { - INSTALL_DIR="/var/cache/kubernetes-install" - mkdir -p ${INSTALL_DIR} - cd ${INSTALL_DIR} -} - -function salt-apiserver-timeout-grain() { - cat <>/etc/salt/minion.d/grains.conf - minRequestTimeout: '$1' -EOF -} - -function set-broken-motd() { - echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd -} - -function reset-motd() { - # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl) - local -r version="$(/usr/local/bin/kubelet --version=true | cut -f2 -d " ")" - # This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1), - # or the git hash that's in the build info. - local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")" - local devel="" - if [[ "${gitref}" != "${version}" ]]; then - devel=" -Note: This looks like a development version, which might not be present on GitHub. -If it isn't, the closest tag is at: - https://github.com/kubernetes/kubernetes/tree/${gitref} -" - gitref="${version//*+/}" - fi - cat > /etc/motd < "${kube_env_yaml}"; do - echo 'Waiting for kube-env...' - sleep 3 - done - - # kube-env has all the environment variables we care about, in a flat yaml format - eval "$(python -c ' -import pipes,sys,yaml - -for k,v in yaml.load(sys.stdin).iteritems(): - print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v)))) - print("""export {var}""".format(var = k)) - ' < """${kube_env_yaml}""")" - ) -} - -function remove-docker-artifacts() { - echo "== Deleting docker0 ==" - apt-get-install bridge-utils - - # Remove docker artifacts on minion nodes, if present - ifconfig docker0 down || true - brctl delbr docker0 || true - echo "== Finished deleting docker0 ==" -} - -# Retry a download until we get it. Takes a hash and a set of URLs. -# -# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown. -# $2+ are the URLs to download. -download-or-bust() { - local -r hash="$1" - shift 1 - - urls=( $* ) - while true; do - for url in "${urls[@]}"; do - local file="${url##*/}" - rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then - echo "== Failed to download ${url}. Retrying. ==" - elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - else - if [[ -n "${hash}" ]]; then - echo "== Downloaded ${url} (SHA1 = ${hash}) ==" - else - echo "== Downloaded ${url} ==" - fi - return - fi - done - done -} - -validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha1sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} ==" - return 1 - fi -} - -apt-get-install() { - local -r packages=( $@ ) - installed=true - for package in "${packages[@]}"; do - if ! dpkg -s "${package}" &>/dev/null; then - installed=false - break - fi - done - if [[ "${installed}" == "true" ]]; then - echo "== ${packages[@]} already installed, skipped apt-get install ${packages[@]} ==" - return - fi - - apt-get-update - - # Forcibly install packages (options borrowed from Salt logs). - until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do - echo "== install of packages $@ failed, retrying ==" - sleep 5 - done -} - -apt-get-update() { - echo "== Refreshing package database ==" - until apt-get update; do - echo "== apt-get update failed, retrying ==" - sleep 5 - done -} - -# Restart any services that need restarting due to a library upgrade -# Uses needrestart -restart-updated-services() { - # We default to restarting services, because this is only done as part of an update - if [[ "${AUTO_RESTART_SERVICES:-true}" != "true" ]]; then - echo "Auto restart of services prevented by AUTO_RESTART_SERVICES=${AUTO_RESTART_SERVICES}" - return - fi - echo "Restarting services with updated libraries (needrestart -r a)" - # The pipes make sure that needrestart doesn't think it is running with a TTY - # Debian bug #803249; fixed but not necessarily in package repos yet - echo "" | needrestart -r a 2>&1 | tee /dev/null -} - -# Reboot the machine if /var/run/reboot-required exists -reboot-if-required() { - if [[ ! -e "/var/run/reboot-required" ]]; then - return - fi - - echo "Reboot is required (/var/run/reboot-required detected)" - if [[ -e "/var/run/reboot-required.pkgs" ]]; then - echo "Packages that triggered reboot:" - cat /var/run/reboot-required.pkgs - fi - - # We default to rebooting the machine because this is only done as part of an update - if [[ "${AUTO_REBOOT:-true}" != "true" ]]; then - echo "Reboot prevented by AUTO_REBOOT=${AUTO_REBOOT}" - return - fi - - rm -f /var/run/reboot-required - rm -f /var/run/reboot-required.pkgs - echo "Triggering reboot" - init 6 -} - -# Install upgrades using unattended-upgrades, then reboot or restart services -auto-upgrade() { - # We default to not installing upgrades - if [[ "${AUTO_UPGRADE:-false}" != "true" ]]; then - echo "AUTO_UPGRADE not set to true; won't auto-upgrade" - return - fi - apt-get-install unattended-upgrades needrestart - unattended-upgrade --debug - reboot-if-required # We may reboot the machine right here - restart-updated-services -} - -# -# Install salt from GCS. See README.md for instructions on how to update these -# debs. 
-install-salt() { - if dpkg -s salt-minion &>/dev/null; then - echo "== SaltStack already installed, skipping install step ==" - return - fi - - echo "== Refreshing package database ==" - until apt-get update; do - echo "== apt-get update failed, retrying ==" - sleep 5 - done - - mkdir -p /var/cache/salt-install - cd /var/cache/salt-install - - DEBS=( - libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb - python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb - salt-common_2014.1.13+ds-1~bpo70+1_all.deb - salt-minion_2014.1.13+ds-1~bpo70+1_all.deb - ) - URL_BASE="https://storage.googleapis.com/kubernetes-release/salt" - - for deb in "${DEBS[@]}"; do - if [ ! -e "${deb}" ]; then - download-or-bust "" "${URL_BASE}/${deb}" - fi - done - - # Based on - # https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/ - # We do this to prevent Salt from starting the salt-minion - # daemon. The other packages don't have relevant daemons. (If you - # add a package that needs a daemon started, add it to a different - # list.) - cat > /usr/sbin/policy-rc.d <&2 -exit 101 -EOF - chmod 0755 /usr/sbin/policy-rc.d - - for deb in "${DEBS[@]}"; do - echo "== Installing ${deb}, ignore dependency complaints (will fix later) ==" - dpkg --skip-same-version --force-depends -i "${deb}" - done - - # This will install any of the unmet dependencies from above. - echo "== Installing unmet dependencies ==" - until apt-get install -f -y; do - echo "== apt-get install failed, retrying ==" - sleep 5 - done - - rm /usr/sbin/policy-rc.d - - # Log a timestamp - echo "== Finished installing Salt ==" -} - -# Ensure salt-minion isn't running and never runs -stop-salt-minion() { - if [[ -e /etc/init/salt-minion.override ]]; then - # Assume this has already run (upgrade, or baked into containervm) - return - fi - - # This ensures it on next reboot - echo manual > /etc/init/salt-minion.override - update-rc.d salt-minion disable - - while service salt-minion status >/dev/null; do - echo "salt-minion found running, stopping" - service salt-minion stop - sleep 1 - done -} - -# Finds the master PD device; returns it in MASTER_PD_DEVICE -find-master-pd() { - MASTER_PD_DEVICE="" - if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then - return - fi - device_info=$(ls -l /dev/disk/by-id/google-master-pd) - relative_path=${device_info##* } - MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}" -} - -# Create the overlay files for the salt tree. We create these in a separate -# place so that we can blow away the rest of the salt configs on a kube-push and -# re-apply these. 
-function create-salt-pillar() { - # Always overwrite the cluster-params.sls (even on a push, we have - # these variables) - mkdir -p /srv/salt-overlay/pillar - cat </srv/salt-overlay/pillar/cluster-params.sls -instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' -node_tags: '$(echo "$NODE_TAGS" | sed -e "s/'/''/g")' -node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' -cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")' -non_masquerade_cidr: '$(echo "$NON_MASQUERADE_CIDR" | sed -e "s/'/''/g")' -service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' -enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' -enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")' -enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")' -enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")' -enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")' -enable_metadata_proxy: '$(echo "$ENABLE_METADATA_CONCEALMENT" | sed -e "s/'/''/g")' -enable_metrics_server: '$(echo "$ENABLE_METRICS_SERVER" | sed -e "s/'/''/g")' -enable_pod_security_policy: '$(echo "$ENABLE_POD_SECURITY_POLICY" | sed -e "s/'/''/g")' -enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")' -logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")' -elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")' -enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")' -cluster_dns_core_dns: '$(echo "$CLUSTER_DNS_CORE_DNS" | sed -e "s/'/''/g")' -enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")' -dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")' -dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")' -enable_dns_horizontal_autoscaler: '$(echo "$ENABLE_DNS_HORIZONTAL_AUTOSCALER" | sed -e "s/'/''/g")' -admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' -network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")' -prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")' -hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")' -softlockup_panic: '$(echo "$SOFTLOCKUP_PANIC" | sed -e "s/'/''/g")' -opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")' -opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")' -opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")' -network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")' -enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")' -manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")' -manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")' -num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g") -e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")' -kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")' -initial_etcd_cluster: '$(echo "${INITIAL_ETCD_CLUSTER:-}" | sed -e "s/'/''/g")' -initial_etcd_cluster_state: '$(echo "${INITIAL_ETCD_CLUSTER_STATE:-}" | sed -e "s/'/''/g")' -ca_cert_bundle_path: '$(echo "${CA_CERT_BUNDLE_PATH:-}" | sed -e "s/'/''/g")' -hostname: '$(echo "${ETCD_HOSTNAME:-$(hostname -s)}" | sed -e "s/'/''/g")' -enable_pod_priority: 
'$(echo "${ENABLE_POD_PRIORITY:-}" | sed -e "s/'/''/g")' -enable_default_storage_class: '$(echo "$ENABLE_DEFAULT_STORAGE_CLASS" | sed -e "s/'/''/g")' -kube_proxy_daemonset: '$(echo "$KUBE_PROXY_DAEMONSET" | sed -e "s/'/''/g")' -EOF - if [ -n "${STORAGE_BACKEND:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${STORAGE_MEDIA_TYPE:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -storage_media_type: '$(echo "$STORAGE_MEDIA_TYPE" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_liveness_probe_initial_delay: '$(echo "$ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kube_apiserver_liveness_probe_initial_delay: '$(echo "$KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -admission-control-config-file: /etc/admission_controller.config -EOF - fi - if [ -n "${KUBELET_PORT:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_IMAGE:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_docker_tag: '$(echo "$ETCD_IMAGE" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_docker_repository: '$(echo "$ETCD_DOCKER_REPOSITORY" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_VERSION:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_version: '$(echo "$ETCD_VERSION" | sed -e "s/'/''/g")' -EOF - fi - if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_over_ssl: 'true' -EOF - else - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_over_ssl: 'false' -EOF - fi - if [ -n "${ETCD_QUORUM_READ:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_quorum_read: '$(echo "${ETCD_QUORUM_READ}" | sed -e "s/'/''/g")' -EOF - fi - # Configuration changes for test clusters - if [ -n "${APISERVER_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBELET_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then - cat 
<>/srv/salt-overlay/pillar/cluster-params.sls -docker_test_log_level: '$(echo "$DOCKER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -scheduler_test_log_level: '$(echo "$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - # TODO: Replace this with a persistent volume (and create it). - if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -cluster_registry_disk_type: gce -cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g") -cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g") -EOF - fi - if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -enable_custom_metrics: '$(echo "${ENABLE_CUSTOM_METRICS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${NODE_LABELS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -non_master_node_labels: '$(echo "${NON_MASTER_NODE_LABELS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${NODE_TAINTS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -node_taints: '$(echo "${NODE_TAINTS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${EVICTION_HARD:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")' -EOF - fi - if [[ "${ENABLE_CLUSTER_AUTOSCALER:-false}" == "true" ]]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -enable_cluster_autoscaler: '$(echo "${ENABLE_CLUSTER_AUTOSCALER}" | sed -e "s/'/''/g")' -autoscaler_mig_config: '$(echo "${AUTOSCALER_MIG_CONFIG}" | sed -e "s/'/''/g")' -autoscaler_expander_config: '$(echo "${AUTOSCALER_EXPANDER_CONFIG}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ENABLE_IP_ALIASES:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls 
-enable_ip_aliases: '$(echo "$ENABLE_IP_ALIASES" | sed -e "s/'/''/g")' -EOF - fi -} - -# The job of this function is simple, but the basic regular expression syntax makes -# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc -# into [0-9]+, Ki, Mi, Gi, etc. -# This is done in two steps: -# 1. Convert from [0-9]+X?i?B into [0-9]X? (X denotes the prefix, ? means the field -# is optional. -# 2. Attach an 'i' to the end of the string if we find a letter. -# The two step process is needed to handle the edge case in which we want to convert -# a raw byte count, as the result should be a simple number (e.g. 5B -> 5). -function convert-bytes-gce-kube() { - local -r storage_space=$1 - echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/' -} - -# This should happen both on cluster initialization and node upgrades. -# -# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and -# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely -# connect to the apiserver. - -function create-salt-kubelet-auth() { - local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig" - if [ ! -e "${kubelet_kubeconfig_file}" ]; then - mkdir -p /srv/salt-overlay/salt/kubelet - (umask 077; - cat > "${kubelet_kubeconfig_file}" < "${kube_proxy_kubeconfig_file}" < /dev/null -} - -function download-release() { - # In case of failure checking integrity of release, retry. - until try-download-release; do - sleep 15 - echo "Couldn't download release. Retrying..." - done - - echo "Running release install script" - kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}" -} - -function fix-apt-sources() { - sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list - sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list -} - -function salt-run-local() { - cat </etc/salt/minion.d/local.conf -file_client: local -file_roots: - base: - - /srv/salt -EOF -} - -function salt-debug-log() { - cat </etc/salt/minion.d/log-level-debug.conf -log_level: debug -log_level_logfile: debug -EOF -} - -function salt-node-role() { - local -r kubelet_bootstrap_kubeconfig="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig" - local -r kubelet_kubeconfig="/srv/salt-overlay/salt/kubelet/kubeconfig" - cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-pool - cloud: gce - api_servers: '${KUBERNETES_MASTER_NAME}' - kubelet_bootstrap_kubeconfig: /var/lib/kubelet/bootstrap-kubeconfig - kubelet_kubeconfig: /var/lib/kubelet/kubeconfig -EOF -} - -function env-to-grains { - local key=$1 - local env_key=`echo $key | tr '[:lower:]' '[:upper:]'` - local value=${!env_key:-} - if [[ -n "${value}" ]]; then - # Note this is yaml, so indentation matters - cat <>/etc/salt/minion.d/grains.conf - ${key}: '$(echo "${value}" | sed -e "s/'/''/g")' -EOF - fi -} - -function node-docker-opts() { - if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then - DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}" - fi - - # Decide whether to enable a docker registry mirror. This is taken from - # the "kube-env" metadata value. 
- if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then - echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}" - DOCKER_OPTS="${DOCKER_OPTS:-} --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}" - fi -} - -function salt-grains() { - env-to-grains "docker_opts" - env-to-grains "docker_root" - env-to-grains "kubelet_root" - env-to-grains "feature_gates" -} - -function configure-salt() { - mkdir -p /etc/salt/minion.d - salt-run-local - salt-node-role - node-docker-opts - salt-grains - install-salt - stop-salt-minion -} - -function run-salt() { - echo "== Calling Salt ==" - local rc=0 - for i in {0..6}; do - salt-call --retcode-passthrough --local state.highstate && rc=0 || rc=$? - if [[ "${rc}" == 0 ]]; then - return 0 - fi - done - echo "Salt failed to run repeatedly" >&2 - return "${rc}" -} - -function run-user-script() { - if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then - user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh") - fi - if [[ ! -z ${user_script:-} ]]; then - chmod u+x "${INSTALL_DIR}/k8s-user-script.sh" - echo "== running user startup script ==" - "${INSTALL_DIR}/k8s-user-script.sh" - fi -} - -if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then - echo "Support for debian master has been removed" - exit 1 -fi - -if [[ -z "${is_push}" ]]; then - echo "== kube-up node config starting ==" - set-broken-motd - ensure-basic-networking - fix-apt-sources - ensure-install-dir - ensure-packages - set-kube-env - auto-upgrade - ensure-local-disks - create-node-pki - create-salt-pillar - create-salt-kubelet-auth - if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then - create-salt-kubeproxy-auth - fi - download-release - configure-salt - remove-docker-artifacts - config-ip-firewall - run-salt - reset-motd - - run-user-script - echo "== kube-up node config done ==" -else - echo "== kube-push node config starting ==" - ensure-basic-networking - ensure-install-dir - set-kube-env - create-salt-pillar - download-release - reset-motd - run-salt - echo "== kube-push node config done ==" -fi diff --git a/cluster/gce/debian/node-helper.sh b/cluster/gce/debian/node-helper.sh deleted file mode 100755 index b62930f0e34..00000000000 --- a/cluster/gce/debian/node-helper.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# A library of helper functions and constant for debian os distro - -function get-node-instance-metadata { - local metadata="" - metadata+="startup-script=${KUBE_TEMP}/configure-vm.sh," - metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," - metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt" - echo "${metadata}" -} - -# $1: template name (required) -function create-node-instance-template { - local template_name="$1" - prepare-startup-script - create-node-template "$template_name" "${scope_flags}" "$(get-node-instance-metadata)" -} diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 5c4f48d12cd..97c25c527bf 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -25,7 +25,7 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}" source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/hack/lib/util.sh" -if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then +if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh" else echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2 @@ -2233,12 +2233,3 @@ function ssh-to-node() { function prepare-e2e() { detect-project } - -# Writes configure-vm.sh to a temporary location with comments stripped. GCE -# limits the size of metadata fields to 32K, and stripping comments is the -# easiest way to buy us a little more room. -function prepare-startup-script() { - # Find a standard sed instance (and ensure that the command works as expected on a Mac). - kube::util::ensure-gnu-sed - ${SED} '/^\s*#\([^!].*\)*$/ d' ${KUBE_ROOT}/cluster/gce/configure-vm.sh > ${KUBE_TEMP}/configure-vm.sh -} From 410b4016fd3dc97cdaf0a8e2bc20726900db772e Mon Sep 17 00:00:00 2001 From: ilackarms Date: Sat, 13 Jan 2018 13:14:31 -0500 Subject: [PATCH 155/264] periodically flush writer --- staging/src/k8s.io/apiserver/pkg/server/filters/compression.go | 1 + 1 file changed, 1 insertion(+) diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go b/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go index 6303ab54a5f..6bedfadea73 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go @@ -136,6 +136,7 @@ func (c *compressionResponseWriter) Write(p []byte) (int, error) { return -1, errors.New("compressing error: tried to write data using closed compressor") } c.Header().Set(headerContentEncoding, c.encoding) + defer c.compressor.Flush() return c.compressor.Write(p) } From 1e2b644260cf6643f89502b953912b581cc689a0 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 16:25:18 -0800 Subject: [PATCH 156/264] cluster: move logging library to hack/ it's used once in cluster and used a bunch in hack/ and build/ --- cluster/BUILD | 4 ---- cluster/common.sh | 3 +-- cluster/lib/BUILD | 25 ------------------------- hack/generate-bindata.sh | 2 +- hack/lib/BUILD | 4 +--- hack/lib/init.sh | 2 +- {cluster => hack}/lib/logging.sh | 0 test/e2e_node/gubernator.sh | 2 +- 8 files changed, 5 insertions(+), 37 deletions(-) delete mode 100644 cluster/lib/BUILD rename {cluster => hack}/lib/logging.sh (100%) diff --git a/cluster/BUILD b/cluster/BUILD index 1f55e38dc54..9d3ad2c9744 100644 --- a/cluster/BUILD +++ b/cluster/BUILD @@ -20,7 +20,6 @@ filegroup( 
"//cluster/images/etcd/rollback:all-srcs", "//cluster/images/hyperkube:all-srcs", "//cluster/images/kubemark:all-srcs", - "//cluster/lib:all-srcs", "//cluster/saltbase:all-srcs", ], tags = ["automanaged"], @@ -55,7 +54,6 @@ sh_test( name = "common_test", srcs = ["common.sh"], deps = [ - "//cluster/lib", "//hack/lib", ], ) @@ -64,7 +62,6 @@ sh_test( name = "clientbin_test", srcs = ["clientbin.sh"], deps = [ - "//cluster/lib", "//hack/lib", ], ) @@ -73,7 +70,6 @@ sh_test( name = "kube-util_test", srcs = ["kube-util.sh"], deps = [ - "//cluster/lib", "//hack/lib", ], ) diff --git a/cluster/common.sh b/cluster/common.sh index 2aa73622a64..cdc2300612c 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -25,7 +25,6 @@ KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd) DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config" source "${KUBE_ROOT}/hack/lib/util.sh" -source "${KUBE_ROOT}/cluster/lib/logging.sh" # KUBE_RELEASE_VERSION_REGEX matches things like "v1.2.3" or "v1.2.3-alpha.4" # # NOTE This must match the version_regex in build/common.sh @@ -499,7 +498,7 @@ function stage-images() { done kube::util::wait-for-jobs || { - kube::log::error "unable to push images. See ${temp_dir}/*.log for more info." + echo "!!! unable to push images. See ${temp_dir}/*.log for more info." 1>&2 return 1 } diff --git a/cluster/lib/BUILD b/cluster/lib/BUILD deleted file mode 100644 index 9634d17f6f4..00000000000 --- a/cluster/lib/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -sh_library( - name = "lib", - srcs = [ - "logging.sh", - ], - visibility = [ - "//build/visible_to:COMMON_testing", - "//build/visible_to:cluster", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = [ - "//build/visible_to:cluster", - ], -) diff --git a/hack/generate-bindata.sh b/hack/generate-bindata.sh index 40605fb419d..6d2ec0a5d71 100755 --- a/hack/generate-bindata.sh +++ b/hack/generate-bindata.sh @@ -22,7 +22,7 @@ if [[ -z "${KUBE_ROOT:-}" ]]; then KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. fi -source "${KUBE_ROOT}/cluster/lib/logging.sh" +source "${KUBE_ROOT}/hack/lib/logging.sh" if [[ ! -d "${KUBE_ROOT}/examples" ]]; then echo "${KUBE_ROOT}/examples not detected. This script should be run from a location where the source dirs are available." diff --git a/hack/lib/BUILD b/hack/lib/BUILD index 4c3122fb09f..bfce52eb23a 100644 --- a/hack/lib/BUILD +++ b/hack/lib/BUILD @@ -6,14 +6,12 @@ sh_library( "etcd.sh", "golang.sh", "init.sh", + "logging.sh", "swagger.sh", "test.sh", "util.sh", "version.sh", ], - deps = [ - "//cluster/lib", - ], ) filegroup( diff --git a/hack/lib/init.sh b/hack/lib/init.sh index cbff854137b..d141d168c71 100755 --- a/hack/lib/init.sh +++ b/hack/lib/init.sh @@ -37,7 +37,7 @@ export no_proxy=127.0.0.1,localhost THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin" source "${KUBE_ROOT}/hack/lib/util.sh" -source "${KUBE_ROOT}/cluster/lib/logging.sh" +source "${KUBE_ROOT}/hack/lib/logging.sh" kube::log::install_errexit diff --git a/cluster/lib/logging.sh b/hack/lib/logging.sh similarity index 100% rename from cluster/lib/logging.sh rename to hack/lib/logging.sh diff --git a/test/e2e_node/gubernator.sh b/test/e2e_node/gubernator.sh index 6a41900c81b..97f3da8ce38 100755 --- a/test/e2e_node/gubernator.sh +++ b/test/e2e_node/gubernator.sh @@ -22,7 +22,7 @@ set -o errexit set -o nounset set -o pipefail -source cluster/lib/logging.sh +source hack/lib/logging.sh if [[ $# -eq 0 || ! 
$1 =~ ^[Yy]$ ]]; then From d7c9ad97e8f41c42496e71b36afe30ca25193e33 Mon Sep 17 00:00:00 2001 From: Di Xu Date: Sun, 14 Jan 2018 16:38:31 +0800 Subject: [PATCH 157/264] Enable ValidatingAdmissionWebhook and MutatingAdmissionWebhook in kubeadm from v1.9 --- cmd/kubeadm/app/phases/controlplane/manifests.go | 4 ++-- cmd/kubeadm/app/phases/controlplane/manifests_test.go | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index f1ae775f864..ac6ef16f2a2 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -43,8 +43,8 @@ const ( DefaultCloudConfigPath = "/etc/kubernetes/cloud-config" defaultV18AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota" - deprecatedV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota" - defaultV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota" + deprecatedV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + defaultV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" ) // CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane. 
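The two constants above carry the behavioral change: from v1.9 on, kubeadm's default --admission-control list gains MutatingAdmissionWebhook and ValidatingAdmissionWebhook. As a rough illustration only — the selection helper below is hypothetical and not the actual manifests.go code — the version-dependent default could be picked like this:

package main

import "fmt"

// Constants as changed by this patch; the helper below is a sketch, assuming
// the caller already knows the control plane's minor version.
const (
	defaultV18AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
	defaultV19AdmissionControl = "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
)

// defaultAdmissionControl returns the default --admission-control value for a
// v1.<minor> control plane (hypothetical helper; major version 1 assumed).
func defaultAdmissionControl(minor int) string {
	if minor >= 9 {
		return defaultV19AdmissionControl
	}
	return defaultV18AdmissionControl
}

func main() {
	fmt.Println("--admission-control=" + defaultAdmissionControl(9))
}
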
diff --git a/cmd/kubeadm/app/phases/controlplane/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go index 4764bf534f5..366033a6c85 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -364,7 +364,7 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota", + "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -401,7 +401,7 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota", + "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -437,7 +437,7 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota", + "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -473,7 +473,7 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota", + "--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", From f4f8e6fc30978f4da2de9d6c684d1ccd9b293b8a Mon Sep 17 00:00:00 2001 From: lcfang Date: Mon, 15 Jan 2018 09:49:52 +0800 Subject: [PATCH 158/264] fix some bad url --- api/openapi-spec/swagger.json | 6 +++--- api/swagger-spec/scheduling.k8s.io_v1alpha1.json | 4 ++-- api/swagger-spec/v1.json | 2 +- cmd/kubeadm/app/phases/etcd/spec/spec.go | 2 +- .../scheduling.k8s.io/v1alpha1/definitions.html | 4 ++-- docs/api-reference/v1/definitions.html | 2 +- staging/src/k8s.io/api/core/v1/generated.proto | 2 +- staging/src/k8s.io/api/core/v1/types.go | 2 +- .../src/k8s.io/api/core/v1/types_swagger_doc_generated.go | 2 +- 
staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto | 4 ++-- staging/src/k8s.io/api/scheduling/v1alpha1/types.go | 4 ++-- .../api/scheduling/v1alpha1/types_swagger_doc_generated.go | 4 ++-- 12 files changed, 19 insertions(+), 19 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 795486975c8..cbe0e50f498 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -77568,7 +77568,7 @@ "type": "string" }, "qosClass": { - "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md", + "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", "type": "string" }, "reason": { @@ -82131,7 +82131,7 @@ "type": "string" }, "metadata": { - "description": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "value": { @@ -82170,7 +82170,7 @@ "type": "string" }, "metadata": { - "description": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" } }, diff --git a/api/swagger-spec/scheduling.k8s.io_v1alpha1.json b/api/swagger-spec/scheduling.k8s.io_v1alpha1.json index 76bec8c02df..7b3abc4f7b9 100644 --- a/api/swagger-spec/scheduling.k8s.io_v1alpha1.json +++ b/api/swagger-spec/scheduling.k8s.io_v1alpha1.json @@ -744,7 +744,7 @@ }, "metadata": { "$ref": "v1.ListMeta", - "description": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata" + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" }, "items": { "type": "array", @@ -790,7 +790,7 @@ }, "metadata": { "$ref": "v1.ObjectMeta", - "description": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata" + "description": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" }, "value": { "type": "integer", diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index cc2cebe67cf..81a814e10f4 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -23155,7 +23155,7 @@ }, "qosClass": { "type": "string", - "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md" + "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md" } } }, diff --git a/cmd/kubeadm/app/phases/etcd/spec/spec.go b/cmd/kubeadm/app/phases/etcd/spec/spec.go index 4cc587a5b6c..622075478b1 100644 --- a/cmd/kubeadm/app/phases/etcd/spec/spec.go +++ b/cmd/kubeadm/app/phases/etcd/spec/spec.go @@ -68,7 +68,7 @@ func AddKnownTypes(s *runtime.Scheme) error { type EtcdClusterList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` Items []EtcdCluster `json:"items"` } diff --git a/docs/api-reference/scheduling.k8s.io/v1alpha1/definitions.html b/docs/api-reference/scheduling.k8s.io/v1alpha1/definitions.html index ae3e0927564..5c480a3ca0f 100755 --- a/docs/api-reference/scheduling.k8s.io/v1alpha1/definitions.html +++ b/docs/api-reference/scheduling.k8s.io/v1alpha1/definitions.html @@ -1272,7 +1272,7 @@ Examples:

metadata
-Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
false
v1.ListMeta

@@ -1327,7 +1327,7 @@ Examples:

metadata
-Standard object’s metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+Standard object’s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
false
v1.ObjectMeta

diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index cbb2f013aad..7d7d2ee02d0 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -9068,7 +9068,7 @@ Examples:

qosClass
-The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md
+The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
false
string

diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 6bef759dece..84327e89d93 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -3179,7 +3179,7 @@ message PodStatus { // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional optional string qosClass = 9; } diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 49ef6109276..a5ce97a7bbd 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -3042,7 +3042,7 @@ type PodStatus struct { ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` } diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 80cacc974e5..079e0823cb1 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1531,7 +1531,7 @@ var map_PodStatus = map[string]string{ "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", } func (PodStatus) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto b/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto index 75b4968cc31..588ef9718b0 100644 --- a/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -33,7 +33,7 @@ option go_package = "v1alpha1"; // integer value. 
The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -55,7 +55,7 @@ message PriorityClass { // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/staging/src/k8s.io/api/scheduling/v1alpha1/types.go b/staging/src/k8s.io/api/scheduling/v1alpha1/types.go index bca0f3471f2..07bf337fb14 100644 --- a/staging/src/k8s.io/api/scheduling/v1alpha1/types.go +++ b/staging/src/k8s.io/api/scheduling/v1alpha1/types.go @@ -29,7 +29,7 @@ import ( type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -54,7 +54,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/staging/src/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index c6187398dc3..4b68bf04a47 100644 --- a/staging/src/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "value": "The value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", @@ -41,7 +41,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } From 5deb5f4913ab9ea6a469c5a4c601f82a9e30a1c4 Mon Sep 17 00:00:00 2001 From: junxu Date: Thu, 11 Jan 2018 22:36:28 -0500 Subject: [PATCH 159/264] Rename func name according TODO --- pkg/scheduler/algorithm/priorities/metadata.go | 4 ++-- pkg/scheduler/algorithm/types.go | 9 ++++----- pkg/scheduler/algorithm/types_test.go | 8 ++++---- .../algorithmprovider/defaults/defaults.go | 2 +- pkg/scheduler/core/extender_test.go | 2 +- pkg/scheduler/core/generic_scheduler.go | 11 +++++------ pkg/scheduler/core/generic_scheduler_test.go | 4 ++-- pkg/scheduler/factory/factory.go | 2 +- pkg/scheduler/factory/plugins.go | 13 ++++++------- pkg/scheduler/scheduler.go | 2 +- pkg/scheduler/scheduler_test.go | 4 ++-- pkg/scheduler/testutil.go | 2 +- 12 files changed, 30 insertions(+), 33 deletions(-) diff --git a/pkg/scheduler/algorithm/priorities/metadata.go b/pkg/scheduler/algorithm/priorities/metadata.go index fe9dce79f47..b949ad7d9bf 100644 --- a/pkg/scheduler/algorithm/priorities/metadata.go +++ b/pkg/scheduler/algorithm/priorities/metadata.go @@ -32,7 +32,7 @@ type PriorityMetadataFactory struct { statefulSetLister algorithm.StatefulSetLister } -func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.MetadataProducer { +func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityMetadataProducer { factory := &PriorityMetadataFactory{ serviceLister: serviceLister, controllerLister: controllerLister, @@ -52,7 +52,7 @@ type priorityMetadata struct { podFirstServiceSelector labels.Selector } -// PriorityMetadata is a MetadataProducer. Node info can be nil. +// PriorityMetadata is a PriorityMetadataProducer. Node info can be nil. func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { // If we cannot compute metadata, just return nil if pod == nil { diff --git a/pkg/scheduler/algorithm/types.go b/pkg/scheduler/algorithm/types.go index 5fb2981f110..f6ff3b49427 100644 --- a/pkg/scheduler/algorithm/types.go +++ b/pkg/scheduler/algorithm/types.go @@ -43,10 +43,9 @@ type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo m // PredicateMetadataProducer is a function that computes predicate metadata for a given pod. 
type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata -// MetadataProducer is a function that computes metadata for a given pod. This +// PriorityMetadataProducer is a function that computes metadata for a given pod. This // is now used for only for priority functions. For predicates please use PredicateMetadataProducer. -// TODO: Rename this once we have a specific type for priority metadata producer. -type MetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} +type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} // DEPRECATED // Use Map-Reduce pattern for priority functions. @@ -67,8 +66,8 @@ func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*sche return nil } -// EmptyMetadataProducer returns a no-op MetadataProducer type. -func EmptyMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { +// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type. +func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { return nil } diff --git a/pkg/scheduler/algorithm/types_test.go b/pkg/scheduler/algorithm/types_test.go index 862425f7218..58ead064d99 100644 --- a/pkg/scheduler/algorithm/types_test.go +++ b/pkg/scheduler/algorithm/types_test.go @@ -24,8 +24,8 @@ import ( "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) -// EmptyMetadataProducer should returns a no-op MetadataProducer type. -func TestEmptyMetadataProducer(t *testing.T) { +// EmptyPriorityMetadataProducer should returns a no-op PriorityMetadataProducer type. +func TestEmptyPriorityMetadataProducer(t *testing.T) { fakePod := new(v1.Pod) fakeLabelSelector := labels.SelectorFromSet(labels.Set{"foo": "bar"}) @@ -33,8 +33,8 @@ func TestEmptyMetadataProducer(t *testing.T) { "2": schedulercache.NewNodeInfo(fakePod), "1": schedulercache.NewNodeInfo(), } - // Test EmptyMetadataProducer - metadata := EmptyMetadataProducer(fakePod, nodeNameToInfo) + // Test EmptyPriorityMetadataProducer + metadata := EmptyPriorityMetadataProducer(fakePod, nodeNameToInfo) if metadata != nil { t.Errorf("failed to produce empty metadata: got %v, expected nil", metadata) } diff --git a/pkg/scheduler/algorithmprovider/defaults/defaults.go b/pkg/scheduler/algorithmprovider/defaults/defaults.go index 6cbc772ac99..11b0a54042a 100644 --- a/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -42,7 +42,7 @@ func init() { return predicates.NewPredicateMetadataFactory(args.PodLister) }) factory.RegisterPriorityMetadataProducerFactory( - func(args factory.PluginFactoryArgs) algorithm.MetadataProducer { + func(args factory.PluginFactoryArgs) algorithm.PriorityMetadataProducer { return priorities.NewPriorityMetadataFactory(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister) }) diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index 09e136d38b6..69cf8c54d15 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -317,7 +317,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { } queue := NewSchedulingQueue() scheduler := NewGenericScheduler( - cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil, 
schedulertesting.FakePersistentVolumeClaimLister{}, false) + cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyPriorityMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) podIgnored := &v1.Pod{} machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if test.expectsErr { diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 2009b7af895..da04ff45ad6 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -94,17 +94,16 @@ type genericScheduler struct { equivalenceCache *EquivalenceCache schedulingQueue SchedulingQueue predicates map[string]algorithm.FitPredicate - priorityMetaProducer algorithm.MetadataProducer + priorityMetaProducer algorithm.PriorityMetadataProducer predicateMetaProducer algorithm.PredicateMetadataProducer prioritizers []algorithm.PriorityConfig extenders []algorithm.SchedulerExtender lastNodeIndexLock sync.Mutex lastNodeIndex uint64 alwaysCheckAllPredicates bool - - cachedNodeInfoMap map[string]*schedulercache.NodeInfo - volumeBinder *volumebinder.VolumeBinder - pvcLister corelisters.PersistentVolumeClaimLister + cachedNodeInfoMap map[string]*schedulercache.NodeInfo + volumeBinder *volumebinder.VolumeBinder + pvcLister corelisters.PersistentVolumeClaimLister } // Schedule tries to schedule the given pod to one of node in the node list. @@ -1048,7 +1047,7 @@ func NewGenericScheduler( predicates map[string]algorithm.FitPredicate, predicateMetaProducer algorithm.PredicateMetadataProducer, prioritizers []algorithm.PriorityConfig, - priorityMetaProducer algorithm.MetadataProducer, + priorityMetaProducer algorithm.PriorityMetadataProducer, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, pvcLister corelisters.PersistentVolumeClaimLister, diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index 55fede23c4a..70802239d78 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -410,7 +410,7 @@ func TestGenericScheduler(t *testing.T) { pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs) scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister, test.alwaysCheckAllPredicates) + cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyPriorityMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister, test.alwaysCheckAllPredicates) machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if !reflect.DeepEqual(err, test.wErr) { @@ -1293,7 +1293,7 @@ func TestPreempt(t *testing.T) { extenders = append(extenders, extender) } scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) + cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": 
algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyPriorityMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) // Call Preempt and check the expected results. node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap})) if err != nil { diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index fca4f46e3dd..cfde827ca9b 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -983,7 +983,7 @@ func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([] return getPriorityFunctionConfigs(priorityKeys, *pluginArgs) } -func (f *configFactory) GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) { +func (f *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) { pluginArgs, err := f.getPluginArgs() if err != nil { return nil, err diff --git a/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go index b8733d2961d..1447d9487c2 100644 --- a/pkg/scheduler/factory/plugins.go +++ b/pkg/scheduler/factory/plugins.go @@ -49,9 +49,8 @@ type PluginFactoryArgs struct { HardPodAffinitySymmetricWeight int32 } -// MetadataProducerFactory produces MetadataProducer from the given args. -// TODO: Rename this to PriorityMetadataProducerFactory. -type MetadataProducerFactory func(PluginFactoryArgs) algorithm.MetadataProducer +// PriorityMetadataProducerFactory produces PriorityMetadataProducer from the given args. +type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityMetadataProducer // PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args. type PredicateMetadataProducerFactory func(PluginFactoryArgs) algorithm.PredicateMetadataProducer @@ -89,7 +88,7 @@ var ( algorithmProviderMap = make(map[string]AlgorithmProviderConfig) // Registered metadata producers - priorityMetadataProducer MetadataProducerFactory + priorityMetadataProducer PriorityMetadataProducerFactory predicateMetadataProducer PredicateMetadataProducerFactory // get equivalence pod function @@ -245,7 +244,7 @@ func IsFitPredicateRegistered(name string) bool { return ok } -func RegisterPriorityMetadataProducerFactory(factory MetadataProducerFactory) { +func RegisterPriorityMetadataProducerFactory(factory PriorityMetadataProducerFactory) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() priorityMetadataProducer = factory @@ -404,12 +403,12 @@ func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[st return predicates, nil } -func getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.MetadataProducer, error) { +func getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.PriorityMetadataProducer, error) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() if priorityMetadataProducer == nil { - return algorithm.EmptyMetadataProducer, nil + return algorithm.EmptyPriorityMetadataProducer, nil } return priorityMetadataProducer(args), nil } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index fee845f34e8..788647618ef 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -78,7 +78,7 @@ func (sched *Scheduler) StopEverything() { // factory.go. 
type Configurator interface { GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) - GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) + GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) GetHardPodAffinitySymmetricWeight() int32 diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index ed36792156b..d2a5d14772e 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -530,7 +530,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache. predicateMap, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{}, - algorithm.EmptyMetadataProducer, + algorithm.EmptyPriorityMetadataProducer, []algorithm.SchedulerExtender{}, nil, schedulertesting.FakePersistentVolumeClaimLister{}, @@ -575,7 +575,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc predicateMap, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{}, - algorithm.EmptyMetadataProducer, + algorithm.EmptyPriorityMetadataProducer, []algorithm.SchedulerExtender{}, nil, schedulertesting.FakePersistentVolumeClaimLister{}, diff --git a/pkg/scheduler/testutil.go b/pkg/scheduler/testutil.go index 249ced16cd8..0e0c2eae7ce 100644 --- a/pkg/scheduler/testutil.go +++ b/pkg/scheduler/testutil.go @@ -40,7 +40,7 @@ func (fc *FakeConfigurator) GetPriorityFunctionConfigs(priorityKeys sets.String) } // GetPriorityMetadataProducer is not implemented yet. -func (fc *FakeConfigurator) GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) { +func (fc *FakeConfigurator) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) { return nil, fmt.Errorf("not implemented") } From cd02f168e6b476ed475228e725008ab948dfadc5 Mon Sep 17 00:00:00 2001 From: wackxu Date: Tue, 14 Nov 2017 20:07:24 +0800 Subject: [PATCH 160/264] use shared informers for TokenCleaner controller --- cmd/kube-controller-manager/app/bootstrap.go | 1 + pkg/controller/bootstrap/BUILD | 3 - pkg/controller/bootstrap/tokencleaner.go | 129 ++++++++++++++---- pkg/controller/bootstrap/tokencleaner_test.go | 24 ++-- 4 files changed, 116 insertions(+), 41 deletions(-) diff --git a/cmd/kube-controller-manager/app/bootstrap.go b/cmd/kube-controller-manager/app/bootstrap.go index 38e066523fd..aeb8405612f 100644 --- a/cmd/kube-controller-manager/app/bootstrap.go +++ b/cmd/kube-controller-manager/app/bootstrap.go @@ -39,6 +39,7 @@ func startBootstrapSignerController(ctx ControllerContext) (bool, error) { func startTokenCleanerController(ctx ControllerContext) (bool, error) { tcc, err := bootstrap.NewTokenCleaner( ctx.ClientBuilder.ClientGoClientOrDie("token-cleaner"), + ctx.InformerFactory.Core().V1().Secrets(), bootstrap.DefaultTokenCleanerOptions(), ) if err != nil { diff --git a/pkg/controller/bootstrap/BUILD b/pkg/controller/bootstrap/BUILD index e4ef02a20b5..f5850a6fba9 100644 --- a/pkg/controller/bootstrap/BUILD +++ b/pkg/controller/bootstrap/BUILD @@ -54,12 +54,9 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go index 6c099a4c733..34a91e492c8 100644 --- a/pkg/controller/bootstrap/tokencleaner.go +++ b/pkg/controller/bootstrap/tokencleaner.go @@ -17,21 +17,23 @@ limitations under the License. package bootstrap import ( + "fmt" "time" "github.com/golang/glog" - "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" + coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -59,57 +61,128 @@ type TokenCleaner struct { client clientset.Interface - secrets cache.Store - secretsController cache.Controller + // secretLister is able to list/get secrets and is populated by the shared informer passed to NewTokenCleaner. + secretLister corelisters.SecretLister + + // secretSynced returns true if the secret shared informer has been synced at least once. + secretSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface } // NewTokenCleaner returns a new *NewTokenCleaner. 
-// -// TODO: Switch to shared informers -func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) (*TokenCleaner, error) { +func NewTokenCleaner(cl clientset.Interface, secrets coreinformers.SecretInformer, options TokenCleanerOptions) (*TokenCleaner, error) { e := &TokenCleaner{ client: cl, + secretLister: secrets.Lister(), + secretSynced: secrets.Informer().HasSynced, tokenSecretNamespace: options.TokenSecretNamespace, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "token_cleaner"), } + if cl.CoreV1().RESTClient().GetRateLimiter() != nil { if err := metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { return nil, err } } - secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)}) - e.secrets, e.secretsController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { - lo.FieldSelector = secretSelector.String() - return e.client.CoreV1().Secrets(e.tokenSecretNamespace).List(lo) + secrets.Informer().AddEventHandlerWithResyncPeriod( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + switch t := obj.(type) { + case *v1.Secret: + return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.tokenSecretNamespace + default: + utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj)) + return false + } }, - WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { - lo.FieldSelector = secretSelector.String() - return e.client.CoreV1().Secrets(e.tokenSecretNamespace).Watch(lo) + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: e.enqueueSecrets, + UpdateFunc: func(oldSecret, newSecret interface{}) { e.enqueueSecrets(newSecret) }, }, }, - &v1.Secret{}, options.SecretResync, - cache.ResourceEventHandlerFuncs{ - AddFunc: e.evalSecret, - UpdateFunc: func(oldSecret, newSecret interface{}) { e.evalSecret(newSecret) }, - }, ) + return e, nil } // Run runs controller loops and returns when they are done func (tc *TokenCleaner) Run(stopCh <-chan struct{}) { - go tc.secretsController.Run(stopCh) - go wait.Until(tc.evalSecrets, 10*time.Second, stopCh) + defer utilruntime.HandleCrash() + defer tc.queue.ShutDown() + + glog.Infof("Starting token cleaner controller") + defer glog.Infof("Shutting down token cleaner controller") + + if !controller.WaitForCacheSync("token_cleaner", stopCh, tc.secretSynced) { + return + } + + go wait.Until(tc.worker, 10*time.Second, stopCh) + <-stopCh } -func (tc *TokenCleaner) evalSecrets() { - for _, obj := range tc.secrets.List() { - tc.evalSecret(obj) +func (tc *TokenCleaner) enqueueSecrets(obj interface{}) { + key, err := controller.KeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return } + tc.queue.Add(key) +} + +// worker runs a thread that dequeues secrets, handles them, and marks them done. +func (tc *TokenCleaner) worker() { + for tc.processNextWorkItem() { + } +} + +// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit. 
+func (tc *TokenCleaner) processNextWorkItem() bool { + key, quit := tc.queue.Get() + if quit { + return false + } + defer tc.queue.Done(key) + + if err := tc.syncFunc(key.(string)); err != nil { + tc.queue.AddRateLimited(key) + utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", key, err)) + return true + } + + tc.queue.Forget(key) + return true +} + +func (tc *TokenCleaner) syncFunc(key string) error { + startTime := time.Now() + defer func() { + glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime)) + }() + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + ret, err := tc.secretLister.Secrets(namespace).Get(name) + if apierrors.IsNotFound(err) { + glog.V(3).Infof("secret has been deleted: %v", key) + return nil + } + + if err != nil { + return err + } + + if ret.Type == bootstrapapi.SecretTypeBootstrapToken { + tc.evalSecret(ret) + } + return nil } func (tc *TokenCleaner) evalSecret(o interface{}) { diff --git a/pkg/controller/bootstrap/tokencleaner_test.go b/pkg/controller/bootstrap/tokencleaner_test.go index 47059dd4d19..5fddd7980f6 100644 --- a/pkg/controller/bootstrap/tokencleaner_test.go +++ b/pkg/controller/bootstrap/tokencleaner_test.go @@ -23,6 +23,8 @@ import ( "github.com/davecgh/go-spew/spew" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" @@ -32,24 +34,26 @@ func init() { spew.Config.DisableMethods = true } -func newTokenCleaner() (*TokenCleaner, *fake.Clientset, error) { +func newTokenCleaner() (*TokenCleaner, *fake.Clientset, coreinformers.SecretInformer, error) { options := DefaultTokenCleanerOptions() cl := fake.NewSimpleClientset() - tcc, err := NewTokenCleaner(cl, options) + informerFactory := informers.NewSharedInformerFactory(cl, options.SecretResync) + secrets := informerFactory.Core().V1().Secrets() + tcc, err := NewTokenCleaner(cl, secrets, options) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - return tcc, cl, nil + return tcc, cl, secrets, nil } func TestCleanerNoExpiration(t *testing.T) { - cleaner, cl, err := newTokenCleaner() + cleaner, cl, secrets, err := newTokenCleaner() if err != nil { t.Fatalf("error creating TokenCleaner: %v", err) } secret := newTokenSecret("tokenID", "tokenSecret") - cleaner.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) cleaner.evalSecret(secret) @@ -59,14 +63,14 @@ func TestCleanerNoExpiration(t *testing.T) { } func TestCleanerExpired(t *testing.T) { - cleaner, cl, err := newTokenCleaner() + cleaner, cl, secrets, err := newTokenCleaner() if err != nil { t.Fatalf("error creating TokenCleaner: %v", err) } secret := newTokenSecret("tokenID", "tokenSecret") addSecretExpiration(secret, timeString(-time.Hour)) - cleaner.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) cleaner.evalSecret(secret) @@ -81,14 +85,14 @@ func TestCleanerExpired(t *testing.T) { } func TestCleanerNotExpired(t *testing.T) { - cleaner, cl, err := newTokenCleaner() + cleaner, cl, secrets, err := newTokenCleaner() if err != nil { t.Fatalf("error creating TokenCleaner: %v", err) } secret := newTokenSecret("tokenID", "tokenSecret") addSecretExpiration(secret, timeString(time.Hour)) - cleaner.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) cleaner.evalSecret(secret) From fa8afc1d393300803480e5faf4b6545916cd5ccf Mon Sep 17 00:00:00 
2001 From: linweibin Date: Mon, 15 Jan 2018 16:02:35 +0800 Subject: [PATCH 161/264] Remove unused code in UT files in pkg/ --- .../providers/azure/azure_test.go | 50 ------------------- pkg/kubectl/cmd/BUILD | 1 - pkg/kubectl/cmd/attach_test.go | 6 +-- pkg/kubectl/cmd/drain_test.go | 16 ------ pkg/kubectl/cmd/exec_test.go | 6 +-- pkg/kubectl/cmd/logs_test.go | 4 +- pkg/kubectl/cmd/portforward_test.go | 8 +-- pkg/kubectl/cmd/resource/get_test.go | 11 ---- pkg/kubectl/cmd/taint_test.go | 1 - pkg/kubectl/cmd/top_pod_test.go | 2 - pkg/kubectl/namespace_test.go | 1 - pkg/kubectl/rolling_updater_test.go | 1 - pkg/kubelet/cm/cpumanager/BUILD | 1 - pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 37 -------------- pkg/kubelet/cm/deviceplugin/manager_test.go | 5 -- pkg/kubelet/eviction/helpers_test.go | 18 +------ pkg/kubelet/kubelet_test.go | 2 - .../kuberuntime/kuberuntime_gc_test.go | 1 - .../kuberuntime/kuberuntime_manager_test.go | 6 --- pkg/kubelet/remote/remote_runtime_test.go | 7 --- pkg/kubelet/rkt/fake_rkt_interface_test.go | 1 - pkg/kubelet/stats/cri_stats_provider_test.go | 1 - pkg/kubelet/stats/stats_provider_test.go | 1 - pkg/master/BUILD | 5 -- pkg/master/master_test.go | 31 ------------ pkg/printers/humanreadable_test.go | 11 ---- pkg/printers/internalversion/describe_test.go | 4 +- .../apps/statefulset/storage/storage_test.go | 11 ---- pkg/registry/batch/cronjob/strategy_test.go | 6 --- .../core/endpoint/storage/storage_test.go | 10 ---- pkg/registry/core/event/strategy_test.go | 13 ----- .../persistentvolume/storage/storage_test.go | 5 -- pkg/registry/core/pod/strategy_test.go | 5 -- .../controller/storage/storage_test.go | 11 ---- .../storage/storage_test.go | 11 ---- .../storageclass/storage/storage_test.go | 4 -- .../volumeattachment/storage/storage_test.go | 4 -- pkg/security/podsecuritypolicy/group/BUILD | 5 +- .../podsecuritypolicy/group/mustrunas_test.go | 2 - pkg/serviceaccount/jwt_test.go | 11 ---- pkg/util/dbus/dbus_test.go | 7 +-- 41 files changed, 18 insertions(+), 325 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 40a1153680b..c42b9be3d08 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -1000,23 +1000,6 @@ func getBackendPort(port int32) int32 { return port + 10000 } -func getTestPublicFipConfigurationProperties() network.FrontendIPConfigurationPropertiesFormat { - return network.FrontendIPConfigurationPropertiesFormat{ - PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("/this/is/a/public/ip/address/id")}, - } -} - -func getTestInternalFipConfigurationProperties(expectedSubnetName *string) network.FrontendIPConfigurationPropertiesFormat { - var expectedSubnet *network.Subnet - if expectedSubnetName != nil { - expectedSubnet = &network.Subnet{Name: expectedSubnetName} - } - return network.FrontendIPConfigurationPropertiesFormat{ - PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("/this/is/a/public/ip/address/id")}, - Subnet: expectedSubnet, - } -} - func getTestService(identifier string, proto v1.Protocol, requestedPorts ...int32) v1.Service { ports := []v1.ServicePort{} for _, port := range requestedPorts { @@ -1056,39 +1039,6 @@ func setLoadBalancerAutoModeAnnotation(service *v1.Service) { setLoadBalancerModeAnnotation(service, ServiceAnnotationLoadBalancerAutoModeValue) } -func getTestLoadBalancer(services ...v1.Service) network.LoadBalancer { - rules := []network.LoadBalancingRule{} - probes := 
[]network.Probe{} - - for _, service := range services { - for _, port := range service.Spec.Ports { - ruleName := getLoadBalancerRuleName(&service, port, nil) - rules = append(rules, network.LoadBalancingRule{ - Name: to.StringPtr(ruleName), - LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ - FrontendPort: to.Int32Ptr(port.Port), - BackendPort: to.Int32Ptr(port.Port), - }, - }) - probes = append(probes, network.Probe{ - Name: to.StringPtr(ruleName), - ProbePropertiesFormat: &network.ProbePropertiesFormat{ - Port: to.Int32Ptr(port.NodePort), - }, - }) - } - } - - lb := network.LoadBalancer{ - LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{ - LoadBalancingRules: &rules, - Probes: &probes, - }, - } - - return lb -} - func getServiceSourceRanges(service *v1.Service) []string { if len(service.Spec.LoadBalancerSourceRanges) == 0 { if !requiresInternalLoadBalancer(service) { diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 0bf32689830..d6cba425fec 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -207,7 +207,6 @@ go_test( importpath = "k8s.io/kubernetes/pkg/kubectl/cmd", deps = [ "//pkg/api/legacyscheme:go_default_library", - "//pkg/api/ref:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", "//pkg/apis/batch:go_default_library", diff --git a/pkg/kubectl/cmd/attach_test.go b/pkg/kubectl/cmd/attach_test.go index 42801a9b257..f4d9b1d5cd7 100644 --- a/pkg/kubectl/cmd/attach_test.go +++ b/pkg/kubectl/cmd/attach_test.go @@ -286,9 +286,9 @@ func TestAttach(t *testing.T) { func TestAttachWarnings(t *testing.T) { version := legacyscheme.Registry.GroupOrDie(api.GroupName).GroupVersion.Version tests := []struct { - name, container, version, podPath, fetchPodPath, expectedErr, expectedOut string - pod *api.Pod - stdin, tty bool + name, container, version, podPath, fetchPodPath, expectedErr string + pod *api.Pod + stdin, tty bool }{ { name: "fallback tty if not supported", diff --git a/pkg/kubectl/cmd/drain_test.go b/pkg/kubectl/cmd/drain_test.go index f9996314406..8efe31f8db7 100644 --- a/pkg/kubectl/cmd/drain_test.go +++ b/pkg/kubectl/cmd/drain_test.go @@ -44,7 +44,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest/fake" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/ref" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" @@ -882,18 +881,3 @@ func (m *MyReq) isFor(method string, path string) bool { req.URL.Path == strings.Join([]string{"/apis/extensions/v1beta1", path}, "") || req.URL.Path == strings.Join([]string{"/apis/batch/v1", path}, "")) } - -func refJson(t *testing.T, o runtime.Object) string { - ref, err := ref.GetReference(legacyscheme.Scheme, o) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - _, _, codec, _ := cmdtesting.NewAPIFactory() - json, err := runtime.Encode(codec, &api.SerializedReference{Reference: *ref}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - return string(json) -} diff --git a/pkg/kubectl/cmd/exec_test.go b/pkg/kubectl/cmd/exec_test.go index afa52ca634b..c327b731b0d 100644 --- a/pkg/kubectl/cmd/exec_test.go +++ b/pkg/kubectl/cmd/exec_test.go @@ -163,9 +163,9 @@ func TestPodAndContainer(t *testing.T) { func TestExec(t *testing.T) { version := "v1" tests := []struct { - name, podPath, execPath, container string - pod *api.Pod - execErr bool + name, podPath, execPath string + pod *api.Pod + execErr bool }{ { 
name: "pod exec", diff --git a/pkg/kubectl/cmd/logs_test.go b/pkg/kubectl/cmd/logs_test.go index be3dc7cab11..026b899f7c1 100644 --- a/pkg/kubectl/cmd/logs_test.go +++ b/pkg/kubectl/cmd/logs_test.go @@ -34,8 +34,8 @@ import ( func TestLog(t *testing.T) { tests := []struct { - name, version, podPath, logPath, container string - pod *api.Pod + name, version, podPath, logPath string + pod *api.Pod }{ { name: "v1 - pod log", diff --git a/pkg/kubectl/cmd/portforward_test.go b/pkg/kubectl/cmd/portforward_test.go index dcb6f1da82d..e2437bcb620 100644 --- a/pkg/kubectl/cmd/portforward_test.go +++ b/pkg/kubectl/cmd/portforward_test.go @@ -47,10 +47,10 @@ func testPortForward(t *testing.T, flags map[string]string, args []string) { version := "v1" tests := []struct { - name string - podPath, pfPath, container string - pod *api.Pod - pfErr bool + name string + podPath, pfPath string + pod *api.Pod + pfErr bool }{ { name: "pod portforward", diff --git a/pkg/kubectl/cmd/resource/get_test.go b/pkg/kubectl/cmd/resource/get_test.go index 9d30530aa96..14a02d9e135 100644 --- a/pkg/kubectl/cmd/resource/get_test.go +++ b/pkg/kubectl/cmd/resource/get_test.go @@ -83,17 +83,6 @@ func defaultClientConfig() *restclient.Config { } } -func defaultClientConfigForVersion(version *schema.GroupVersion) *restclient.Config { - return &restclient.Config{ - APIPath: "/api", - ContentConfig: restclient.ContentConfig{ - NegotiatedSerializer: scheme.Codecs, - ContentType: runtime.ContentTypeJSON, - GroupVersion: version, - }, - } -} - type testPrinter struct { Objects []runtime.Object Err error diff --git a/pkg/kubectl/cmd/taint_test.go b/pkg/kubectl/cmd/taint_test.go index 8e901364943..172be9d309f 100644 --- a/pkg/kubectl/cmd/taint_test.go +++ b/pkg/kubectl/cmd/taint_test.go @@ -84,7 +84,6 @@ func TestTaint(t *testing.T) { args []string expectFatal bool expectTaint bool - selector bool }{ // success cases { diff --git a/pkg/kubectl/cmd/top_pod_test.go b/pkg/kubectl/cmd/top_pod_test.go index 94bca7ec3ed..a839454e2f1 100644 --- a/pkg/kubectl/cmd/top_pod_test.go +++ b/pkg/kubectl/cmd/top_pod_test.go @@ -41,7 +41,6 @@ func TestTopPod(t *testing.T) { testNS := "testns" testCases := []struct { name string - namespace string flags map[string]string args []string expectedPath string @@ -176,7 +175,6 @@ func TestTopPodCustomDefaults(t *testing.T) { testNS := "custom-namespace" testCases := []struct { name string - namespace string flags map[string]string args []string expectedPath string diff --git a/pkg/kubectl/namespace_test.go b/pkg/kubectl/namespace_test.go index 2a017ba806f..23b214225a8 100644 --- a/pkg/kubectl/namespace_test.go +++ b/pkg/kubectl/namespace_test.go @@ -29,7 +29,6 @@ func TestNamespaceGenerate(t *testing.T) { params map[string]interface{} expected *v1.Namespace expectErr bool - index int }{ { params: map[string]interface{}{ diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 0bd04c6b184..3620ed0712e 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -1260,7 +1260,6 @@ func TestFindSourceController(t *testing.T) { tests := []struct { list *api.ReplicationControllerList expectedController *api.ReplicationController - err error name string expectError bool }{ diff --git a/pkg/kubelet/cm/cpumanager/BUILD b/pkg/kubelet/cm/cpumanager/BUILD index d3330084825..a270b6a0527 100644 --- a/pkg/kubelet/cm/cpumanager/BUILD +++ b/pkg/kubelet/cm/cpumanager/BUILD @@ -47,7 +47,6 @@ go_test( "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go index 9381ea470bc..704060fb38e 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go @@ -28,7 +28,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" @@ -118,28 +117,6 @@ func (psp mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool return psp.podStatus, psp.found } -type mockPodKiller struct { - killedPods []*v1.Pod -} - -func (f *mockPodKiller) killPodNow(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error { - f.killedPods = append(f.killedPods, pod) - return nil -} - -type mockPodProvider struct { - pods []*v1.Pod -} - -func (f *mockPodProvider) getPods() []*v1.Pod { - return f.pods -} - -type mockRecorder struct{} - -func (r *mockRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { -} - func makePod(cpuRequest, cpuLimit string) *v1.Pod { return &v1.Pod{ Spec: v1.PodSpec{ @@ -161,20 +138,6 @@ func makePod(cpuRequest, cpuLimit string) *v1.Pod { } } -// CpuAllocatable must be <= CpuCapacity -func prepareCPUNodeStatus(CPUCapacity, CPUAllocatable string) v1.NodeStatus { - nodestatus := v1.NodeStatus{ - Capacity: make(v1.ResourceList, 1), - Allocatable: make(v1.ResourceList, 1), - } - cpucap, _ := resource.ParseQuantity(CPUCapacity) - cpuall, _ := resource.ParseQuantity(CPUAllocatable) - - nodestatus.Capacity[v1.ResourceCPU] = cpucap - nodestatus.Allocatable[v1.ResourceCPU] = cpuall - return nodestatus -} - func TestCPUManagerAdd(t *testing.T) { testCases := []struct { description string diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index 124f690acf7..8fef628b86c 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -233,11 +233,6 @@ func TestUpdateCapacityAllocatable(t *testing.T) { } -type stringPairType struct { - value1 string - value2 string -} - func constructDevices(devices []string) sets.String { ret := sets.NewString() for _, dev := range devices { diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index 727e0be14b9..7b8f5580665 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -1645,7 +1645,7 @@ func TestGetStarvedResources(t *testing.T) { } } -func testParsePercentage(t *testing.T) { +func TestParsePercentage(t *testing.T) { testCases := map[string]struct { hasError bool value float32 @@ -1674,7 +1674,7 @@ func testParsePercentage(t *testing.T) { } } -func testCompareThresholdValue(t *testing.T) { +func TestCompareThresholdValue(t *testing.T) { testCases := []struct { a, b evictionapi.ThresholdValue equal bool @@ -1831,20 +1831,6 @@ func newResourceList(cpu, memory, disk string) v1.ResourceList { return res } -func newEphemeralStorageResourceList(ephemeral, cpu, memory string) v1.ResourceList { - res := v1.ResourceList{} - if 
ephemeral != "" { - res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeral) - } - if cpu != "" { - res[v1.ResourceCPU] = resource.MustParse(cpu) - } - if memory != "" { - res[v1.ResourceMemory] = resource.MustParse("1Mi") - } - return res -} - func newResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements { res := v1.ResourceRequirements{} res.Requests = requests diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index c1061355baa..ea0172ce147 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -334,8 +334,6 @@ func newTestPods(count int) []*v1.Pod { return pods } -var emptyPodUIDs map[types.UID]kubetypes.SyncPodType - func TestSyncLoopAbort(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go index 780d47f9b75..ebd306fd75a 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go @@ -65,7 +65,6 @@ func TestSandboxGC(t *testing.T) { description string // description of the test case sandboxes []sandboxTemplate // templates of sandboxes containers []containerTemplate // templates of containers - minAge time.Duration // sandboxMinGCAge remain []int // template indexes of remaining sandboxes evictTerminatedPods bool }{ diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index 85a06794eeb..b8e3e55d863 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -225,12 +225,6 @@ func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected s return actual, actual.Equal(expected) } -type containerRecord struct { - container *v1.Container - attempt uint32 - state runtimeapi.ContainerState -} - // Only extract the fields of interests. 
type cRecord struct { name string diff --git a/pkg/kubelet/remote/remote_runtime_test.go b/pkg/kubelet/remote/remote_runtime_test.go index db5b31b6bf6..2f26efc3da0 100644 --- a/pkg/kubelet/remote/remote_runtime_test.go +++ b/pkg/kubelet/remote/remote_runtime_test.go @@ -50,13 +50,6 @@ func createRemoteRuntimeService(endpoint string, t *testing.T) internalapi.Runti return runtimeService } -func createRemoteImageService(endpoint string, t *testing.T) internalapi.ImageManagerService { - imageService, err := NewRemoteImageService(endpoint, defaultConnectionTimeout) - assert.NoError(t, err) - - return imageService -} - func TestVersion(t *testing.T) { fakeRuntime, endpoint := createAndStartFakeRemoteRuntime(t) defer fakeRuntime.Stop() diff --git a/pkg/kubelet/rkt/fake_rkt_interface_test.go b/pkg/kubelet/rkt/fake_rkt_interface_test.go index 19b3665685d..2ee7bdcdd65 100644 --- a/pkg/kubelet/rkt/fake_rkt_interface_test.go +++ b/pkg/kubelet/rkt/fake_rkt_interface_test.go @@ -195,7 +195,6 @@ func (f *fakePodDeletionProvider) IsPodDeleted(uid types.UID) bool { type fakeUnitGetter struct { networkNamespace kubecontainer.ContainerID - callServices []string } func newfakeUnitGetter() *fakeUnitGetter { diff --git a/pkg/kubelet/stats/cri_stats_provider_test.go b/pkg/kubelet/stats/cri_stats_provider_test.go index 5cff0ac7e90..931b368ec5d 100644 --- a/pkg/kubelet/stats/cri_stats_provider_test.go +++ b/pkg/kubelet/stats/cri_stats_provider_test.go @@ -38,7 +38,6 @@ import ( func TestCRIListPodStats(t *testing.T) { const ( seedRoot = 0 - seedRuntime = 100 seedKubelet = 200 seedMisc = 300 seedSandbox0 = 1000 diff --git a/pkg/kubelet/stats/stats_provider_test.go b/pkg/kubelet/stats/stats_provider_test.go index 6f02704ed59..1ff7f6524b4 100644 --- a/pkg/kubelet/stats/stats_provider_test.go +++ b/pkg/kubelet/stats/stats_provider_test.go @@ -59,7 +59,6 @@ const ( offsetFsTotalUsageBytes offsetFsBaseUsageBytes offsetFsInodeUsage - offsetVolume ) var ( diff --git a/pkg/master/BUILD b/pkg/master/BUILD index 3162a764c33..fadfc10b42b 100644 --- a/pkg/master/BUILD +++ b/pkg/master/BUILD @@ -145,13 +145,8 @@ go_test( "//vendor/github.com/go-openapi/strfmt:go_default_library", "//vendor/github.com/go-openapi/validate:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 506b81f9f6e..2d8d8c4f3e7 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -27,13 +27,8 @@ import ( "strings" "testing" - appsapiv1beta1 "k8s.io/api/apps/v1beta1" - autoscalingapiv1 "k8s.io/api/autoscaling/v1" - batchapiv1 "k8s.io/api/batch/v1" - batchapiv1beta1 "k8s.io/api/batch/v1beta1" certificatesapiv1beta1 "k8s.io/api/certificates/v1beta1" apiv1 "k8s.io/api/core/v1" - extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" @@ -194,32 +189,6 @@ func newMaster(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *ass return master, etcdserver, config, assert } -// limitedAPIResourceConfigSource only enables the core group, the extensions group, the batch group, and the autoscaling group. -func limitedAPIResourceConfigSource() *serverstorage.ResourceConfig { - ret := serverstorage.NewResourceConfig() - ret.EnableVersions( - apiv1.SchemeGroupVersion, - extensionsapiv1beta1.SchemeGroupVersion, - batchapiv1.SchemeGroupVersion, - batchapiv1beta1.SchemeGroupVersion, - appsapiv1beta1.SchemeGroupVersion, - autoscalingapiv1.SchemeGroupVersion, - ) - return ret -} - -// newLimitedMaster only enables the core group, the extensions group, the batch group, and the autoscaling group. -func newLimitedMaster(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) { - etcdserver, config, sharedInformers, assert := setUp(t) - config.ExtraConfig.APIResourceConfigSource = limitedAPIResourceConfigSource() - master, err := config.Complete(sharedInformers).New(genericapiserver.EmptyDelegate) - if err != nil { - t.Fatalf("Error in bringing up the master: %v", err) - } - - return master, etcdserver, config, assert -} - // TestVersion tests /version func TestVersion(t *testing.T) { s, etcdserver, _, _ := newMaster(t) diff --git a/pkg/printers/humanreadable_test.go b/pkg/printers/humanreadable_test.go index 7c45b868e3a..779f6b748c8 100644 --- a/pkg/printers/humanreadable_test.go +++ b/pkg/printers/humanreadable_test.go @@ -45,17 +45,6 @@ func testPrintNamespace(obj *api.Namespace, options PrintOptions) ([]metav1alpha return []metav1alpha1.TableRow{row}, nil } -func testPrintNamespaceList(list *api.NamespaceList, options PrintOptions) ([]metav1alpha1.TableRow, error) { - rows := make([]metav1alpha1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := testPrintNamespace(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} func TestPrintRowsForHandlerEntry(t *testing.T) { printFunc := reflect.ValueOf(testPrintNamespace) diff --git a/pkg/printers/internalversion/describe_test.go b/pkg/printers/internalversion/describe_test.go index 71dfd3f05f4..54947d5974f 100644 --- a/pkg/printers/internalversion/describe_test.go +++ b/pkg/printers/internalversion/describe_test.go @@ -783,8 +783,8 @@ func TestDefaultDescribers(t *testing.T) { func TestGetPodsTotalRequests(t *testing.T) { testCases := []struct { - pods *api.PodList - expectedReqs, expectedLimits map[api.ResourceName]resource.Quantity + pods *api.PodList + expectedReqs map[api.ResourceName]resource.Quantity }{ { pods: &api.PodList{ diff --git a/pkg/registry/apps/statefulset/storage/storage_test.go b/pkg/registry/apps/statefulset/storage/storage_test.go index 1e2d13ed848..ad98dd1ed43 100644 --- a/pkg/registry/apps/statefulset/storage/storage_test.go +++ b/pkg/registry/apps/statefulset/storage/storage_test.go @@ -44,17 +44,6 @@ func newStorage(t *testing.T) (StatefulSetStorage, *etcdtesting.EtcdTestServer) return storage, server } -// createStatefulSet is a helper function that returns a StatefulSet with the updated resource version. 
-func createStatefulSet(storage *REST, ps apps.StatefulSet, t *testing.T) (apps.StatefulSet, error) { - ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), ps.Namespace) - obj, err := storage.Create(ctx, &ps, rest.ValidateAllObjectFunc, false) - if err != nil { - t.Errorf("Failed to create StatefulSet, %v", err) - } - newPS := obj.(*apps.StatefulSet) - return *newPS, nil -} - func validNewStatefulSet() *apps.StatefulSet { return &apps.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/registry/batch/cronjob/strategy_test.go b/pkg/registry/batch/cronjob/strategy_test.go index 6cf9b60b589..0b46ac9f861 100644 --- a/pkg/registry/batch/cronjob/strategy_test.go +++ b/pkg/registry/batch/cronjob/strategy_test.go @@ -26,12 +26,6 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" ) -func newBool(a bool) *bool { - r := new(bool) - *r = a - return r -} - func TestCronJobStrategy(t *testing.T) { ctx := genericapirequest.NewDefaultContext() if !Strategy.NamespaceScoped() { diff --git a/pkg/registry/core/endpoint/storage/storage_test.go b/pkg/registry/core/endpoint/storage/storage_test.go index a511424a098..bf7b366f102 100644 --- a/pkg/registry/core/endpoint/storage/storage_test.go +++ b/pkg/registry/core/endpoint/storage/storage_test.go @@ -54,16 +54,6 @@ func validNewEndpoints() *api.Endpoints { } } -func validChangedEndpoints() *api.Endpoints { - endpoints := validNewEndpoints() - endpoints.ResourceVersion = "1" - endpoints.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}}, - Ports: []api.EndpointPort{{Port: 80, Protocol: "TCP"}}, - }} - return endpoints -} - func TestCreate(t *testing.T) { storage, server := newStorage(t) defer server.Terminate(t) diff --git a/pkg/registry/core/event/strategy_test.go b/pkg/registry/core/event/strategy_test.go index e95df8a0f85..fe1763798bc 100644 --- a/pkg/registry/core/event/strategy_test.go +++ b/pkg/registry/core/event/strategy_test.go @@ -31,19 +31,6 @@ import ( _ "k8s.io/kubernetes/pkg/api/testapi" ) -func testEvent(name string) *api.Event { - return &api.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "default", - }, - InvolvedObject: api.ObjectReference{ - Namespace: "default", - }, - Reason: "forTesting", - } -} - func TestGetAttrs(t *testing.T) { eventA := &api.Event{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/registry/core/persistentvolume/storage/storage_test.go b/pkg/registry/core/persistentvolume/storage/storage_test.go index 95c90eede9c..489569cca4e 100644 --- a/pkg/registry/core/persistentvolume/storage/storage_test.go +++ b/pkg/registry/core/persistentvolume/storage/storage_test.go @@ -77,11 +77,6 @@ func validNewPersistentVolume(name string) *api.PersistentVolume { return pv } -func validChangedPersistentVolume() *api.PersistentVolume { - pv := validNewPersistentVolume("foo") - return pv -} - func TestCreate(t *testing.T) { storage, _, server := newStorage(t) defer server.Terminate(t) diff --git a/pkg/registry/core/pod/strategy_test.go b/pkg/registry/core/pod/strategy_test.go index 8d10e6a6eeb..d013292635b 100644 --- a/pkg/registry/core/pod/strategy_test.go +++ b/pkg/registry/core/pod/strategy_test.go @@ -138,11 +138,6 @@ func getResourceList(cpu, memory string) api.ResourceList { return res } -func addResource(rName, value string, rl api.ResourceList) api.ResourceList { - rl[api.ResourceName(rName)] = resource.MustParse(value) - return rl -} - func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { res := 
api.ResourceRequirements{} res.Requests = requests diff --git a/pkg/registry/extensions/controller/storage/storage_test.go b/pkg/registry/extensions/controller/storage/storage_test.go index 62689035fb4..c969bb5c3eb 100644 --- a/pkg/registry/extensions/controller/storage/storage_test.go +++ b/pkg/registry/extensions/controller/storage/storage_test.go @@ -74,17 +74,6 @@ var validController = api.ReplicationController{ Spec: validControllerSpec, } -var validScale = autoscaling.Scale{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test"}, - Spec: autoscaling.ScaleSpec{ - Replicas: validReplicas, - }, - Status: autoscaling.ScaleStatus{ - Replicas: 0, - Selector: "a=b", - }, -} - func TestGet(t *testing.T) { storage, _, si, destroyFunc := newStorage(t) defer destroyFunc() diff --git a/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go b/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go index 4c67d4ca3da..c11b5798754 100644 --- a/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go +++ b/pkg/registry/policy/poddisruptionbudget/storage/storage_test.go @@ -39,17 +39,6 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) return podDisruptionBudgetStorage, statusStorage, server } -// createPodDisruptionBudget is a helper function that returns a PodDisruptionBudget with the updated resource version. -func createPodDisruptionBudget(storage *REST, pdb policy.PodDisruptionBudget, t *testing.T) (policy.PodDisruptionBudget, error) { - ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), pdb.Namespace) - obj, err := storage.Create(ctx, &pdb, rest.ValidateAllObjectFunc, false) - if err != nil { - t.Errorf("Failed to create PodDisruptionBudget, %v", err) - } - newPS := obj.(*policy.PodDisruptionBudget) - return *newPS, nil -} - func validNewPodDisruptionBudget() *policy.PodDisruptionBudget { minAvailable := intstr.FromInt(7) return &policy.PodDisruptionBudget{ diff --git a/pkg/registry/storage/storageclass/storage/storage_test.go b/pkg/registry/storage/storageclass/storage/storage_test.go index 9a5dd866950..aaca94134f1 100644 --- a/pkg/registry/storage/storageclass/storage/storage_test.go +++ b/pkg/registry/storage/storageclass/storage/storage_test.go @@ -57,10 +57,6 @@ func validNewStorageClass(name string) *storageapi.StorageClass { } } -func validChangedStorageClass() *storageapi.StorageClass { - return validNewStorageClass("foo") -} - func TestCreate(t *testing.T) { storage, server := newStorage(t) defer server.Terminate(t) diff --git a/pkg/registry/storage/volumeattachment/storage/storage_test.go b/pkg/registry/storage/volumeattachment/storage/storage_test.go index 669e319155c..540255de895 100644 --- a/pkg/registry/storage/volumeattachment/storage/storage_test.go +++ b/pkg/registry/storage/volumeattachment/storage/storage_test.go @@ -60,10 +60,6 @@ func validNewVolumeAttachment(name string) *storageapi.VolumeAttachment { } } -func validChangedVolumeAttachment() *storageapi.VolumeAttachment { - return validNewVolumeAttachment("foo") -} - func TestCreate(t *testing.T) { if *testapi.Storage.GroupVersion() != storageapiv1alpha1.SchemeGroupVersion { // skip the test for all versions exception v1alpha1 diff --git a/pkg/security/podsecuritypolicy/group/BUILD b/pkg/security/podsecuritypolicy/group/BUILD index 601e7476dee..95be9751975 100644 --- a/pkg/security/podsecuritypolicy/group/BUILD +++ b/pkg/security/podsecuritypolicy/group/BUILD @@ -31,10 +31,7 @@ go_test( ], embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", - deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", - ], + deps = ["//pkg/apis/extensions:go_default_library"], ) filegroup( diff --git a/pkg/security/podsecuritypolicy/group/mustrunas_test.go b/pkg/security/podsecuritypolicy/group/mustrunas_test.go index 395ffd80eb0..c0347672e69 100644 --- a/pkg/security/podsecuritypolicy/group/mustrunas_test.go +++ b/pkg/security/podsecuritypolicy/group/mustrunas_test.go @@ -17,7 +17,6 @@ limitations under the License. package group import ( - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "testing" ) @@ -110,7 +109,6 @@ func TestGenerate(t *testing.T) { func TestValidate(t *testing.T) { tests := map[string]struct { ranges []extensions.GroupIDRange - pod *api.Pod groups []int64 pass bool }{ diff --git a/pkg/serviceaccount/jwt_test.go b/pkg/serviceaccount/jwt_test.go index 076f4ade98a..e21af9584da 100644 --- a/pkg/serviceaccount/jwt_test.go +++ b/pkg/serviceaccount/jwt_test.go @@ -80,17 +80,6 @@ X024wzbiw1q07jFCyfQmODzURAx1VNT7QVUMdz/N8vy47/H40AZJ -----END RSA PRIVATE KEY----- ` -// openssl ecparam -name prime256v1 -genkey -out ecdsa256params.pem -const ecdsaPrivateKeyWithParams = `-----BEGIN EC PARAMETERS----- -BggqhkjOPQMBBw== ------END EC PARAMETERS----- ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIJ9LWDj3ZWe9CksPV7mZjD2dYXG9icfzxadCRwd3vr1toAoGCCqGSM49 -AwEHoUQDQgAEaLNEpzbaaNTCkKjBVj7sxpfJ1ifJQGNvcck4nrzcwFRuujwVDDJh -95iIGwKCQeSg+yhdN6Q/p2XaxNIZlYmUhg== ------END EC PRIVATE KEY----- -` - // openssl ecparam -name prime256v1 -genkey -noout -out ecdsa256.pem const ecdsaPrivateKey = `-----BEGIN EC PRIVATE KEY----- MHcCAQEEIEZmTmUhuanLjPA2CLquXivuwBDHTt5XYwgIr/kA1LtRoAoGCCqGSM49 diff --git a/pkg/util/dbus/dbus_test.go b/pkg/util/dbus/dbus_test.go index 3f22d5982f9..359f6fbd86f 100644 --- a/pkg/util/dbus/dbus_test.go +++ b/pkg/util/dbus/dbus_test.go @@ -25,21 +25,16 @@ import ( ) const ( - DBusNameFlagAllowReplacement uint32 = 1 << (iota + 1) - DBusNameFlagReplaceExisting - DBusNameFlagDoNotQueue + DBusNameFlagDoNotQueue uint32 = 1 << (iota + 1) ) const ( DBusRequestNameReplyPrimaryOwner uint32 = iota + 1 - DBusRequestNameReplyInQueue - DBusRequestNameReplyExists DBusRequestNameReplyAlreadyOwner ) const ( DBusReleaseNameReplyReleased uint32 = iota + 1 - DBusReleaseNameReplyNonExistent DBusReleaseNameReplyNotOwner ) From 24762b9f436faa5ecf59eb8de6ccd59cf143de0d Mon Sep 17 00:00:00 2001 From: Cosmin Cojocar Date: Mon, 15 Jan 2018 10:02:00 +0100 Subject: [PATCH 162/264] Extend the ListNextResults methods with the resource group and instrument them --- .../providers/azure/azure_backoff.go | 6 +-- .../providers/azure/azure_client.go | 45 ++++++++++++------- .../providers/azure/azure_fakes.go | 10 ++--- .../providers/azure/azure_util_vmss.go | 4 +- 4 files changed, 40 insertions(+), 25 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index ff0e16bfd7d..9e4ee788d45 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -89,7 +89,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - result, retryErr = az.VirtualMachinesClient.ListNextResults(result) + result, retryErr = az.VirtualMachinesClient.ListNextResults(az.ResourceGroup, result) 
if retryErr != nil { glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, retryErr) @@ -176,7 +176,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - result, retryErr = az.LoadBalancerClient.ListNextResults(result) + result, retryErr = az.LoadBalancerClient.ListNextResults(az.ResourceGroup, result) if retryErr != nil { glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -225,7 +225,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - result, retryErr = az.PublicIPAddressesClient.ListNextResults(result) + result, retryErr = az.PublicIPAddressesClient.ListNextResults(az.ResourceGroup, result) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index e0e2697aef8..a3bce657b90 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -35,7 +35,7 @@ type VirtualMachinesClient interface { CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) - ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) + ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) } // InterfacesClient defines needed functions for azure network.InterfacesClient @@ -51,7 +51,7 @@ type LoadBalancersClient interface { Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) - ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) + ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) } // PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient @@ -60,7 +60,7 @@ type PublicIPAddressesClient interface { Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) - ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) + ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err 
error)
 }
 
 // SubnetsClient defines needed functions for azure network.SubnetsClient
@@ -84,7 +84,7 @@ type VirtualMachineScaleSetsClient interface {
 	CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error)
 	Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error)
 	List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error)
-	ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error)
+	ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error)
 	UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error)
 }
 
@@ -93,7 +93,7 @@ type VirtualMachineScaleSetVMsClient interface {
 	Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error)
 	GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error)
 	List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error)
-	ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error)
+	ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error)
 }
 
 // RoutesClient defines needed functions for azure network.RoutesClient
@@ -193,14 +193,17 @@ func (az *azVirtualMachinesClient) List(resourceGroupName string) (result comput
 	return
 }
 
-func (az *azVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) {
+func (az *azVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) {
 	az.rateLimiter.Accept()
 	glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): start", lastResults)
 	defer func() {
 		glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): end", lastResults)
 	}()
 
-	return az.client.ListNextResults(lastResults)
+	mc := newMetricContext("vm", "list_next_results", resourceGroupName, az.client.SubscriptionID)
+	result, err = az.client.ListNextResults(lastResults)
+	mc.Observe(err)
+	return
 }
 
 // azInterfacesClient implements InterfacesClient.
@@ -341,14 +344,17 @@ func (az *azLoadBalancersClient) List(resourceGroupName string) (result network.
return } -func (az *azLoadBalancersClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { +func (az *azLoadBalancersClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): start", lastResult) defer func() { glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): end", lastResult) }() - return az.client.ListNextResults(lastResult) + mc := newMetricContext("load_balancers", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResult) + mc.Observe(err) + return } // azPublicIPAddressesClient implements PublicIPAddressesClient. @@ -428,14 +434,17 @@ func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result netw return } -func (az *azPublicIPAddressesClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { +func (az *azPublicIPAddressesClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): start", lastResults) defer func() { glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): end", lastResults) }() - return az.client.ListNextResults(lastResults) + mc := newMetricContext("public_ip_addresses", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResults) + mc.Observe(err) + return } // azSubnetsClient implements SubnetsClient. 
@@ -653,14 +662,17 @@ func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (resul return } -func (az *azVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { +func (az *azVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): start", lastResults) defer func() { glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): end", lastResults) }() - return az.client.ListNextResults(lastResults) + mc := newMetricContext("vmss", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResults) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { @@ -737,14 +749,17 @@ func (az *azVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virt return } -func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q,%q,%q): start", lastResults) defer func() { glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q,%q,%q): end", lastResults) }() - return az.client.ListNextResults(lastResults) + mc := newMetricContext("vmssvm", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResults) + mc.Observe(err) + return } // azRoutesClient implements RoutesClient. 
diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index dd66d509f17..ec58e57da44 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -144,7 +144,7 @@ func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.Loa return result, nil } -func (fLBC fakeAzureLBClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { +func (fLBC fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() result.Response.Response = &http.Response{ @@ -264,7 +264,7 @@ func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName } } -func (fAPC fakeAzurePIPClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { +func (fAPC fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() return network.PublicIPAddressListResult{}, nil @@ -411,7 +411,7 @@ func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (resul result.Value = &value return result, nil } -func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { +func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() return compute.VirtualMachineListResult{}, nil @@ -659,7 +659,7 @@ func (fVMC fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, v return result, nil } -func (fVMC fakeVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (fVMC fakeVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { return result, nil } @@ -764,7 +764,7 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) ( return result, nil } -func (fVMSSC fakeVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { +func (fVMSSC fakeVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { return result, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 2116e4f0dc7..48bd3adcd8f 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -422,7 +422,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - result, err = ss.VirtualMachineScaleSetsClient.ListNextResults(result) + result, err = 
ss.VirtualMachineScaleSetsClient.ListNextResults(ss.ResourceGroup, result) if err != nil { glog.Errorf("VirtualMachineScaleSetsClient.ListNextResults for %v failed: %v", ss.ResourceGroup, err) return false, err @@ -468,7 +468,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - result, err = ss.VirtualMachineScaleSetVMsClient.ListNextResults(result) + result, err = ss.VirtualMachineScaleSetVMsClient.ListNextResults(ss.ResourceGroup, result) if err != nil { glog.Errorf("VirtualMachineScaleSetVMsClient.ListNextResults for %v failed: %v", scaleSetName, err) return false, err From eb1650ce567e0bf19f310817502a7a4fe3049a11 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Fri, 12 Jan 2018 17:22:33 +0800 Subject: [PATCH 163/264] remove invalid and useless functions from unit test --- .../admission/plugin/webhook/initializer/initializer_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go index 553690d9a58..bc05b9c5db2 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go @@ -26,9 +26,7 @@ import ( type doNothingAdmission struct{} -func (doNothingAdmission) Admit(a admission.Attributes) error { return nil } func (doNothingAdmission) Handles(o admission.Operation) bool { return false } -func (doNothingAdmission) Validate() error { return nil } type fakeServiceResolver struct{} From 4139594e663e6f4c696145724fe4fa358f556338 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Fri, 12 Jan 2018 18:57:07 +0530 Subject: [PATCH 164/264] unstructured helpers: print path in error --- .../pkg/apis/meta/v1/unstructured/helpers.go | 56 ++++++++++--------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index fdc688f0732..08705ac8410 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -43,14 +43,15 @@ func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj - for _, field := range fields { + + for i, field := range fields { if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { return nil, false, nil } } else { - return nil, false, fmt.Errorf("%v is of the type %T, expected map[string]interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val) } } return val, true, nil @@ -65,7 +66,7 @@ func NestedString(obj map[string]interface{}, fields ...string) (string, bool, e } s, ok := val.(string) if !ok { - return "", false, fmt.Errorf("%v is of the type %T, expected string", val, val) + return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val) } return s, true, nil } @@ -79,7 +80,7 @@ func NestedBool(obj map[string]interface{}, fields ...string) 
(bool, bool, error } b, ok := val.(bool) if !ok { - return false, false, fmt.Errorf("%v is of the type %T, expected bool", val, val) + return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val) } return b, true, nil } @@ -93,7 +94,7 @@ func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, } f, ok := val.(float64) if !ok { - return 0, false, fmt.Errorf("%v is of the type %T, expected float64", val, val) + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val) } return f, true, nil } @@ -107,7 +108,7 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err } i, ok := val.(int64) if !ok { - return 0, false, fmt.Errorf("%v is of the type %T, expected int64", val, val) + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val) } return i, true, nil } @@ -121,14 +122,14 @@ func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, } m, ok := val.([]interface{}) if !ok { - return nil, false, fmt.Errorf("%v is of the type %T, expected []interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) } strSlice := make([]string, 0, len(m)) for _, v := range m { if str, ok := v.(string); ok { strSlice = append(strSlice, str) } else { - return nil, false, fmt.Errorf("contains non-string key in the slice: %v is of the type %T, expected string", v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v) } } return strSlice, true, nil @@ -143,7 +144,7 @@ func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, b } _, ok := val.([]interface{}) if !ok { - return nil, false, fmt.Errorf("%v is of the type %T, expected []interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) } return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil } @@ -160,7 +161,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("contains non-string key in the map: %v is of the type %T, expected string", v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) } } return strMap, true, nil @@ -185,25 +186,26 @@ func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]i } m, ok := val.(map[string]interface{}) if !ok { - return nil, false, fmt.Errorf("%v is of the type %T, expected map[string]interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val) } return m, true, nil } // SetNestedField sets the value of a nested field to a deep copy of the value provided. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. 
+func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error { return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...) } -func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) bool { +func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error { m := obj - for _, field := range fields[:len(fields)-1] { + + for i, field := range fields[:len(fields)-1] { if val, ok := m[field]; ok { if valMap, ok := val.(map[string]interface{}); ok { m = valMap } else { - return false + return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1])) } } else { newVal := make(map[string]interface{}) @@ -212,12 +214,12 @@ func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields } } m[fields[len(fields)-1]] = value - return true + return nil } // SetNestedStringSlice sets the string slice value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error { m := make([]interface{}, 0, len(value)) // convert []string into []interface{} for _, v := range value { m = append(m, v) @@ -226,14 +228,14 @@ func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ... } // SetNestedSlice sets the slice value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error { return SetNestedField(obj, value, fields...) } // SetNestedStringMap sets the map[string]string value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error { m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{} for k, v := range value { m[k] = v @@ -242,8 +244,8 @@ func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fie } // SetNestedMap sets the map[string]interface{} value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error { return SetNestedField(obj, value, fields...) 
} @@ -268,6 +270,10 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } +func jsonPath(fields []string) string { + return "." + strings.Join(fields, ".") +} + func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { // though this field is a *bool, but when decoded from JSON, it's // unmarshalled as bool. From fd520aef61996c213a960e1ea933def56dec17f6 Mon Sep 17 00:00:00 2001 From: Michal Fojtik Date: Mon, 15 Jan 2018 13:02:31 +0100 Subject: [PATCH 165/264] Show findmnt command output in case of error --- pkg/util/mount/nsenter_mount.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index a6c7869b0d8..99e81837fde 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -165,7 +165,7 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { glog.V(5).Infof("nsenter findmnt args: %v", args) out, err := n.ne.Exec("findmnt", args).CombinedOutput() if err != nil { - glog.V(2).Infof("Failed findmnt command for path %s: %v", file, err) + glog.V(2).Infof("Failed findmnt command for path %s: %s %v", file, out, err) // Different operating systems behave differently for paths which are not mount points. // On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get "/". // It's safer to assume that it's not a mount point. From 32520e09853289a22656b540ed4141e72926a108 Mon Sep 17 00:00:00 2001 From: Cosmin Cojocar Date: Mon, 15 Jan 2018 13:32:42 +0100 Subject: [PATCH 166/264] Review fixes --- pkg/cloudprovider/providers/azure/azure_client.go | 2 +- pkg/cloudprovider/providers/azure/azure_metrics.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index a3bce657b90..a8bf5a2eac6 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -84,7 +84,7 @@ type VirtualMachineScaleSetsClient interface { CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) - ListNextResults(resourceGroupName string, astResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) + ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) } diff --git a/pkg/cloudprovider/providers/azure/azure_metrics.go b/pkg/cloudprovider/providers/azure/azure_metrics.go index 2ef21bb5a5c..908ce7a5944 100644 --- a/pkg/cloudprovider/providers/azure/azure_metrics.go +++ b/pkg/cloudprovider/providers/azure/azure_metrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
From 1bdc99d2ecc406aa1d3128bc008fcbbe80d58807 Mon Sep 17 00:00:00 2001 From: Shyam Jeedigunta Date: Thu, 11 Jan 2018 13:25:57 +0100 Subject: [PATCH 167/264] Add script to run integration benchmark tests in dockerized env --- hack/jenkins/benchmark-dockerized.sh | 51 ++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100755 hack/jenkins/benchmark-dockerized.sh diff --git a/hack/jenkins/benchmark-dockerized.sh b/hack/jenkins/benchmark-dockerized.sh new file mode 100755 index 00000000000..994189dea0e --- /dev/null +++ b/hack/jenkins/benchmark-dockerized.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail +set -o xtrace + +retry() { + for i in {1..5}; do + "$@" && return 0 || sleep $i + done + "$@" +} + +# Runs benchmark integration tests, producing JUnit-style XML test +# reports in ${WORKSPACE}/artifacts. This script is intended to be run from +# kubekins-test container with a kubernetes repo mounted (at the path +# /go/src/k8s.io/kubernetes). See k8s.io/test-infra/scenarios/kubernetes_verify.py. + +export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH} + +retry go get github.com/tools/godep && godep version +retry go get github.com/jstemmer/go-junit-report + +# Disable the Go race detector. +export KUBE_RACE=" " +# Disable coverage report +export KUBE_COVER="n" +# Produce a JUnit-style XML test report. +export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/artifacts +export ARTIFACTS_DIR=${WORKSPACE}/artifacts + +cd /go/src/k8s.io/kubernetes + +./hack/install-etcd.sh + +make test-integration WHAT="$*" KUBE_TEST_ARGS="-run='XXX' -bench=. -benchmem" From 8f9cddda32b65aaaf3325c3dd1c36ee6ebdeaf45 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Wed, 10 Jan 2018 20:14:48 +0100 Subject: [PATCH 168/264] cmd/kube-apiserver/app/aggregator.go: add comments for explaining the group/version fields. --- cmd/kube-apiserver/app/aggregator.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index 6d6e469434b..623fcad8158 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -187,8 +187,12 @@ func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistra }) } +// priority defines group priority that is used in discovery. This controls +// group position in the kubectl output. type priority struct { - group int32 + // group indicates the order of the group relative to other groups. + group int32 + // version indicates the relative order of the version inside of its group. 
version int32 } @@ -229,6 +233,9 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9}, {Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9}, + // Append a new group to the end of the list if unsure. + // You can use min(existing group)-100 as the initial value for a group. + // Version can be set to 9 (to have space around) for a new group. } func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService { From 1a552bbe149373c056ee004304d7e5abaa89f4c6 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Mon, 27 Nov 2017 14:44:04 +0100 Subject: [PATCH 169/264] admission: do not leak admission config types outside of the plugins --- .../pkg/admission/eventratelimit/admission.go | 5 - .../podtolerationrestriction/admission.go | 4 - .../pkg/admission/resourcequota/admission.go | 5 - .../src/k8s.io/apiserver/pkg/admission/BUILD | 1 + .../k8s.io/apiserver/pkg/admission/config.go | 16 +-- .../apiserver/pkg/admission/config_test.go | 100 +++++++++++++++++- .../admission/plugin/webhook/validating/BUILD | 2 - .../plugin/webhook/validating/admission.go | 5 - .../k8s.io/apiserver/pkg/admission/plugins.go | 10 +- .../apiserver/pkg/apis/apiserver/types.go | 2 +- .../pkg/apis/apiserver/v1alpha1/conversion.go | 88 --------------- .../pkg/apis/apiserver/v1alpha1/types.go | 2 +- .../apiserver/pkg/server/options/admission.go | 11 +- vendor/github.com/jmespath/go-jmespath/BUILD | 5 +- 14 files changed, 119 insertions(+), 137 deletions(-) delete mode 100644 staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go diff --git a/plugin/pkg/admission/eventratelimit/admission.go b/plugin/pkg/admission/eventratelimit/admission.go index 8cd64ebe587..7e025319690 100644 --- a/plugin/pkg/admission/eventratelimit/admission.go +++ b/plugin/pkg/admission/eventratelimit/admission.go @@ -23,7 +23,6 @@ import ( "k8s.io/client-go/util/flowcontrol" api "k8s.io/kubernetes/pkg/apis/core" eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit" - eventratelimitapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1" "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation" ) @@ -44,10 +43,6 @@ func Register(plugins *admission.Plugins) { } return newEventRateLimit(configuration, realClock{}) }) - - // add our config types - eventratelimitapi.AddToScheme(plugins.ConfigScheme) - eventratelimitapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // Plugin implements an admission controller that can enforce event rate limits diff --git a/plugin/pkg/admission/podtolerationrestriction/admission.go b/plugin/pkg/admission/podtolerationrestriction/admission.go index 3318e221b2c..0bfe76696b9 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission.go @@ -38,7 +38,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/util/tolerations" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" - pluginapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1" ) // Register registers a plugin @@ -50,9 +49,6 
@@ func Register(plugins *admission.Plugins) { } return NewPodTolerationsPlugin(pluginConfig), nil }) - // add our config types - pluginapi.AddToScheme(plugins.ConfigScheme) - pluginapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // The annotation keys for default and whitelist of tolerations diff --git a/plugin/pkg/admission/resourcequota/admission.go b/plugin/pkg/admission/resourcequota/admission.go index 24f8b6354b9..c6e89aad806 100644 --- a/plugin/pkg/admission/resourcequota/admission.go +++ b/plugin/pkg/admission/resourcequota/admission.go @@ -28,7 +28,6 @@ import ( kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" "k8s.io/kubernetes/pkg/quota" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" - resourcequotaapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1" "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation" ) @@ -49,10 +48,6 @@ func Register(plugins *admission.Plugins) { } return NewResourceQuota(configuration, 5, make(chan struct{})) }) - - // add our config types - resourcequotaapi.AddToScheme(plugins.ConfigScheme) - resourcequotaapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // QuotaAdmission implements an admission controller that can enforce quota constraints diff --git a/staging/src/k8s.io/apiserver/pkg/admission/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/BUILD index aab87e45791..4af97de951a 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/BUILD @@ -20,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", ], diff --git a/staging/src/k8s.io/apiserver/pkg/admission/config.go b/staging/src/k8s.io/apiserver/pkg/admission/config.go index eb979861207..e716e62238a 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/config.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/config.go @@ -126,16 +126,10 @@ type configProvider struct { } // GetAdmissionPluginConfigurationFor returns a reader that holds the admission plugin configuration. 
-func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration, scheme *runtime.Scheme) (io.Reader, error) { - // if there is nothing nested in the object, we return the named location - obj := pluginCfg.Configuration - if obj != nil { - // serialize the configuration and build a reader for it - content, err := writeYAML(obj, scheme) - if err != nil { - return nil, err - } - return bytes.NewBuffer(content), nil +func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration) (io.Reader, error) { + // if there is a nest object, return it directly + if pluginCfg.Configuration != nil { + return bytes.NewBuffer(pluginCfg.Configuration.Raw), nil } // there is nothing nested, so we delegate to path if pluginCfg.Path != "" { @@ -162,7 +156,7 @@ func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { if pluginName != pluginCfg.Name { continue } - pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg, p.scheme) + pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/config_test.go b/staging/src/k8s.io/apiserver/pkg/admission/config_test.go index debde2463d2..67d8a1a5625 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/config_test.go @@ -23,6 +23,7 @@ import ( "testing" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" "k8s.io/apiserver/pkg/apis/apiserver" apiserverapi "k8s.io/apiserver/pkg/apis/apiserver" apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" @@ -49,7 +50,7 @@ func TestReadAdmissionConfiguration(t *testing.T) { ExpectedAdmissionConfig *apiserver.AdmissionConfiguration PluginNames []string }{ - "v1Alpha1 configuration - path fixup": { + "v1alpha1 configuration - path fixup": { ConfigBody: `{ "apiVersion": "apiserver.k8s.io/v1alpha1", "kind": "AdmissionConfiguration", @@ -70,7 +71,7 @@ func TestReadAdmissionConfiguration(t *testing.T) { }, PluginNames: []string{}, }, - "v1Alpha1 configuration - abspath": { + "v1alpha1 configuration - abspath": { ConfigBody: `{ "apiVersion": "apiserver.k8s.io/v1alpha1", "kind": "AdmissionConfiguration", @@ -153,3 +154,98 @@ func TestReadAdmissionConfiguration(t *testing.T) { } } } + +func TestEmbeddedConfiguration(t *testing.T) { + // create a place holder file to hold per test config + configFile, err := ioutil.TempFile("", "admission-plugin-config") + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if err = configFile.Close(); err != nil { + t.Fatalf("unexpected err: %v", err) + } + configFileName := configFile.Name() + + testCases := map[string]struct { + ConfigBody string + ExpectedConfig string + }{ + "versioned configuration": { + ConfigBody: `{ + "apiVersion": "apiserver.k8s.io/v1alpha1", + "kind": "AdmissionConfiguration", + "plugins": [ + { + "name": "Foo", + "configuration": { + "apiVersion": "foo.admission.k8s.io/v1alpha1", + "kind": "Configuration", + "foo": "bar" + } + } + ]}`, + ExpectedConfig: `{ + "apiVersion": "foo.admission.k8s.io/v1alpha1", + "kind": "Configuration", + "foo": "bar" + }`, + }, + "legacy configuration": { + ConfigBody: `{ + "apiVersion": "apiserver.k8s.io/v1alpha1", + "kind": "AdmissionConfiguration", + "plugins": [ + { + "name": "Foo", + "configuration": { + "foo": "bar" + } + } + ]}`, + ExpectedConfig: `{ + "foo": "bar" + }`, + }, + } + + for desc, test := range testCases { + scheme := runtime.NewScheme() + 
apiserverapi.AddToScheme(scheme) + apiserverapiv1alpha1.AddToScheme(scheme) + + if err = ioutil.WriteFile(configFileName, []byte(test.ConfigBody), 0644); err != nil { + t.Errorf("[%s] unexpected err writing temp file: %v", desc, err) + continue + } + config, err := ReadAdmissionConfiguration([]string{"Foo"}, configFileName, scheme) + if err != nil { + t.Errorf("[%s] unexpected err: %v", desc, err) + continue + } + r, err := config.ConfigFor("Foo") + if err != nil { + t.Errorf("[%s] Failed to get Foo config: %v", desc, err) + continue + } + bs, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("[%s] Failed to read Foo config data: %v", desc, err) + continue + } + + if !equalJSON(test.ExpectedConfig, string(bs)) { + t.Errorf("Unexpected config: expected=%q got=%q", test.ExpectedConfig, string(bs)) + } + } +} + +func equalJSON(a, b string) bool { + var x, y interface{} + if err := json.Unmarshal([]byte(a), &x); err != nil { + return false + } + if err := json.Unmarshal([]byte(b), &y); err != nil { + return false + } + return reflect.DeepEqual(x, y) +} diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD index 5ab45072db6..4226a13912c 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -23,8 +23,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go index b88556631cf..f68e46fa585 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go @@ -40,8 +40,6 @@ import ( genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" "k8s.io/apiserver/pkg/admission/plugin/webhook/config" - webhookadmissionapi "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" - webhookadmissionapiv1alpha1 "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace" "k8s.io/apiserver/pkg/admission/plugin/webhook/request" @@ -66,9 +64,6 @@ func Register(plugins *admission.Plugins) { return plugin, nil }) - // add our config types - webhookadmissionapi.AddToScheme(plugins.ConfigScheme) - webhookadmissionapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // WebhookSource can list dynamic webhook plugins. 
diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugins.go b/staging/src/k8s.io/apiserver/pkg/admission/plugins.go index 3ede44a173f..05e321ffc17 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugins.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugins.go @@ -25,8 +25,6 @@ import ( "sort" "sync" - "k8s.io/apimachinery/pkg/runtime" - "github.com/golang/glog" ) @@ -39,16 +37,10 @@ type Factory func(config io.Reader) (Interface, error) type Plugins struct { lock sync.Mutex registry map[string]Factory - - // ConfigScheme is used to parse the admission plugin config file. - // It is exposed to act as a hook for extending server providing their own config. - ConfigScheme *runtime.Scheme } func NewPlugins() *Plugins { - return &Plugins{ - ConfigScheme: runtime.NewScheme(), - } + return &Plugins{} } // All registered admission options. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go index f84fd04a340..e55da95f95d 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -46,5 +46,5 @@ type AdmissionPluginConfiguration struct { // Configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. // +optional - Configuration runtime.Object + Configuration *runtime.Unknown } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go deleted file mode 100644 index 378cc080d3a..00000000000 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -var _ runtime.NestedObjectDecoder = &AdmissionConfiguration{} - -// DecodeNestedObjects handles encoding RawExtensions on the AdmissionConfiguration, ensuring the -// objects are decoded with the provided decoder. -func (c *AdmissionConfiguration) DecodeNestedObjects(d runtime.Decoder) error { - // decoding failures result in a runtime.Unknown object being created in Object and passed - // to conversion - for k, v := range c.Plugins { - decodeNestedRawExtensionOrUnknown(d, &v.Configuration) - c.Plugins[k] = v - } - return nil -} - -var _ runtime.NestedObjectEncoder = &AdmissionConfiguration{} - -// EncodeNestedObjects handles encoding RawExtensions on the AdmissionConfiguration, ensuring the -// objects are encoded with the provided encoder. -func (c *AdmissionConfiguration) EncodeNestedObjects(e runtime.Encoder) error { - for k, v := range c.Plugins { - if err := encodeNestedRawExtension(e, &v.Configuration); err != nil { - return err - } - c.Plugins[k] = v - } - return nil -} - -// decodeNestedRawExtensionOrUnknown decodes the raw extension into an object once. 
If called -// On a RawExtension that has already been decoded (has an object), it will not run again. -func decodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) { - if ext.Raw == nil || ext.Object != nil { - return - } - obj, gvk, err := d.Decode(ext.Raw, nil, nil) - if err != nil { - unk := &runtime.Unknown{Raw: ext.Raw} - if runtime.IsNotRegisteredError(err) { - if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil { - unk.APIVersion = gvk.GroupVersion().String() - unk.Kind = gvk.Kind - ext.Object = unk - return - } - } - // TODO: record mime-type with the object - if gvk != nil { - unk.APIVersion = gvk.GroupVersion().String() - unk.Kind = gvk.Kind - } - obj = unk - } - ext.Object = obj -} - -func encodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { - if ext.Raw != nil || ext.Object == nil { - return nil - } - data, err := runtime.Encode(e, ext.Object) - if err != nil { - return err - } - ext.Raw = data - return nil -} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index 522c41c4143..239b8e20e04 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -46,5 +46,5 @@ type AdmissionPluginConfiguration struct { // Configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. // +optional - Configuration runtime.RawExtension `json:"configuration"` + Configuration *runtime.Unknown `json:"configuration"` } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index aa180378dc5..66b0b97ba63 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -38,6 +38,13 @@ import ( "k8s.io/client-go/rest" ) +var scheme = runtime.NewScheme() + +func init() { + apiserverapi.AddToScheme(scheme) + apiserverapiv1alpha1.AddToScheme(scheme) +} + // AdmissionOptions holds the admission options type AdmissionOptions struct { // RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default @@ -69,8 +76,6 @@ func NewAdmissionOptions() *AdmissionOptions { RecommendedPluginOrder: []string{lifecycle.PluginName, initialization.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, DefaultOffPlugins: []string{initialization.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, } - apiserverapi.AddToScheme(options.Plugins.ConfigScheme) - apiserverapiv1alpha1.AddToScheme(options.Plugins.ConfigScheme) server.RegisterAllAdmissionPlugins(options.Plugins) return options } @@ -120,7 +125,7 @@ func (a *AdmissionOptions) ApplyTo( pluginNames = a.enabledPluginNames() } - pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, a.Plugins.ConfigScheme) + pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, scheme) if err != nil { return fmt.Errorf("failed to read plugin config: %v", err) } diff --git a/vendor/github.com/jmespath/go-jmespath/BUILD b/vendor/github.com/jmespath/go-jmespath/BUILD index f4c95791b8b..a3dbf5f5c3a 100644 --- a/vendor/github.com/jmespath/go-jmespath/BUILD +++ b/vendor/github.com/jmespath/go-jmespath/BUILD @@ -25,7 +25,10 @@ filegroup( filegroup( 
name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/github.com/jmespath/go-jmespath/cmd/jpgo:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) From 83268fa9a8642c9754eeadca76c1b572c4c0ec43 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Thu, 11 Jan 2018 17:17:27 +0100 Subject: [PATCH 170/264] Update generated files --- .../pkg/apis/apiserver/v1alpha1/BUILD | 1 - .../v1alpha1/zz_generated.conversion.go | 34 ++++--------------- .../v1alpha1/zz_generated.deepcopy.go | 10 +++++- .../apis/apiserver/zz_generated.deepcopy.go | 12 ++++--- vendor/github.com/jmespath/go-jmespath/BUILD | 5 +-- 5 files changed, 24 insertions(+), 38 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD index 2640ff64b64..3075c3bbec8 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD @@ -8,7 +8,6 @@ load( go_library( name = "go_default_library", srcs = [ - "conversion.go", "doc.go", "register.go", "types.go", diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index b1af97ec392..d9668e99029 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" apiserver "k8s.io/apiserver/pkg/apis/apiserver" @@ -42,17 +44,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = make([]apiserver.AdmissionPluginConfiguration, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Plugins = nil - } + out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) return nil } @@ -62,17 +54,7 @@ func Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration } func autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = make([]AdmissionPluginConfiguration, len(*in)) - for i := range *in { - if err := Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Plugins = nil - } + out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) return nil } @@ -84,9 +66,7 @@ func Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration func autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { out.Name = in.Name out.Path 
= in.Path - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Configuration, &out.Configuration, s); err != nil { - return err - } + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) return nil } @@ -98,9 +78,7 @@ func Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginC func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { out.Name = in.Name out.Path = in.Path - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Configuration, &out.Configuration, s); err != nil { - return err - } + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index c8b46fac5d8..d795781ff22 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -60,7 +60,15 @@ func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { *out = *in - in.Configuration.DeepCopyInto(&out.Configuration) + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + if *in == nil { + *out = nil + } else { + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index 7e5fb6edb45..431abf61d68 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -60,10 +60,14 @@ func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { *out = *in - if in.Configuration == nil { - out.Configuration = nil - } else { - out.Configuration = in.Configuration.DeepCopyObject() + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + if *in == nil { + *out = nil + } else { + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } } return } diff --git a/vendor/github.com/jmespath/go-jmespath/BUILD b/vendor/github.com/jmespath/go-jmespath/BUILD index a3dbf5f5c3a..f4c95791b8b 100644 --- a/vendor/github.com/jmespath/go-jmespath/BUILD +++ b/vendor/github.com/jmespath/go-jmespath/BUILD @@ -25,10 +25,7 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/jmespath/go-jmespath/cmd/jpgo:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], visibility = ["//visibility:public"], ) From 7e33b128567700ef114fe15ae43f5e2e662b29cf Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Sun, 14 Jan 2018 11:38:00 -0500 Subject: [PATCH 171/264] Return the correct set of supported mime types for non-streaming requests --- .../pkg/endpoints/handlers/create.go | 2 +- .../pkg/endpoints/handlers/delete.go | 4 +-- .../handlers/negotiation/negotiate.go | 27 +++++++++---------- .../pkg/endpoints/handlers/update.go | 2 +- 4 files changed, 17 insertions(+), 18 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go index dc3560623ea..1d474267dfd 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -61,7 +61,7 @@ func createHandler(r rest.NamedCreater, scope RequestScope, typer runtime.Object ctx = request.WithNamespace(ctx, namespace) gv := scope.Kind.GroupVersion() - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) if err != nil { scope.err(err, w, req) return diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index 0bc5a659b55..b8ac281fa76 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -60,7 +60,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco return } if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, metainternalversion.Codecs) + s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversion.Codecs) if err != nil { scope.err(err, w, req) return @@ -228,7 +228,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestSco return } if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) if err != nil { scope.err(err, w, req) return diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go index 7f4225a5b93..3edfa675bf8 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -73,31 +73,30 @@ func NegotiateOutputStreamSerializer(req *http.Request, ns runtime.NegotiatedSer } // 
NegotiateInputSerializer returns the input serializer for the provided request. -func NegotiateInputSerializer(req *http.Request, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { +func NegotiateInputSerializer(req *http.Request, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { mediaType := req.Header.Get("Content-Type") - return NegotiateInputSerializerForMediaType(mediaType, ns) + return NegotiateInputSerializerForMediaType(mediaType, streaming, ns) } // NegotiateInputSerializerForMediaType returns the appropriate serializer for the given media type or an error. -func NegotiateInputSerializerForMediaType(mediaType string, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { +func NegotiateInputSerializerForMediaType(mediaType string, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { mediaTypes := ns.SupportedMediaTypes() if len(mediaType) == 0 { mediaType = mediaTypes[0].MediaType } - mediaType, _, err := mime.ParseMediaType(mediaType) - if err != nil { - _, supported := MediaTypesForSerializer(ns) - return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(supported) - } - - for _, info := range mediaTypes { - if info.MediaType != mediaType { - continue + if mediaType, _, err := mime.ParseMediaType(mediaType); err == nil { + for _, info := range mediaTypes { + if info.MediaType != mediaType { + continue + } + return info, nil } - return info, nil } - _, supported := MediaTypesForSerializer(ns) + supported, streamingSupported := MediaTypesForSerializer(ns) + if streaming { + return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(streamingSupported) + } return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(supported) } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go index 319bfd51b7c..0eac36660a4 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -56,7 +56,7 @@ func UpdateResource(r rest.Updater, scope RequestScope, typer runtime.ObjectType return } - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) if err != nil { scope.err(err, w, req) return From fc37221db5d08eb9553273ad6202a2557cfde131 Mon Sep 17 00:00:00 2001 From: Spyros Trigazis Date: Fri, 12 Jan 2018 09:56:50 +0000 Subject: [PATCH 172/264] Fix comparison of golang versions Change hack/lib/golang.sh to compare golang version properly with "sort -s -t. -k 1,1 -k 2,2n -k 3,3n", which sorts key by key and not as strings. --- hack/lib/golang.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 3e12b3170a4..589e965afb4 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -323,7 +323,7 @@ EOF go_version=($(go version)) local minimum_go_version minimum_go_version=go1.9.1 - if [[ "${go_version[2]}" < "${minimum_go_version}" && "${go_version[2]}" != "devel" ]]; then + if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. 
-k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then kube::log::usage_from_stdin < Date: Mon, 15 Jan 2018 14:41:42 -0500 Subject: [PATCH 173/264] Limit all category to apps group for ds/deployment/replicaset --- hack/make-rules/test-cmd-util.sh | 8 ++++++-- pkg/registry/extensions/daemonset/storage/storage.go | 10 ++++++++-- pkg/registry/extensions/deployment/storage/storage.go | 10 ++++++++-- pkg/registry/extensions/replicaset/storage/storage.go | 10 ++++++++-- pkg/registry/extensions/rest/storage_extensions.go | 6 +++--- 5 files changed, 33 insertions(+), 11 deletions(-) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index aaed0980807..0120083319d 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -1397,11 +1397,15 @@ run_kubectl_get_tests() { kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK" kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK" kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK" + kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/daemonsets 200 OK" + kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/deployments 200 OK" + kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/replicasets 200 OK" kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/statefulsets 200 OK" kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200" kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK" - kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK" - kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK" + kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/daemonsets 200 OK" + kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK" + kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK" ### Test kubectl get chunk size output_message=$(kubectl --v=6 get clusterrole --chunk-size=10 2>&1 "${kube_flags[@]}") diff --git a/pkg/registry/extensions/daemonset/storage/storage.go b/pkg/registry/extensions/daemonset/storage/storage.go index 461e5a97ff2..3291ba23d97 100644 --- a/pkg/registry/extensions/daemonset/storage/storage.go +++ b/pkg/registry/extensions/daemonset/storage/storage.go @@ -33,6 +33,7 @@ import ( // rest implements a RESTStorage for DaemonSets type REST struct { *genericregistry.Store + categories []string } // NewREST returns a RESTStorage object that will work against DaemonSets. @@ -56,7 +57,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { statusStore := *store statusStore.UpdateStrategy = daemonset.StatusStrategy - return &REST{store}, &StatusREST{store: &statusStore} + return &REST{store, []string{"all"}}, &StatusREST{store: &statusStore} } // Implement ShortNamesProvider @@ -71,7 +72,12 @@ var _ rest.CategoriesProvider = &REST{} // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. 
func (r *REST) Categories() []string { - return []string{"all"} + return r.categories +} + +func (r *REST) WithCategories(categories []string) *REST { + r.categories = categories + return r } // StatusREST implements the REST endpoint for changing the status of a daemonset diff --git a/pkg/registry/extensions/deployment/storage/storage.go b/pkg/registry/extensions/deployment/storage/storage.go index 6cf80f8dcc7..1601c5b4f59 100644 --- a/pkg/registry/extensions/deployment/storage/storage.go +++ b/pkg/registry/extensions/deployment/storage/storage.go @@ -63,6 +63,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter) DeploymentStorage { type REST struct { *genericregistry.Store + categories []string } // NewREST returns a RESTStorage object that will work against deployments. @@ -83,7 +84,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Rollbac statusStore := *store statusStore.UpdateStrategy = deployment.StatusStrategy - return &REST{store}, &StatusREST{store: &statusStore}, &RollbackREST{store: store} + return &REST{store, []string{"all"}}, &StatusREST{store: &statusStore}, &RollbackREST{store: store} } // Implement ShortNamesProvider @@ -99,7 +100,12 @@ var _ rest.CategoriesProvider = &REST{} // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. func (r *REST) Categories() []string { - return []string{"all"} + return r.categories +} + +func (r *REST) WithCategories(categories []string) *REST { + r.categories = categories + return r } // StatusREST implements the REST endpoint for changing the status of a deployment diff --git a/pkg/registry/extensions/replicaset/storage/storage.go b/pkg/registry/extensions/replicaset/storage/storage.go index 893e66390da..faa0021df53 100644 --- a/pkg/registry/extensions/replicaset/storage/storage.go +++ b/pkg/registry/extensions/replicaset/storage/storage.go @@ -62,6 +62,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter) ReplicaSetStorage { type REST struct { *genericregistry.Store + categories []string } // NewREST returns a RESTStorage object that will work against ReplicaSet. @@ -86,7 +87,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { statusStore := *store statusStore.UpdateStrategy = replicaset.StatusStrategy - return &REST{store}, &StatusREST{store: &statusStore} + return &REST{store, []string{"all"}}, &StatusREST{store: &statusStore} } // Implement ShortNamesProvider @@ -102,7 +103,12 @@ var _ rest.CategoriesProvider = &REST{} // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. 
func (r *REST) Categories() []string { - return []string{"all"} + return r.categories +} + +func (r *REST) WithCategories(categories []string) *REST { + r.categories = categories + return r } // StatusREST implements the REST endpoint for changing the status of a ReplicaSet diff --git a/pkg/registry/extensions/rest/storage_extensions.go b/pkg/registry/extensions/rest/storage_extensions.go index 6de5b96233a..f879b87d57d 100644 --- a/pkg/registry/extensions/rest/storage_extensions.go +++ b/pkg/registry/extensions/rest/storage_extensions.go @@ -62,12 +62,12 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag if apiResourceConfigSource.ResourceEnabled(version.WithResource("daemonsets")) { daemonSetStorage, daemonSetStatusStorage := daemonstore.NewREST(restOptionsGetter) - storage["daemonsets"] = daemonSetStorage + storage["daemonsets"] = daemonSetStorage.WithCategories(nil) storage["daemonsets/status"] = daemonSetStatusStorage } if apiResourceConfigSource.ResourceEnabled(version.WithResource("deployments")) { deploymentStorage := deploymentstore.NewStorage(restOptionsGetter) - storage["deployments"] = deploymentStorage.Deployment + storage["deployments"] = deploymentStorage.Deployment.WithCategories(nil) storage["deployments/status"] = deploymentStorage.Status storage["deployments/rollback"] = deploymentStorage.Rollback storage["deployments/scale"] = deploymentStorage.Scale @@ -83,7 +83,7 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag } if apiResourceConfigSource.ResourceEnabled(version.WithResource("replicasets")) { replicaSetStorage := replicasetstore.NewStorage(restOptionsGetter) - storage["replicasets"] = replicaSetStorage.ReplicaSet + storage["replicasets"] = replicaSetStorage.ReplicaSet.WithCategories(nil) storage["replicasets/status"] = replicaSetStorage.Status storage["replicasets/scale"] = replicaSetStorage.Scale } From aeb7428c895b5fe4757a8c1a5d47592ae8708a49 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 15 Jan 2018 15:06:43 -0500 Subject: [PATCH 174/264] Log message at a better level We don't really need to log this message at level 1. 
--- pkg/cloudprovider/providers/openstack/openstack_instances.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index 981ff7b9f89..c1031e4f5e9 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -43,7 +43,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) { return nil, false } - glog.V(1).Info("Claiming to support Instances") + glog.V(4).Info("Claiming to support Instances") return &Instances{ compute: compute, From fa51acbda35d329fbaa354145ecb04518910b936 Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Thu, 21 Sep 2017 11:58:19 -0700 Subject: [PATCH 175/264] Create Conformance document to display all tests that belong to Conformance suite --- test/conformance/BUILD | 10 +- test/conformance/cf_header.md | 41 +++++++ test/conformance/walk.go | 222 +++++++++++++++++++++++++++++++--- test/conformance/walk_test.go | 95 +++++++++++++++ 4 files changed, 351 insertions(+), 17 deletions(-) create mode 100644 test/conformance/cf_header.md create mode 100644 test/conformance/walk_test.go diff --git a/test/conformance/BUILD b/test/conformance/BUILD index 3d3b8018c8c..a91182f13e3 100644 --- a/test/conformance/BUILD +++ b/test/conformance/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test") go_library( name = "go_default_library", @@ -48,3 +48,11 @@ sh_test( ":list_conformance_tests", ], ) + +go_test( + name = "go_default_test", + srcs = ["walk_test.go"], + data = glob(["testdata/**"]), + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/test/conformance", +) diff --git a/test/conformance/cf_header.md b/test/conformance/cf_header.md new file mode 100644 index 00000000000..e4bef5c46e9 --- /dev/null +++ b/test/conformance/cf_header.md @@ -0,0 +1,41 @@ +# Kubernetes Conformance Test Suite - v1.9 + +## **Summary** +This document provides a summary of the tests included in the Kubernetes conformance test suite. +Each test lists a set of formal requirements that a platform that meets conformance requirements must adhere to. + +The tests are a subset of the "e2e" tests that make up the Kubernetes testing infrastructure. +Each test is identified by the presence of the `[Conformance]` keyword in the ginkgo descriptive function calls. +The contents of this document are extracted from comments preceding those `[Conformance]` keywords +and those comments are expected to include a descriptive overview of what the test is validating using +RFC2119 keywords. This will provide a clear distinction between which bits of code in the tests are +there for the purposes of validating the platform rather than simply infrastructure logic used to setup, or +clean up the tests. + +Example: +``` +/* + Testname: Kubelet-OutputToLogs + Description: By default the stdout and stderr from the process + being executed in a pod MUST be sent to the pod's logs. +*/ +// Note this test needs to be fixed to also test for stderr +It("it should print the output to logs [Conformance]", func() { +``` + +would generate the following documentation for the test. Note that the "TestName" from the Documentation above will +be used to document the test which make it more human readable. The "Description" field will be used as the +documentation for that test. 
+ +### **Output:** +## [Kubelet-OutputToLogs](https://github.com/kubernetes/kubernetes/blob/release-1.9/test/e2e_node/kubelet_test.go#L42) + +By default the stdout and stderr from the process +being executed in a pod MUST be sent to the pod's logs. +Note this test needs to be fixed to also test for stderr + +Notational Conventions when documenting the tests with the key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). + +Note: Please see the Summary at the end of this document to find the number of tests documented for conformance. + +## **List of Tests** diff --git a/test/conformance/walk.go b/test/conformance/walk.go index 9637620f182..a4bd553ce83 100644 --- a/test/conformance/walk.go +++ b/test/conformance/walk.go @@ -24,17 +24,89 @@ limitations under the License. package main import ( + "flag" "fmt" "go/ast" "go/parser" "go/token" + "io/ioutil" "os" "path/filepath" + "regexp" + "strconv" "strings" ) +var ( + baseURL = flag.String("url", "https://github.com/kubernetes/kubernetes/tree/master/", "location of the current source") + confDoc = flag.Bool("conformance", false, "write a conformance document") + totalConfTests, totalLegacyTests, missingComments int +) + +const regexDescribe = "Describe|KubeDescribe|SIGDescribe" +const regexContext = "Context" + type visitor struct { - FileSet *token.FileSet + FileSet *token.FileSet + lastDescribe describe + cMap ast.CommentMap + //list of all the conformance tests in the path + tests []conformanceData +} + +//describe contains text associated with ginkgo describe container +type describe struct { + text string + lastContext context +} + +//context contain the text associated with the Context clause +type context struct { + text string +} + +type conformanceData struct { + // A URL to the line of code in the kube src repo for the test + URL string + // Extracted from the "Testname:" comment before the test + TestName string + // Extracted from the "Description:" comment before the test + Description string +} + +func (v *visitor) convertToConformanceData(at *ast.BasicLit) { + cd := conformanceData{} + + comment := v.comment(at) + pos := v.FileSet.Position(at.Pos()) + cd.URL = fmt.Sprintf("%s%s#L%d", *baseURL, pos.Filename, pos.Line) + + lines := strings.Split(comment, "\n") + cd.Description = "" + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "Testname:") { + line = strings.TrimSpace(line[9:]) + cd.TestName = line + continue + } + if strings.HasPrefix(line, "Description:") { + line = strings.TrimSpace(line[12:]) + } + cd.Description += line + "\n" + } + + if cd.TestName == "" { + testName := v.getDescription(at.Value) + i := strings.Index(testName, "[Conformance]") + if i > 0 { + cd.TestName = strings.TrimSpace(testName[:i]) + } else { + cd.TestName = testName + } + } + + v.tests = append(v.tests, cd) } func newVisitor() *visitor { @@ -84,7 +156,16 @@ func (v *visitor) isLegacyItCall(call *ast.CallExpr) bool { func (v *visitor) failf(expr ast.Expr, format string, a ...interface{}) { msg := fmt.Sprintf(format, a...) 
fmt.Fprintf(os.Stderr, "ERROR at %v: %s\n", v.FileSet.Position(expr.Pos()), msg) - os.Exit(65) +} + +func (v *visitor) comment(x *ast.BasicLit) string { + for _, comm := range v.cMap.Comments() { + testOffset := int(x.Pos()-comm.End()) - len("framework.ConformanceIt(\"") + if 0 < testOffset && testOffset < 3 { + return comm.Text() + } + } + return "" } func (v *visitor) emit(arg ast.Expr) { @@ -94,13 +175,94 @@ func (v *visitor) emit(arg ast.Expr) { v.failf(at, "framework.ConformanceIt() called with non-string argument") return } - fmt.Printf("%s: %s\n", v.FileSet.Position(at.Pos()).Filename, at.Value) + + if *confDoc { + v.convertToConformanceData(at) + } else { + fmt.Printf("%s: %s\n", v.FileSet.Position(at.Pos()).Filename, at.Value) + } default: v.failf(at, "framework.ConformanceIt() called with non-literal argument") fmt.Fprintf(os.Stderr, "ERROR: non-literal argument %v at %v\n", arg, v.FileSet.Position(arg.Pos())) } } +func (v *visitor) getDescription(value string) string { + if len(v.lastDescribe.lastContext.text) > 0 { + return strings.Trim(v.lastDescribe.text, "\"") + + " " + strings.Trim(v.lastDescribe.lastContext.text, "\"") + + " " + strings.Trim(value, "\"") + } + return strings.Trim(v.lastDescribe.text, "\"") + + " " + strings.Trim(value, "\"") +} + +// funcName converts a selectorExpr with two idents into a string, +// x.y -> "x.y" +func funcName(n ast.Expr) string { + if sel, ok := n.(*ast.SelectorExpr); ok { + if x, ok := sel.X.(*ast.Ident); ok { + return x.String() + "." + sel.Sel.String() + } + } + return "" +} + +// isSprintf returns whether the given node is a call to fmt.Sprintf +func isSprintf(n ast.Expr) bool { + call, ok := n.(*ast.CallExpr) + return ok && funcName(call.Fun) == "fmt.Sprintf" && len(call.Args) != 0 +} + +// firstArg attempts to statically determine the value of the first +// argument. It only handles strings, and converts any unknown values +// (fmt.Sprintf interpolations) into *. +func (v *visitor) firstArg(n *ast.CallExpr) string { + if len(n.Args) == 0 { + return "" + } + var lit *ast.BasicLit + if isSprintf(n.Args[0]) { + return v.firstArg(n.Args[0].(*ast.CallExpr)) + } + lit, ok := n.Args[0].(*ast.BasicLit) + if ok && lit.Kind == token.STRING { + val, err := strconv.Unquote(lit.Value) + if err != nil { + panic(err) + } + if strings.Contains(val, "%") { + val = strings.Replace(val, "%d", "*", -1) + val = strings.Replace(val, "%v", "*", -1) + val = strings.Replace(val, "%s", "*", -1) + } + return val + } + if ident, ok := n.Args[0].(*ast.Ident); ok { + return ident.String() + } + return "*" +} + +// matchFuncName returns the first argument of a function if it's +// a Ginkgo-relevant function (Describe/KubeDescribe/Context), +// and the empty string otherwise. 
+func (v *visitor) matchFuncName(n *ast.CallExpr, pattern string) string { + switch x := n.Fun.(type) { + case *ast.SelectorExpr: + if match, err := regexp.MatchString(pattern, x.Sel.Name); err == nil && match { + return v.firstArg(n) + } + case *ast.Ident: + if match, err := regexp.MatchString(pattern, x.Name); err == nil && match { + return v.firstArg(n) + } + default: + return "" + } + return "" +} + // Visit visits each node looking for either calls to framework.ConformanceIt, // which it will emit in its list of conformance tests, or legacy calls to // It() with a manually embedded [Conformance] tag, which it will complain @@ -108,9 +270,16 @@ func (v *visitor) emit(arg ast.Expr) { func (v *visitor) Visit(node ast.Node) (w ast.Visitor) { switch t := node.(type) { case *ast.CallExpr: - if v.isConformanceCall(t) { + if name := v.matchFuncName(t, regexDescribe); name != "" && len(t.Args) >= 2 { + v.lastDescribe = describe{text: name} + } else if name := v.matchFuncName(t, regexContext); name != "" && len(t.Args) >= 2 { + v.lastDescribe.lastContext = context{text: name} + } else if v.isConformanceCall(t) { + totalConfTests++ v.emit(t.Args[0]) + return nil } else if v.isLegacyItCall(t) { + totalLegacyTests++ v.failf(t, "Using It() with manual [Conformance] tag is no longer allowed. Use framework.ConformanceIt() instead.") return nil } @@ -120,7 +289,7 @@ func (v *visitor) Visit(node ast.Node) (w ast.Visitor) { func scandir(dir string) { v := newVisitor() - pkg, err := parser.ParseDir(v.FileSet, dir, nil, 0) + pkg, err := parser.ParseDir(v.FileSet, dir, nil, parser.ParseComments) if err != nil { panic(err) } @@ -130,37 +299,58 @@ func scandir(dir string) { } } -func scanfile(path string) { +func scanfile(path string, src interface{}) []conformanceData { v := newVisitor() - file, err := parser.ParseFile(v.FileSet, path, nil, 0) + file, err := parser.ParseFile(v.FileSet, path, src, parser.ParseComments) if err != nil { panic(err) } + v.cMap = ast.NewCommentMap(v.FileSet, file, file.Comments) + ast.Walk(v, file) + return v.tests } func main() { - args := os.Args[1:] - if len(args) < 1 { + flag.Parse() + + if len(flag.Args()) < 1 { fmt.Fprintf(os.Stderr, "USAGE: %s [...]\n", os.Args[0]) os.Exit(64) } - for _, arg := range args { + if *confDoc { + // Note: this assumes that you're running from the root of the kube src repo + header, err := ioutil.ReadFile("test/conformance/cf_header.md") + if err == nil { + fmt.Printf("%s\n\n", header) + } + } + + totalConfTests = 0 + totalLegacyTests = 0 + missingComments = 0 + for _, arg := range flag.Args() { filepath.Walk(arg, func(path string, info os.FileInfo, err error) error { if err != nil { return err } - if info.IsDir() { - scandir(path) - } else { - // TODO(mml): Remove this once we have all-go-srcs build rules. 
See https://github.com/kubernetes/repo-infra/pull/45 - if strings.HasSuffix(path, ".go") { - scanfile(path) + if strings.HasSuffix(path, ".go") { + tests := scanfile(path, nil) + for _, cd := range tests { + fmt.Printf("## [%s](%s)\n\n", cd.TestName, cd.URL) + fmt.Printf("%s\n\n", cd.Description) + if len(cd.Description) < 10 { + missingComments++ + } } } return nil }) } + if *confDoc { + fmt.Println("\n## **Summary**") + fmt.Printf("\nTotal Conformance Tests: %d, total legacy tests that need conversion: %d, while total tests that need comment sections: %d\n\n", totalConfTests, totalLegacyTests, missingComments) + } } diff --git a/test/conformance/walk_test.go b/test/conformance/walk_test.go new file mode 100644 index 00000000000..8fcb8c4a156 --- /dev/null +++ b/test/conformance/walk_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "reflect" + "testing" +) + +var conformanceCases = []struct { + filename string + code string + output []conformanceData +}{ + // Go unit test + {"test/list/main_test.go", ` +var num = 3 +func Helper(x int) { return x / 0 } +var _ = Describe("Feature", func() { +/* + Testname: Kubelet-OutputToLogs + Description: By default the stdout and stderr from the process + being executed in a pod MUST be sent to the pod's logs. +*/ + framework.ConformanceIt("validates describe with ConformanceIt", func() {}) +})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/test/list/main_test.go#L11", TestName: "Kubelet-OutputToLogs", + Description: `By default the stdout and stderr from the process +being executed in a pod MUST be sent to the pod's logs.` + "\n\n"}}, + }, + // Describe + It + {"e2e/foo.go", ` +var _ = Describe("Feature", func() { + //It should have comment + framework.ConformanceIt("should work properly", func() {}) +})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L5", TestName: "Feature should work properly", Description: "It should have comment\n\n"}}, + }, + // KubeDescribe + It + {"e2e/foo.go", ` +var _ = framework.KubeDescribe("Feature", func() { + /*It should have comment*/ + framework.ConformanceIt("should work properly", func() {}) +})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L5", TestName: "Feature should work properly", Description: "It should have comment\n\n"}}, + }, + // KubeDescribe + Context + It + {"e2e/foo.go", ` +var _ = framework.KubeDescribe("Feature", func() { + Context("when offline", func() { + //Testname: Kubelet-OutputToLogs + //Description: By default the stdout and stderr from the process + //being executed in a pod MUST be sent to the pod's logs. 
+ framework.ConformanceIt("should work", func() {}) + }) +})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L8", TestName: "Kubelet-OutputToLogs", + Description: `By default the stdout and stderr from the process +being executed in a pod MUST be sent to the pod's logs.` + "\n\n"}}, + }, + // KubeDescribe + Context + It + {"e2e/foo.go", ` +var _ = framework.KubeDescribe("Feature", func() { + Context("with context", func() { + //Description: By default the stdout and stderr from the process + //being executed in a pod MUST be sent to the pod's logs. + framework.ConformanceIt("should work", func() {}) + }) +})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L7", TestName: "Feature with context should work", + Description: `By default the stdout and stderr from the process +being executed in a pod MUST be sent to the pod's logs.` + "\n\n"}}, + }, +} + +func TestConformance(t *testing.T) { + for _, test := range conformanceCases { + code := "package test\n" + test.code + *confDoc = true + tests := scanfile(test.filename, code) + if !reflect.DeepEqual(tests, test.output) { + t.Errorf("code:\n%s\ngot %v\nwant %v", + code, tests, test.output) + } + } +} From ef93e0f4267e0f5c9e396bedd1213ec695c4eba0 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 14:23:47 +0800 Subject: [PATCH 176/264] Convert nodeName to lower case for vmss instances This is because Kubelet always converts hostname to lower case. --- pkg/cloudprovider/providers/azure/azure_util_vmss.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 48bd3adcd8f..34585208977 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -121,7 +121,7 @@ func (ss *scaleSet) updateCache() error { for _, vm := range vms { nodeName := "" if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil { - nodeName = *vm.OsProfile.ComputerName + nodeName = strings.ToLower(*vm.OsProfile.ComputerName) } vmSize := "" From 79da10fb903b03d502e0eb23e9cb455db13f4f25 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 14:26:29 +0800 Subject: [PATCH 177/264] Rename filenames for clear --- .../providers/azure/{azure_util_cache.go => azure_cache.go} | 0 .../azure/{azure_util_cache_test.go => azure_cache_test.go} | 0 .../providers/azure/{azure_util.go => azure_standard.go} | 0 .../azure/{azure_util_test.go => azure_standard_test.go} | 0 .../providers/azure/{azure_util_vmss.go => azure_vmss.go} | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename pkg/cloudprovider/providers/azure/{azure_util_cache.go => azure_cache.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util_cache_test.go => azure_cache_test.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util.go => azure_standard.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util_test.go => azure_standard_test.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util_vmss.go => azure_vmss.go} (100%) diff --git a/pkg/cloudprovider/providers/azure/azure_util_cache.go b/pkg/cloudprovider/providers/azure/azure_cache.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_cache.go rename to pkg/cloudprovider/providers/azure/azure_cache.go diff --git a/pkg/cloudprovider/providers/azure/azure_util_cache_test.go 
b/pkg/cloudprovider/providers/azure/azure_cache_test.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_cache_test.go rename to pkg/cloudprovider/providers/azure/azure_cache_test.go diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_standard.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util.go rename to pkg/cloudprovider/providers/azure/azure_standard.go diff --git a/pkg/cloudprovider/providers/azure/azure_util_test.go b/pkg/cloudprovider/providers/azure/azure_standard_test.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_test.go rename to pkg/cloudprovider/providers/azure/azure_standard_test.go diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_vmss.go rename to pkg/cloudprovider/providers/azure/azure_vmss.go From 66b023110fb80134ecfc7d937f90f27ea3d3205d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 15:43:48 +0800 Subject: [PATCH 178/264] Fix azure fake clients: use pointers --- .../providers/azure/azure_fakes.go | 155 ++++++++++-------- 1 file changed, 84 insertions(+), 71 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index ec58e57da44..72aca359194 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -37,14 +37,14 @@ type fakeAzureLBClient struct { FakeStore map[string]map[string]network.LoadBalancer } -func newFakeAzureLBClient() fakeAzureLBClient { - fLBC := fakeAzureLBClient{} +func newFakeAzureLBClient() *fakeAzureLBClient { + fLBC := &fakeAzureLBClient{} fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer) fLBC.mutex = &sync.Mutex{} return fLBC } -func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) { +func (fLBC *fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() resultChan := make(chan network.LoadBalancer, 1) @@ -80,7 +80,7 @@ func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalan return resultChan, errChan } -func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fLBC *fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ -113,7 +113,7 @@ func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName return respChan, errChan } -func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { +func (fLBC *fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() if _, ok := fLBC.FakeStore[resourceGroupName]; ok { @@ -127,7 +127,7 @@ func (fLBC fakeAzureLBClient) 
Get(resourceGroupName string, loadBalancerName str } } -func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { +func (fLBC *fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() var value []network.LoadBalancer @@ -144,7 +144,7 @@ func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.Loa return result, nil } -func (fLBC fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { +func (fLBC *fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() result.Response.Response = &http.Response{ @@ -172,15 +172,15 @@ func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName stri pipName) } -func newFakeAzurePIPClient(subscriptionID string) fakeAzurePIPClient { - fAPC := fakeAzurePIPClient{} +func newFakeAzurePIPClient(subscriptionID string) *fakeAzurePIPClient { + fAPC := &fakeAzurePIPClient{} fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress) fAPC.SubscriptionID = subscriptionID fAPC.mutex = &sync.Mutex{} return fAPC } -func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) { +func (fAPC *fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() resultChan := make(chan network.PublicIPAddress, 1) @@ -217,7 +217,7 @@ func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIP return resultChan, errChan } -func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fAPC *fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ -250,7 +250,7 @@ func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressN return respChan, errChan } -func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { +func (fAPC *fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() if _, ok := fAPC.FakeStore[resourceGroupName]; ok { @@ -264,13 +264,13 @@ func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName } } -func (fAPC fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { +func (fAPC *fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() return network.PublicIPAddressListResult{}, nil } -func (fAPC fakeAzurePIPClient) 
List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { +func (fAPC *fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() var value []network.PublicIPAddress @@ -292,15 +292,15 @@ type fakeAzureInterfacesClient struct { FakeStore map[string]map[string]network.Interface } -func newFakeAzureInterfacesClient() fakeAzureInterfacesClient { - fIC := fakeAzureInterfacesClient{} +func newFakeAzureInterfacesClient() *fakeAzureInterfacesClient { + fIC := &fakeAzureInterfacesClient{} fIC.FakeStore = make(map[string]map[string]network.Interface) fIC.mutex = &sync.Mutex{} return fIC } -func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) { +func (fIC *fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) { fIC.mutex.Lock() defer fIC.mutex.Unlock() resultChan := make(chan network.Interface, 1) @@ -326,7 +326,7 @@ func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, ne return resultChan, errChan } -func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { +func (fIC *fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { fIC.mutex.Lock() defer fIC.mutex.Unlock() if _, ok := fIC.FakeStore[resourceGroupName]; ok { @@ -340,7 +340,7 @@ func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterf } } -func (fIC fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { +func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { return result, nil } @@ -349,14 +349,14 @@ type fakeAzureVirtualMachinesClient struct { FakeStore map[string]map[string]compute.VirtualMachine } -func newFakeAzureVirtualMachinesClient() fakeAzureVirtualMachinesClient { - fVMC := fakeAzureVirtualMachinesClient{} +func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient { + fVMC := &fakeAzureVirtualMachinesClient{} fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine) fVMC.mutex = &sync.Mutex{} return fVMC } -func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) { +func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() resultChan := make(chan compute.VirtualMachine, 1) @@ -381,7 +381,7 @@ func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName stri return resultChan, errChan } -func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, 
VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { +func (fVMC *fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() if _, ok := fVMC.FakeStore[resourceGroupName]; ok { @@ -395,7 +395,7 @@ func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName } } -func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { +func (fVMC *fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() var value []compute.VirtualMachine @@ -411,7 +411,7 @@ func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (resul result.Value = &value return result, nil } -func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { +func (fVMC *fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() return compute.VirtualMachineListResult{}, nil @@ -422,14 +422,14 @@ type fakeAzureSubnetsClient struct { FakeStore map[string]map[string]network.Subnet } -func newFakeAzureSubnetsClient() fakeAzureSubnetsClient { - fASC := fakeAzureSubnetsClient{} +func newFakeAzureSubnetsClient() *fakeAzureSubnetsClient { + fASC := &fakeAzureSubnetsClient{} fASC.FakeStore = make(map[string]map[string]network.Subnet) fASC.mutex = &sync.Mutex{} return fASC } -func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) { +func (fASC *fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() resultChan := make(chan network.Subnet, 1) @@ -455,7 +455,7 @@ func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virt return resultChan, errChan } -func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fASC *fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ -489,7 +489,7 @@ func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetwo } return respChan, errChan } -func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { +func (fASC *fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") @@ -503,7 +503,7 @@ func (fASC 
fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkN Message: "Not such Subnet", } } -func (fASC fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { +func (fASC *fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") @@ -526,14 +526,14 @@ type fakeAzureNSGClient struct { FakeStore map[string]map[string]network.SecurityGroup } -func newFakeAzureNSGClient() fakeAzureNSGClient { - fNSG := fakeAzureNSGClient{} +func newFakeAzureNSGClient() *fakeAzureNSGClient { + fNSG := &fakeAzureNSGClient{} fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup) fNSG.mutex = &sync.Mutex{} return fNSG } -func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) { +func (fNSG *fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() resultChan := make(chan network.SecurityGroup, 1) @@ -558,7 +558,7 @@ func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkS return resultChan, errChan } -func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fNSG *fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ -591,7 +591,7 @@ func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityG return respChan, errChan } -func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { +func (fNSG *fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() if _, ok := fNSG.FakeStore[resourceGroupName]; ok { @@ -605,7 +605,7 @@ func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGrou } } -func (fNSG fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { +func (fNSG *fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() var value []network.SecurityGroup @@ -632,15 +632,22 @@ type fakeVirtualMachineScaleSetVMsClient struct { FakeStore map[string]map[string]compute.VirtualMachineScaleSetVM } -func newFakeVirtualMachineScaleSetVMsClient() fakeVirtualMachineScaleSetVMsClient { - fVMC := fakeVirtualMachineScaleSetVMsClient{} +func newFakeVirtualMachineScaleSetVMsClient() *fakeVirtualMachineScaleSetVMsClient { + fVMC := &fakeVirtualMachineScaleSetVMsClient{} fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSetVM) fVMC.mutex = &sync.Mutex{} return fVMC } -func (fVMC fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, 
filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSetVM) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + + fVMC.FakeStore = store +} + +func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() @@ -659,11 +666,11 @@ func (fVMC fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, v return result, nil } -func (fVMC fakeVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { return result, nil } -func (fVMC fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() @@ -680,7 +687,7 @@ func (fVMC fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VM } } -func (fVMC fakeVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { _, err = fVMC.Get(resourceGroupName, VMScaleSetName, instanceID) if err != nil { return result, err @@ -694,15 +701,22 @@ type fakeVirtualMachineScaleSetsClient struct { FakeStore map[string]map[string]compute.VirtualMachineScaleSet } -func newFakeVirtualMachineScaleSetsClient() fakeVirtualMachineScaleSetsClient { - fVMSSC := fakeVirtualMachineScaleSetsClient{} +func newFakeVirtualMachineScaleSetsClient() *fakeVirtualMachineScaleSetsClient { + fVMSSC := &fakeVirtualMachineScaleSetsClient{} fVMSSC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSet) fVMSSC.mutex = &sync.Mutex{} return fVMSSC } -func (fVMSSC fakeVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSet) { + fVMSSC.mutex.Lock() + defer fVMSSC.mutex.Unlock() + + fVMSSC.FakeStore = store +} + +func (fVMSSC *fakeVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) { fVMSSC.mutex.Lock() defer fVMSSC.mutex.Unlock() @@ -729,7 +743,7 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName return resultChan, 
errChan } -func (fVMSSC fakeVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { fVMSSC.mutex.Lock() defer fVMSSC.mutex.Unlock() @@ -745,7 +759,7 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) Get(resourceGroupName string, VM } } -func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) { fVMSSC.mutex.Lock() defer fVMSSC.mutex.Unlock() @@ -755,7 +769,6 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) ( value = append(value, v) } } - result.Response.Response = &http.Response{ StatusCode: http.StatusOK, } @@ -764,11 +777,11 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) ( return result, nil } -func (fVMSSC fakeVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { return result, nil } -func (fVMSSC fakeVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { resultChan := make(chan compute.OperationStatusResponse, 1) errChan := make(chan error, 1) var result compute.OperationStatusResponse @@ -792,14 +805,14 @@ type fakeRoutesClient struct { FakeStore map[string]map[string]network.Route } -func newFakeRoutesClient() fakeRoutesClient { - fRC := fakeRoutesClient{} +func newFakeRoutesClient() *fakeRoutesClient { + fRC := &fakeRoutesClient{} fRC.FakeStore = make(map[string]map[string]network.Route) fRC.mutex = &sync.Mutex{} return fRC } -func (fRC fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) { +func (fRC *fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) { fRC.mutex.Lock() defer fRC.mutex.Unlock() @@ -826,7 +839,7 @@ func (fRC fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableN return resultChan, errChan } -func (fRC fakeRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fRC *fakeRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fRC.mutex.Lock() defer fRC.mutex.Unlock() @@ 
-866,14 +879,14 @@ type fakeRouteTablesClient struct { FakeStore map[string]map[string]network.RouteTable } -func newFakeRouteTablesClient() fakeRouteTablesClient { - fRTC := fakeRouteTablesClient{} +func newFakeRouteTablesClient() *fakeRouteTablesClient { + fRTC := &fakeRouteTablesClient{} fRTC.FakeStore = make(map[string]map[string]network.RouteTable) fRTC.mutex = &sync.Mutex{} return fRTC } -func (fRTC fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) { +func (fRTC *fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) { fRTC.mutex.Lock() defer fRTC.mutex.Unlock() @@ -900,7 +913,7 @@ func (fRTC fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, route return resultChan, errChan } -func (fRTC fakeRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { +func (fRTC *fakeRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { fRTC.mutex.Lock() defer fRTC.mutex.Unlock() if _, ok := fRTC.FakeStore[resourceGroupName]; ok { @@ -919,14 +932,14 @@ type fakeStorageAccountClient struct { FakeStore map[string]map[string]storage.Account } -func newFakeStorageAccountClient() fakeStorageAccountClient { - fSAC := fakeStorageAccountClient{} +func newFakeStorageAccountClient() *fakeStorageAccountClient { + fSAC := &fakeStorageAccountClient{} fSAC.FakeStore = make(map[string]map[string]storage.Account) fSAC.mutex = &sync.Mutex{} return fSAC } -func (fSAC fakeStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) { +func (fSAC *fakeStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) { fSAC.mutex.Lock() defer fSAC.mutex.Unlock() @@ -961,7 +974,7 @@ func (fSAC fakeStorageAccountClient) Create(resourceGroupName string, accountNam return resultChan, errChan } -func (fSAC fakeStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { +func (fSAC *fakeStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { fSAC.mutex.Lock() defer fSAC.mutex.Unlock() @@ -985,15 +998,15 @@ func (fSAC fakeStorageAccountClient) Delete(resourceGroupName string, accountNam return result, err } -func (fSAC fakeStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { +func (fSAC *fakeStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { return storage.AccountListKeysResult{}, nil } -func (fSAC fakeStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { +func (fSAC *fakeStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { return storage.AccountListResult{}, nil } -func (fSAC fakeStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { 
+func (fSAC *fakeStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { fSAC.mutex.Lock() defer fSAC.mutex.Unlock() @@ -1014,14 +1027,14 @@ type fakeDisksClient struct { FakeStore map[string]map[string]disk.Model } -func newFakeDisksClient() fakeDisksClient { - fDC := fakeDisksClient{} +func newFakeDisksClient() *fakeDisksClient { + fDC := &fakeDisksClient{} fDC.FakeStore = make(map[string]map[string]disk.Model) fDC.mutex = &sync.Mutex{} return fDC } -func (fDC fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) { +func (fDC *fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) { fDC.mutex.Lock() defer fDC.mutex.Unlock() @@ -1048,7 +1061,7 @@ func (fDC fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName str return resultChan, errChan } -func (fDC fakeDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { +func (fDC *fakeDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { fDC.mutex.Lock() defer fDC.mutex.Unlock() @@ -1087,7 +1100,7 @@ func (fDC fakeDisksClient) Delete(resourceGroupName string, diskName string, can return respChan, errChan } -func (fDC fakeDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { +func (fDC *fakeDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { fDC.mutex.Lock() defer fDC.mutex.Unlock() From 2e646b0e1264c35c35230a61b69f997239a0555a Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 15:44:02 +0800 Subject: [PATCH 179/264] Add more unit tests --- pkg/cloudprovider/providers/azure/BUILD | 10 +- .../providers/azure/azure_standard_test.go | 53 ------- .../providers/azure/azure_vmss.go | 2 + .../providers/azure/azure_vmss_test.go | 149 ++++++++++++++++++ 4 files changed, 156 insertions(+), 58 deletions(-) delete mode 100644 pkg/cloudprovider/providers/azure/azure_standard_test.go create mode 100644 pkg/cloudprovider/providers/azure/azure_vmss_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index 8272b20219d..bd65300c2ed 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -12,6 +12,7 @@ go_library( "azure.go", "azure_backoff.go", "azure_blobDiskController.go", + "azure_cache.go", "azure_client.go", "azure_controllerCommon.go", "azure_fakes.go", @@ -21,12 +22,11 @@ go_library( "azure_managedDiskController.go", "azure_metrics.go", "azure_routes.go", + "azure_standard.go", "azure_storage.go", "azure_storageaccount.go", - "azure_util.go", - "azure_util_cache.go", - "azure_util_vmss.go", "azure_vmsets.go", + "azure_vmss.go", "azure_wrap.go", "azure_zones.go", ], @@ -64,11 +64,11 @@ go_library( go_test( name = "go_default_test", srcs = [ + "azure_cache_test.go", "azure_loadbalancer_test.go", "azure_metrics_test.go", "azure_test.go", - "azure_util_cache_test.go", - "azure_util_test.go", + "azure_vmss_test.go", "azure_wrap_test.go", ], embed = [":go_default_library"], diff --git a/pkg/cloudprovider/providers/azure/azure_standard_test.go b/pkg/cloudprovider/providers/azure/azure_standard_test.go deleted file mode 100644 
index cac803c2eb0..00000000000 --- a/pkg/cloudprovider/providers/azure/azure_standard_test.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGetScaleSetVMInstanceID(t *testing.T) { - tests := []struct { - msg string - machineName string - expectError bool - expectedInstanceID string - }{{ - msg: "invalid vmss instance name", - machineName: "vmvm", - expectError: true, - }, - { - msg: "valid vmss instance name", - machineName: "vm00000Z", - expectError: false, - expectedInstanceID: "35", - }, - } - - for i, test := range tests { - instanceID, err := getScaleSetVMInstanceID(test.machineName) - if test.expectError { - assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) - } else { - assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) - } - } -} diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index 34585208977..a5ad369dc52 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -190,10 +190,12 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er } // Update cache and try again. + glog.V(10).Infof("vmss cache before updateCache: %v", ss.cache) if err := ss.updateCache(); err != nil { glog.Errorf("updateCache failed with error: %v", err) return scaleSetVMInfo{}, err } + glog.V(10).Infof("vmss cache after updateCache: %v", ss.cache) vm, found = getVMFromCache(nodeName) if found { return vm, nil diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_test.go b/pkg/cloudprovider/providers/azure/azure_vmss_test.go new file mode 100644 index 00000000000..7830eab783d --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_vmss_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/stretchr/testify/assert" +) + +func newTestScaleSet() *scaleSet { + ss := newScaleSet(getTestCloud()) + return ss.(*scaleSet) +} + +func setTestVirtualMachineScaleSets(ss *scaleSet, scaleSetName string, vmList []string) { + virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient() + scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet) + scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{ + scaleSetName: { + Name: &scaleSetName, + }, + } + virtualMachineScaleSetsClient.setFakeStore(scaleSets) + + virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient() + ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM) + ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM) + for i := range vmList { + ID := fmt.Sprintf("azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i) + nodeName := vmList[i] + instanceID := fmt.Sprintf("%d", i) + vmKey := fmt.Sprintf("%s-%s", scaleSetName, nodeName) + networkInterfaces := []compute.NetworkInterfaceReference{ + { + ID: &nodeName, + }, + } + ssVMs["rg"][vmKey] = compute.VirtualMachineScaleSetVM{ + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + OsProfile: &compute.OSProfile{ + ComputerName: &nodeName, + }, + NetworkProfile: &compute.NetworkProfile{ + NetworkInterfaces: &networkInterfaces, + }, + }, + ID: &ID, + InstanceID: &instanceID, + Location: &ss.Cloud.Location, + } + } + virtualMachineScaleSetVMsClient.setFakeStore(ssVMs) + + ss.Cloud.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient + ss.Cloud.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient +} + +func TestGetScaleSetVMInstanceID(t *testing.T) { + tests := []struct { + msg string + machineName string + expectError bool + expectedInstanceID string + }{{ + msg: "invalid vmss instance name", + machineName: "vmvm", + expectError: true, + }, + { + msg: "valid vmss instance name", + machineName: "vm00000Z", + expectError: false, + expectedInstanceID: "35", + }, + } + + for i, test := range tests { + instanceID, err := getScaleSetVMInstanceID(test.machineName) + if test.expectError { + assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) + } else { + assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) + } + } +} + +func TestGetInstanceIDByNodeName(t *testing.T) { + ss := newTestScaleSet() + + testCases := []struct { + description string + scaleSet string + vmList []string + nodeName string + expected string + expectError bool + }{ + { + description: "scaleSet should get instance by node name", + scaleSet: "ss", + vmList: []string{"vm1", "vm2"}, + nodeName: "vm1", + expected: "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0", + }, + { + description: "scaleSet should get instance by node name with upper cases hostname", + scaleSet: "ss", + vmList: []string{"VM1", "vm2"}, + nodeName: "vm1", + expected: "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0", + }, + { + description: "scaleSet should not get instance for non-exist nodes", + scaleSet: "ss", + vmList: []string{"VM1", "vm2"}, + nodeName: "vm3", + expectError: true, + }, + } + + for _, test := range testCases { 
+ setTestVirtualMachineScaleSets(ss, test.scaleSet, test.vmList) + real, err := ss.GetInstanceIDByNodeName(test.nodeName) + if test.expectError { + assert.Error(t, err, test.description) + continue + } + + assert.NoError(t, err, test.description) + assert.Equal(t, test.expected, real, test.description) + } +} From 037eec3b9a32c766d965090dd719d772f7247130 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 15 Jan 2018 20:42:12 -0500 Subject: [PATCH 180/264] Add error helpers and constants for NotAcceptable and UnsupportedMediaType --- .../apimachinery/pkg/api/errors/errors.go | 18 ++++++++++++++++++ .../apimachinery/pkg/apis/meta/v1/types.go | 12 ++++++++++++ .../endpoints/handlers/negotiation/errors.go | 4 ++-- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go b/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go index 9960600be33..3a2c9549ba2 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -352,6 +352,14 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr reason = metav1.StatusReasonForbidden // the server message has details about who is trying to perform what action. Keep its message. message = serverMessage + case http.StatusNotAcceptable: + reason = metav1.StatusReasonNotAcceptable + // the server message has details about what types are acceptable + message = serverMessage + case http.StatusUnsupportedMediaType: + reason = metav1.StatusReasonUnsupportedMediaType + // the server message has details about what types are acceptable + message = serverMessage case http.StatusMethodNotAllowed: reason = metav1.StatusReasonMethodNotAllowed message = "the server does not allow this method on the requested resource" @@ -434,6 +442,16 @@ func IsResourceExpired(err error) bool { return ReasonForError(err) == metav1.StatusReasonExpired } +// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header +func IsNotAcceptable(err error) bool { + return ReasonForError(err) == metav1.StatusReasonNotAcceptable +} + +// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header +func IsUnsupportedMediaType(err error) bool { + return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType +} + // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. func IsMethodNotSupported(err error) bool { diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index c8ee4e5d65b..750080770c4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -651,6 +651,18 @@ const ( // can only be created. API calls that return MethodNotAllowed can never succeed. StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable + // to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml. + // API calls that return NotAcceptable can never succeed. 
+ // Status code 406 + StatusReasonNotAcceptable StatusReason = "NotAcceptable" + + // StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable + // to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml. + // API calls that return UnsupportedMediaType can never succeed. + // Status code 415 + StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType" + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected // and the outcome of the call is unknown. // Details (optional): diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go index 07bc8e280f4..93b17cfb097 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go @@ -41,7 +41,7 @@ func (e errNotAcceptable) Status() metav1.Status { return metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusNotAcceptable, - Reason: metav1.StatusReason("NotAcceptable"), + Reason: metav1.StatusReasonNotAcceptable, Message: e.Error(), } } @@ -63,7 +63,7 @@ func (e errUnsupportedMediaType) Status() metav1.Status { return metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusUnsupportedMediaType, - Reason: metav1.StatusReason("UnsupportedMediaType"), + Reason: metav1.StatusReasonUnsupportedMediaType, Message: e.Error(), } } From aa504ccd57f38bfc23248c68019b7685fb14e668 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 15 Jan 2018 20:42:47 -0500 Subject: [PATCH 181/264] Return correct error when submitting patch in unsupported format --- .../pkg/apiserver/customresource_handler.go | 6 ++++- .../apiserver/pkg/endpoints/handlers/BUILD | 1 + .../apiserver/pkg/endpoints/handlers/patch.go | 27 ++++++++++++------- .../apiserver/pkg/endpoints/installer.go | 11 +++++--- 4 files changed, 32 insertions(+), 13 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 3112b9353fd..17d4329e33a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -227,7 +227,11 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { http.Error(w, fmt.Sprintf("%v not allowed while CustomResourceDefinition is terminating", requestInfo.Verb), http.StatusMethodNotAllowed) return } - handler := handlers.PatchResource(storage, requestScope, r.admission, unstructured.UnstructuredObjectConverter{}) + supportedTypes := []string{ + string(types.JSONPatchType), + string(types.MergePatchType), + } + handler := handlers.PatchResource(storage, requestScope, r.admission, unstructured.UnstructuredObjectConverter{}, supportedTypes) handler(w, req) return case "delete": diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index 16901d6a6b0..2bf780bd6b5 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -71,6 +71,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 1ac736d09dd..a54054127bf 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -32,17 +32,34 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" ) // PatchResource returns a function that will handle a resource patch // TODO: Eventually PatchResource should just use GuaranteedUpdate and this routine should be a bit cleaner -func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) http.HandlerFunc { +func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor, patchTypes []string) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { + // Do this first, otherwise name extraction can fail for unrecognized content types + // TODO: handle this in negotiation + contentType := req.Header.Get("Content-Type") + // Remove "; charset=" if included in header. + if idx := strings.Index(contentType, ";"); idx > 0 { + contentType = contentType[:idx] + } + patchType := types.PatchType(contentType) + + // Ensure the patchType is one we support + if !sets.NewString(patchTypes...).Has(contentType) { + scope.err(negotiation.NewUnsupportedMediaTypeError(patchTypes), w, req) + return + } + // TODO: we either want to remove timeout or document it (if we // document, move timeout out of this function and declare it in // api_installer) @@ -63,14 +80,6 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface return } - // TODO: handle this in negotiation - contentType := req.Header.Get("Content-Type") - // Remove "; charset=" if included in header. 
- if idx := strings.Index(contentType, ";"); idx > 0 { - contentType = contentType[:idx] - } - patchType := types.PatchType(contentType) - patchJS, err := readBody(req) if err != nil { scope.err(err, w, req) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go index 87bf1700d2b..11f658ee82a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go @@ -690,7 +690,12 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if hasSubresource { doc = "partially update " + subresource + " of the specified " + kind } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulPatchResource(patcher, reqScope, admit, mapping.ObjectConvertor)) + supportedTypes := []string{ + string(types.JSONPatchType), + string(types.MergePatchType), + string(types.StrategicMergePatchType), + } + handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulPatchResource(patcher, reqScope, admit, mapping.ObjectConvertor, supportedTypes)) route := ws.PATCH(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). @@ -1099,9 +1104,9 @@ func restfulUpdateResource(r rest.Updater, scope handlers.RequestScope, typer ru } } -func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) restful.RouteFunction { +func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, converter runtime.ObjectConvertor, supportedTypes []string) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { - handlers.PatchResource(r, scope, admit, converter)(res.ResponseWriter, req.Request) + handlers.PatchResource(r, scope, admit, converter, supportedTypes)(res.ResponseWriter, req.Request) } } From 59c305b59083a8e731c76eb859e77222402d1448 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Sun, 14 Jan 2018 13:22:41 -0500 Subject: [PATCH 182/264] Add support for submitting/receiving CRD objects as yaml --- .../pkg/apiserver/customresource_handler.go | 49 +-- .../test/integration/BUILD | 2 + .../test/integration/yaml_test.go | 361 ++++++++++++++++++ 3 files changed, 372 insertions(+), 40 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 17d4329e33a..f87718d7621 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -18,7 +18,6 @@ package apiserver import ( "fmt" - "io" "net/http" "path" "sync" @@ -475,27 +474,20 @@ func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.Serial Framer: json.Framer, }, }, + { + MediaType: "application/yaml", + EncodesAsText: true, + Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer), + }, } } -func (s unstructuredNegotiatedSerializer) EncoderForVersion(serializer runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return versioning.NewDefaultingCodecForScheme(Scheme, crEncoderInstance, nil, gv, nil) +func (s unstructuredNegotiatedSerializer) 
EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return versioning.NewDefaultingCodecForScheme(Scheme, encoder, nil, gv, nil) } -func (s unstructuredNegotiatedSerializer) DecoderToVersion(serializer runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return unstructuredDecoder{delegate: Codecs.DecoderToVersion(serializer, gv)} -} - -type unstructuredDecoder struct { - delegate runtime.Decoder -} - -func (d unstructuredDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - // Delegate for things other than Unstructured. - if _, ok := into.(runtime.Unstructured); !ok && into != nil { - return d.delegate.Decode(data, defaults, into) - } - return unstructured.UnstructuredJSONScheme.Decode(data, defaults, into) +func (s unstructuredNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return versioning.NewDefaultingCodecForScheme(Scheme, nil, decoder, nil, gv) } type unstructuredObjectTyper struct { @@ -515,29 +507,6 @@ func (t unstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { return t.delegate.Recognizes(gvk) || t.unstructuredTyper.Recognizes(gvk) } -var crEncoderInstance = crEncoder{} - -// crEncoder *usually* encodes using the unstructured.UnstructuredJSONScheme, but if the type is Status or WatchEvent -// it will serialize them out using the converting codec. -type crEncoder struct{} - -func (crEncoder) Encode(obj runtime.Object, w io.Writer) error { - switch t := obj.(type) { - case *metav1.Status, *metav1.WatchEvent: - for _, info := range Codecs.SupportedMediaTypes() { - // we are always json - if info.MediaType == "application/json" { - return info.Serializer.Encode(obj, w) - } - } - - return fmt.Errorf("unable to find json serializer for %T", t) - - default: - return unstructured.UnstructuredJSONScheme.Encode(obj, w) - } -} - type unstructuredCreator struct{} func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, error) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD index 1aca59abf8e..5d26af605a7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD @@ -12,11 +12,13 @@ go_test( "finalization_test.go", "registration_test.go", "validation_test.go", + "yaml_test.go", ], importpath = "k8s.io/apiextensions-apiserver/test/integration", tags = ["integration"], deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go new file mode 100644 index 00000000000..b6410123896 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go @@ -0,0 +1,361 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "encoding/json" + "fmt" + "net/http" + "testing" + + "github.com/ghodss/yaml" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/test/integration/testserver" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" +) + +func TestYAML(t *testing.T) { + config, err := testserver.DefaultServerConfig() + if err != nil { + t.Fatal(err) + } + + stopCh, apiExtensionClient, clientPool, err := testserver.StartServer(config) + if err != nil { + t.Fatal(err) + } + defer close(stopCh) + + noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) + _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) + if err != nil { + t.Fatal(err) + } + + kind := noxuDefinition.Spec.Names.Kind + listKind := noxuDefinition.Spec.Names.ListKind + apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Version + + rest := apiExtensionClient.Discovery().RESTClient() + + // Discovery + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version). + DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "APIResourceList" { + t.Fatalf("unexpected discovery kind: %s", string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "groupVersion"); v != apiVersion || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + // Error + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "missingname"). + DoRaw() + if !errors.IsNotFound(err) { + t.Fatalf("expected not found, got %v", err) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" { + t.Fatalf("unexpected discovery kind: %s", string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "reason"); v != "NotFound" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + uid := types.UID("") + resourceVersion := "" + + // Create + { + yamlBody := []byte(fmt.Sprintf(` +apiVersion: %s +kind: %s +metadata: + name: mytest +values: + numVal: 1 + boolVal: true + stringVal: "1"`, apiVersion, kind)) + + result, err := rest.Post(). + SetHeader("Accept", "application/yaml"). + SetHeader("Content-Type", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural). + Body(yamlBody). 
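+ // DoRaw executes the request and returns the raw response body for decodeYAML below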
+ DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetName() != "mytest" { + t.Fatalf("expected mytest, got %s", obj.GetName()) + } + if obj.GetAPIVersion() != apiVersion { + t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion()) + } + if obj.GetKind() != kind { + t.Fatalf("expected %s, got %s", kind, obj.GetKind()) + } + if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 1 || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != true || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "1" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + uid = obj.GetUID() + resourceVersion = obj.GetResourceVersion() + } + + // Get + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest"). + DoRaw() + if err != nil { + t.Fatal(err) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err, string(result)) + } + if obj.GetName() != "mytest" { + t.Fatalf("expected mytest, got %s", obj.GetName()) + } + if obj.GetAPIVersion() != apiVersion { + t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion()) + } + if obj.GetKind() != kind { + t.Fatalf("expected %s, got %s", kind, obj.GetKind()) + } + if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 1 || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != true || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "1" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + // List + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural). 
+ DoRaw()
+ if err != nil {
+ t.Fatal(err, string(result))
+ }
+ listObj, err := decodeYAML(result)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if listObj.GetAPIVersion() != apiVersion {
+ t.Fatalf("expected %s, got %s", apiVersion, listObj.GetAPIVersion())
+ }
+ if listObj.GetKind() != listKind {
+ t.Fatalf("expected %s, got %s", listKind, listObj.GetKind())
+ }
+ items, ok, err := unstructured.NestedSlice(listObj.Object, "items")
+ if !ok || err != nil || len(items) != 1 {
+ t.Fatalf("expected one item, got %v %v %v", items, ok, err)
+ }
+ obj := unstructured.Unstructured{Object: items[0].(map[string]interface{})}
+ if obj.GetName() != "mytest" {
+ t.Fatalf("expected mytest, got %s", obj.GetName())
+ }
+ if obj.GetAPIVersion() != apiVersion {
+ t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion())
+ }
+ if obj.GetKind() != kind {
+ t.Fatalf("expected %s, got %s", kind, obj.GetKind())
+ }
+ if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 1 || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != true || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "1" || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ }
+
+ // Watch rejects yaml (no streaming support)
+ {
+ result, err := rest.Get().
+ SetHeader("Accept", "application/yaml").
+ AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
+ Param("watch", "true").
+ DoRaw()
+ if !errors.IsNotAcceptable(err) {
+ t.Fatalf("expected not acceptable error, got %v (%s)", err, string(result))
+ }
+ obj, err := decodeYAML(result)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" {
+ t.Fatalf("unexpected result: %s", string(result))
+ }
+ if v, ok, err := unstructured.NestedString(obj.Object, "reason"); v != "NotAcceptable" || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ if v, ok, err := unstructured.NestedFloat64(obj.Object, "code"); v != http.StatusNotAcceptable || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ }
+
+ // Update
+ {
+ yamlBody := []byte(fmt.Sprintf(`
+apiVersion: %s
+kind: %s
+metadata:
+  name: mytest
+  uid: %s
+  resourceVersion: "%s"
+values:
+  numVal: 2
+  boolVal: false
+  stringVal: "2"`, apiVersion, kind, uid, resourceVersion))
+ result, err := rest.Put().
+ SetHeader("Accept", "application/yaml").
+ SetHeader("Content-Type", "application/yaml").
+ AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest").
+ Body(yamlBody). 
+ DoRaw()
+ if err != nil {
+ t.Fatal(err, string(result))
+ }
+ obj, err := decodeYAML(result)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if obj.GetName() != "mytest" {
+ t.Fatalf("expected mytest, got %s", obj.GetName())
+ }
+ if obj.GetAPIVersion() != apiVersion {
+ t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion())
+ }
+ if obj.GetKind() != kind {
+ t.Fatalf("expected %s, got %s", kind, obj.GetKind())
+ }
+ if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 2 || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != false || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "2" || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ if obj.GetUID() != uid {
+ t.Fatalf("uid changed: %v vs %v", uid, obj.GetUID())
+ }
+ }
+
+ // Patch rejects yaml requests (only JSON mime types are allowed)
+ {
+ yamlBody := []byte(`
+values:
+  numVal: 3`)
+ result, err := rest.Patch(types.MergePatchType).
+ SetHeader("Accept", "application/yaml").
+ SetHeader("Content-Type", "application/yaml").
+ AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest").
+ Body(yamlBody).
+ DoRaw()
+ if !errors.IsUnsupportedMediaType(err) {
+ t.Fatalf("expected unsupported media type error, got %v\n%s", err, string(result))
+ }
+ obj, err := decodeYAML(result)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" {
+ t.Fatalf("expected %s %s, got %s %s", "v1", "Status", obj.GetAPIVersion(), obj.GetKind())
+ }
+ if v, ok, err := unstructured.NestedString(obj.Object, "reason"); v != "UnsupportedMediaType" || !ok || err != nil {
+ t.Fatal(v, ok, err, string(result))
+ }
+ }
+
+ // Delete
+ {
+ result, err := rest.Delete().
+ SetHeader("Accept", "application/yaml").
+ AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest"). 
+ DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" { + t.Fatalf("unexpected response: %s", string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "status"); v != "Success" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } +} + +func decodeYAML(data []byte) (*unstructured.Unstructured, error) { + retval := &unstructured.Unstructured{Object: map[string]interface{}{}} + // ensure this isn't JSON + if json.Unmarshal(data, &retval.Object) == nil { + return nil, fmt.Errorf("data is JSON, not YAML: %s", string(data)) + } + // ensure it is YAML + retval.Object = map[string]interface{}{} + if err := yaml.Unmarshal(data, &retval.Object); err != nil { + return nil, fmt.Errorf("error decoding YAML: %v\noriginal YAML: %s", err, string(data)) + } + return retval, nil +} From 87cebae73815fcc9a5781a42c432f62bf4d1aed0 Mon Sep 17 00:00:00 2001 From: NickrenREN Date: Fri, 12 Jan 2018 10:20:22 +0800 Subject: [PATCH 183/264] Add fsType for CSI --- pkg/apis/core/types.go | 6 ++++++ pkg/volume/csi/csi_mounter.go | 9 ++++++++- staging/src/k8s.io/api/core/v1/types.go | 6 ++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index e036d0721b9..a1e2fd91763 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -1615,6 +1615,12 @@ type CSIPersistentVolumeSource struct { // Defaults to false (read/write). // +optional ReadOnly bool + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string } // ContainerPort represents a network port in a single container diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index 3009fdf47ea..62954369b65 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -34,6 +34,8 @@ import ( "k8s.io/kubernetes/pkg/volume/util" ) +const defaultFSType = "ext4" + //TODO (vladimirvivien) move this in a central loc later var ( volDataKey = struct { @@ -189,6 +191,11 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { accessMode = c.spec.PersistentVolume.Spec.AccessModes[0] } + fsType := csiSource.FSType + if len(fsType) == 0 { + fsType = defaultFSType + } + err = csi.NodePublishVolume( ctx, c.volumeID, @@ -197,7 +204,7 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { accessMode, c.volumeInfo, attribs, - "ext4", //TODO needs to be sourced from PV or somewhere else + fsType, ) if err != nil { diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 49ef6109276..5b3147c8fe5 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -1730,6 +1730,12 @@ type CSIPersistentVolumeSource struct { // Defaults to false (read/write). // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` } // ContainerPort represents a network port in a single container. 
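
The csi_mounter.go hunk above defaults the filesystem to "ext4" whenever the PV's CSIPersistentVolumeSource leaves FSType empty. Below is a minimal, self-contained sketch of that fallback; the helper name chooseFSType is illustrative only and is not part of the patch, which inlines the check in SetUpAt.

package main

import "fmt"

const defaultFSType = "ext4"

// chooseFSType mirrors the fallback added in SetUpAt: prefer the fsType
// recorded on the CSI persistent volume source, and use ext4 when it is empty.
func chooseFSType(requested string) string {
	if len(requested) == 0 {
		return defaultFSType
	}
	return requested
}

func main() {
	fmt.Println(chooseFSType(""))    // prints "ext4"
	fmt.Println(chooseFSType("xfs")) // prints "xfs"
}
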
From 37d42870eacedab76ac08bb41fec1bfb22edc1dc Mon Sep 17 00:00:00 2001 From: NickrenREN Date: Fri, 12 Jan 2018 18:58:12 +0800 Subject: [PATCH 184/264] auto generated code --- api/openapi-spec/swagger.json | 4 + api/swagger-spec/v1.json | 4 + docs/api-reference/v1/definitions.html | 7 + pkg/apis/core/v1/zz_generated.conversion.go | 2 + .../src/k8s.io/api/core/v1/generated.pb.go | 1587 +++++++++-------- .../src/k8s.io/api/core/v1/generated.proto | 6 + .../core/v1/types_swagger_doc_generated.go | 1 + 7 files changed, 836 insertions(+), 775 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 795486975c8..9426d4a89a1 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -74526,6 +74526,10 @@ "description": "Driver is the name of the driver to use for this volume. Required.", "type": "string" }, + "fsType": { + "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, "readOnly": { "description": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", "type": "boolean" diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index cc2cebe67cf..a76d0f4e5c2 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -21320,6 +21320,10 @@ "readOnly": { "type": "boolean", "description": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)." + }, + "fsType": { + "type": "string", + "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." } } }, diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index cbb2f013aad..9d411fd1e70 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -7857,6 +7857,13 @@ Examples:

boolean
false
+fsType
+Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+false
+string
+ + diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index e8ddfac91fb..a2be2327cc3 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -619,6 +619,7 @@ func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource( out.Driver = in.Driver out.VolumeHandle = in.VolumeHandle out.ReadOnly = in.ReadOnly + out.FSType = in.FSType return nil } @@ -631,6 +632,7 @@ func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource( out.Driver = in.Driver out.VolumeHandle = in.VolumeHandle out.ReadOnly = in.ReadOnly + out.FSType = in.FSType return nil } diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go index 61aa1833f85..756e93d6294 100644 --- a/staging/src/k8s.io/api/core/v1/generated.pb.go +++ b/staging/src/k8s.io/api/core/v1/generated.pb.go @@ -1570,6 +1570,10 @@ func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0 } i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) return i, nil } @@ -10676,6 +10680,8 @@ func (m *CSIPersistentVolumeSource) Size() (n int) { l = len(m.VolumeHandle) n += 1 + l + sovGenerated(uint64(l)) n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14053,6 +14059,7 @@ func (this *CSIPersistentVolumeSource) String() string { `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, `}`, }, "") return s @@ -17838,6 +17845,35 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } } m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -49512,779 +49548,780 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12382 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x90, 0x24, 0x47, - 0x56, 0xd8, 0x55, 0xf7, 0x7c, 0xf5, 0x9b, 0xef, 0xdc, 0x5d, 0xa9, 0x77, 0x24, 0x6d, 0xaf, 0x4a, - 0x77, 0xd2, 0xea, 0x6b, 0xe6, 0xb4, 0x92, 0x4e, 0xe2, 0x74, 0x27, 0x98, 0x99, 0x9e, 0xd9, 0x6d, - 0xed, 0xce, 0x6c, 0x2b, 0x7b, 0x76, 0xf7, 0x4e, 0x88, 0xf3, 0xd5, 0x74, 0xe7, 0xcc, 0x94, 0xa6, - 0xa6, 0xaa, 0x55, 0x55, 0x3d, 0xbb, 0xa3, 0x80, 0x08, 0x5b, 0x06, 0xfc, 0x01, 0x3f, 0x2e, 0x6c, - 0xc2, 0xc6, 0x40, 0xe0, 0x08, 0x1b, 0x07, 0x9c, 0xb1, 0x1d, 0xc6, 0x60, 0xc0, 0x80, 0x6d, 0x8c, - 0x1d, 0x0e, 0xf8, 0x83, 0xc1, 0xfe, 0x71, 0x44, 0x10, 0x1e, 0xc3, 0x40, 0xd8, 0xc1, 0x0f, 0x3b, - 0x6c, 0xf3, 0x8b, 0x31, 0x36, 0x8e, 0xfc, 0xac, 0xcc, 0xea, 0xaa, 0xee, 0x9e, 0xd5, 0xec, 0x48, - 0x5c, 0xdc, 0xbf, 0xee, 0x7c, 0x2f, 0x5f, 0x66, 0xe5, 0xc7, 0xcb, 0x97, 0x2f, 0xdf, 
0x07, 0xbc, - 0xb1, 0xfb, 0x7a, 0x34, 0xef, 0x06, 0x0b, 0xbb, 0x9d, 0x4d, 0x12, 0xfa, 0x24, 0x26, 0xd1, 0xc2, - 0x3e, 0xf1, 0x5b, 0x41, 0xb8, 0x20, 0x00, 0x4e, 0xdb, 0x5d, 0x68, 0x06, 0x21, 0x59, 0xd8, 0x7f, - 0x69, 0x61, 0x9b, 0xf8, 0x24, 0x74, 0x62, 0xd2, 0x9a, 0x6f, 0x87, 0x41, 0x1c, 0x20, 0xc4, 0x71, - 0xe6, 0x9d, 0xb6, 0x3b, 0x4f, 0x71, 0xe6, 0xf7, 0x5f, 0x9a, 0x7b, 0x71, 0xdb, 0x8d, 0x77, 0x3a, - 0x9b, 0xf3, 0xcd, 0x60, 0x6f, 0x61, 0x3b, 0xd8, 0x0e, 0x16, 0x18, 0xea, 0x66, 0x67, 0x8b, 0xfd, - 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0x6e, 0x2d, 0x69, 0x86, 0xdc, 0x8f, 0x89, 0x1f, 0xb9, 0x81, - 0x1f, 0xbd, 0xe8, 0xb4, 0xdd, 0x88, 0x84, 0xfb, 0x24, 0x5c, 0x68, 0xef, 0x6e, 0x53, 0x58, 0x64, - 0x22, 0x2c, 0xec, 0xbf, 0xb4, 0x49, 0x62, 0xa7, 0xab, 0x47, 0x73, 0xaf, 0x24, 0xe4, 0xf6, 0x9c, - 0xe6, 0x8e, 0xeb, 0x93, 0xf0, 0x40, 0xd2, 0x58, 0x08, 0x49, 0x14, 0x74, 0xc2, 0x26, 0x39, 0x51, - 0xad, 0x68, 0x61, 0x8f, 0xc4, 0x4e, 0xc6, 0xd7, 0xcf, 0x2d, 0xe4, 0xd5, 0x0a, 0x3b, 0x7e, 0xec, - 0xee, 0x75, 0x37, 0xf3, 0xb9, 0x7e, 0x15, 0xa2, 0xe6, 0x0e, 0xd9, 0x73, 0xba, 0xea, 0xbd, 0x9c, - 0x57, 0xaf, 0x13, 0xbb, 0xde, 0x82, 0xeb, 0xc7, 0x51, 0x1c, 0xa6, 0x2b, 0xd9, 0xdf, 0xb0, 0xe0, - 0xf2, 0xe2, 0xdd, 0xc6, 0x8a, 0xe7, 0x44, 0xb1, 0xdb, 0x5c, 0xf2, 0x82, 0xe6, 0x6e, 0x23, 0x0e, - 0x42, 0x72, 0x27, 0xf0, 0x3a, 0x7b, 0xa4, 0xc1, 0x06, 0x02, 0xbd, 0x00, 0x63, 0xfb, 0xec, 0x7f, - 0xad, 0x5a, 0xb6, 0x2e, 0x5b, 0x57, 0x4a, 0x4b, 0x33, 0xbf, 0x7e, 0x58, 0xf9, 0xd4, 0xd1, 0x61, - 0x65, 0xec, 0x8e, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x0d, 0x23, 0x5b, 0xd1, 0xc6, 0x41, 0x9b, 0x94, - 0x0b, 0x0c, 0x77, 0x4a, 0xe0, 0x8e, 0xac, 0x36, 0x68, 0x29, 0x16, 0x50, 0xb4, 0x00, 0xa5, 0xb6, - 0x13, 0xc6, 0x6e, 0xec, 0x06, 0x7e, 0xb9, 0x78, 0xd9, 0xba, 0x32, 0xbc, 0x34, 0x2b, 0x50, 0x4b, - 0x75, 0x09, 0xc0, 0x09, 0x0e, 0xed, 0x46, 0x48, 0x9c, 0xd6, 0x2d, 0xdf, 0x3b, 0x28, 0x0f, 0x5d, - 0xb6, 0xae, 0x8c, 0x25, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x47, 0x0a, 0x30, 0xb6, 0xb8, - 0xb5, 0xe5, 0xfa, 0x6e, 0x7c, 0x80, 0xee, 0xc0, 0x84, 0x1f, 0xb4, 0x88, 0xfc, 0xcf, 0xbe, 0x62, - 0xfc, 0xea, 0xe5, 0xf9, 0xee, 0x95, 0x39, 0xbf, 0xae, 0xe1, 0x2d, 0xcd, 0x1c, 0x1d, 0x56, 0x26, - 0xf4, 0x12, 0x6c, 0xd0, 0x41, 0x18, 0xc6, 0xdb, 0x41, 0x4b, 0x91, 0x2d, 0x30, 0xb2, 0x95, 0x2c, - 0xb2, 0xf5, 0x04, 0x6d, 0x69, 0xfa, 0xe8, 0xb0, 0x32, 0xae, 0x15, 0x60, 0x9d, 0x08, 0xda, 0x84, - 0x69, 0xfa, 0xd7, 0x8f, 0x5d, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x95, 0x47, 0x57, 0x43, 0x5d, 0x3a, - 0x77, 0x74, 0x58, 0x99, 0x4e, 0x15, 0xe2, 0x34, 0x41, 0xfb, 0x03, 0x98, 0x5a, 0x8c, 0x63, 0xa7, - 0xb9, 0x43, 0x5a, 0x7c, 0x06, 0xd1, 0x2b, 0x30, 0xe4, 0x3b, 0x7b, 0x44, 0xcc, 0xef, 0x65, 0x31, - 0xb0, 0x43, 0xeb, 0xce, 0x1e, 0x39, 0x3e, 0xac, 0xcc, 0xdc, 0xf6, 0xdd, 0xf7, 0x3b, 0x62, 0x55, - 0xd0, 0x32, 0xcc, 0xb0, 0xd1, 0x55, 0x80, 0x16, 0xd9, 0x77, 0x9b, 0xa4, 0xee, 0xc4, 0x3b, 0x62, - 0xbe, 0x91, 0xa8, 0x0b, 0x55, 0x05, 0xc1, 0x1a, 0x96, 0x7d, 0x1f, 0x4a, 0x8b, 0xfb, 0x81, 0xdb, - 0xaa, 0x07, 0xad, 0x08, 0xed, 0xc2, 0x74, 0x3b, 0x24, 0x5b, 0x24, 0x54, 0x45, 0x65, 0xeb, 0x72, - 0xf1, 0xca, 0xf8, 0xd5, 0x2b, 0x99, 0x1f, 0x6b, 0xa2, 0xae, 0xf8, 0x71, 0x78, 0xb0, 0xf4, 0xa8, - 0x68, 0x6f, 0x3a, 0x05, 0xc5, 0x69, 0xca, 0xf6, 0xbf, 0x2b, 0xc0, 0x85, 0xc5, 0x0f, 0x3a, 0x21, - 0xa9, 0xba, 0xd1, 0x6e, 0x7a, 0x85, 0xb7, 0xdc, 0x68, 0x77, 0x3d, 0x19, 0x01, 0xb5, 0xb4, 0xaa, - 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x22, 0x8c, 0xd2, 0xdf, 0xb7, 0x71, 0x4d, 0x7c, 0xf2, 0x39, 0x81, - 0x3c, 0x5e, 0x75, 0x62, 0xa7, 0xca, 0x41, 0x58, 0xe2, 0xa0, 0x35, 0x18, 0x6f, 0xb2, 0x0d, 0xb9, - 0xbd, 0x16, 
0xb4, 0x08, 0x9b, 0xcc, 0xd2, 0xd2, 0xf3, 0x14, 0x7d, 0x39, 0x29, 0x3e, 0x3e, 0xac, - 0x94, 0x79, 0xdf, 0x04, 0x09, 0x0d, 0x86, 0xf5, 0xfa, 0xc8, 0x56, 0xfb, 0x6b, 0x88, 0x51, 0x82, - 0x8c, 0xbd, 0x75, 0x45, 0xdb, 0x2a, 0xc3, 0x6c, 0xab, 0x4c, 0x64, 0x6f, 0x13, 0xf4, 0x12, 0x0c, - 0xed, 0xba, 0x7e, 0xab, 0x3c, 0xc2, 0x68, 0x3d, 0x41, 0xe7, 0xfc, 0x86, 0xeb, 0xb7, 0x8e, 0x0f, - 0x2b, 0xb3, 0x46, 0x77, 0x68, 0x21, 0x66, 0xa8, 0xf6, 0x1f, 0x5b, 0x50, 0x61, 0xb0, 0x55, 0xd7, - 0x23, 0x75, 0x12, 0x46, 0x6e, 0x14, 0x13, 0x3f, 0x36, 0x06, 0xf4, 0x2a, 0x40, 0x44, 0x9a, 0x21, - 0x89, 0xb5, 0x21, 0x55, 0x0b, 0xa3, 0xa1, 0x20, 0x58, 0xc3, 0xa2, 0x0c, 0x21, 0xda, 0x71, 0x42, - 0xb6, 0xbe, 0xc4, 0xc0, 0x2a, 0x86, 0xd0, 0x90, 0x00, 0x9c, 0xe0, 0x18, 0x0c, 0xa1, 0xd8, 0x8f, - 0x21, 0xa0, 0x2f, 0xc2, 0x74, 0xd2, 0x58, 0xd4, 0x76, 0x9a, 0x72, 0x00, 0xd9, 0x96, 0x69, 0x98, - 0x20, 0x9c, 0xc6, 0xb5, 0xff, 0xa1, 0x25, 0x16, 0x0f, 0xfd, 0xea, 0x4f, 0xf8, 0xb7, 0xda, 0xbf, - 0x68, 0xc1, 0xe8, 0x92, 0xeb, 0xb7, 0x5c, 0x7f, 0x1b, 0x7d, 0x15, 0xc6, 0xe8, 0xd9, 0xd4, 0x72, - 0x62, 0x47, 0xf0, 0xbd, 0xcf, 0x6a, 0x7b, 0x4b, 0x1d, 0x15, 0xf3, 0xed, 0xdd, 0x6d, 0x5a, 0x10, - 0xcd, 0x53, 0x6c, 0xba, 0xdb, 0x6e, 0x6d, 0xbe, 0x47, 0x9a, 0xf1, 0x1a, 0x89, 0x9d, 0xe4, 0x73, - 0x92, 0x32, 0xac, 0xa8, 0xa2, 0x1b, 0x30, 0x12, 0x3b, 0xe1, 0x36, 0x89, 0x05, 0x03, 0xcc, 0x64, - 0x54, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0xf8, 0x4d, 0x92, 0x1c, 0x0b, 0x1b, 0xac, 0x2a, 0x16, 0x24, - 0xec, 0x9f, 0xb6, 0xe0, 0xe2, 0x72, 0xa3, 0x96, 0xb3, 0xae, 0x9e, 0x86, 0x91, 0x56, 0xe8, 0xee, - 0x93, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x65, 0xa5, 0x58, 0x40, 0xd1, 0xeb, 0x30, 0xc1, 0x0f, 0xa4, - 0xeb, 0x8e, 0xdf, 0xf2, 0xe4, 0x10, 0x9f, 0x17, 0xd8, 0x13, 0x77, 0x34, 0x18, 0x36, 0x30, 0x4f, - 0x38, 0xd0, 0x4d, 0x98, 0x58, 0x76, 0xda, 0xce, 0xa6, 0xeb, 0xb9, 0xb1, 0x4b, 0x22, 0xf4, 0x0c, - 0x14, 0x9d, 0x56, 0x8b, 0xf1, 0xb0, 0xd2, 0xd2, 0x85, 0xa3, 0xc3, 0x4a, 0x71, 0xb1, 0x45, 0x37, - 0x13, 0x28, 0xac, 0x03, 0x4c, 0x31, 0xd0, 0x73, 0x30, 0xd4, 0x0a, 0x83, 0x76, 0xb9, 0xc0, 0x30, - 0x1f, 0xa1, 0xfb, 0xae, 0x1a, 0x06, 0xed, 0x14, 0x2a, 0xc3, 0xb1, 0x7f, 0xb5, 0x00, 0x8f, 0x2f, - 0x93, 0xf6, 0xce, 0x6a, 0x23, 0x67, 0x54, 0xae, 0xc0, 0xd8, 0x5e, 0xe0, 0xbb, 0x71, 0x10, 0x46, - 0xa2, 0x69, 0xb6, 0xdd, 0xd7, 0x44, 0x19, 0x56, 0x50, 0x74, 0x19, 0x86, 0xda, 0x09, 0xab, 0x9e, - 0x90, 0x6c, 0x9e, 0x31, 0x69, 0x06, 0xa1, 0x18, 0x9d, 0x88, 0x84, 0x82, 0x4d, 0x29, 0x8c, 0xdb, - 0x11, 0x09, 0x31, 0x83, 0x24, 0xeb, 0x9d, 0xee, 0x04, 0xb1, 0x87, 0x52, 0xeb, 0x9d, 0x42, 0xb0, - 0x86, 0x85, 0xea, 0x50, 0xe2, 0xff, 0x30, 0xd9, 0x62, 0x1c, 0x29, 0x67, 0x95, 0x34, 0x24, 0x92, - 0x58, 0x25, 0x93, 0x6c, 0x43, 0xc8, 0x42, 0x9c, 0x10, 0x31, 0xe6, 0x69, 0xa4, 0xef, 0x3c, 0xfd, - 0x72, 0x01, 0x10, 0x1f, 0xc2, 0x3f, 0x67, 0x03, 0x77, 0xbb, 0x7b, 0xe0, 0x32, 0x8f, 0xc6, 0x9b, - 0x41, 0xd3, 0xf1, 0xd2, 0x7b, 0xec, 0xb4, 0x46, 0xef, 0x87, 0x2d, 0x40, 0xcb, 0xae, 0xdf, 0x22, - 0xe1, 0x19, 0xc8, 0x85, 0x27, 0xdb, 0x80, 0x37, 0x61, 0x6a, 0xd9, 0x73, 0x89, 0x1f, 0xd7, 0xea, - 0xcb, 0x81, 0xbf, 0xe5, 0x6e, 0xa3, 0xcf, 0xc3, 0x14, 0x15, 0x93, 0x83, 0x4e, 0xdc, 0x20, 0xcd, - 0xc0, 0x67, 0x12, 0x05, 0x15, 0x2e, 0xd1, 0xd1, 0x61, 0x65, 0x6a, 0xc3, 0x80, 0xe0, 0x14, 0xa6, - 0xfd, 0xbb, 0xf4, 0x43, 0x83, 0xbd, 0x76, 0xe0, 0x13, 0x3f, 0x5e, 0x0e, 0xfc, 0x16, 0x97, 0x3c, - 0x3f, 0x0f, 0x43, 0x31, 0xed, 0x38, 0xff, 0xc8, 0xa7, 0xe5, 0xd4, 0xd2, 0xee, 0x1e, 0x1f, 0x56, - 0x1e, 0xe9, 0xae, 0xc1, 0x3e, 0x88, 0xd5, 0x41, 0xdf, 0x06, 0x23, 0x51, 0xec, 0xc4, 0x9d, 0x48, - 0x7c, 0xf6, 0x93, 0xf2, 0xb3, 0x1b, 
0xac, 0xf4, 0xf8, 0xb0, 0x32, 0xad, 0xaa, 0xf1, 0x22, 0x2c, - 0x2a, 0xa0, 0x67, 0x61, 0x74, 0x8f, 0x44, 0x91, 0xb3, 0x2d, 0x85, 0x86, 0x69, 0x51, 0x77, 0x74, - 0x8d, 0x17, 0x63, 0x09, 0x47, 0x4f, 0xc1, 0x30, 0x09, 0xc3, 0x20, 0x14, 0xab, 0x6a, 0x52, 0x20, - 0x0e, 0xaf, 0xd0, 0x42, 0xcc, 0x61, 0xf6, 0x7f, 0xb0, 0x60, 0x5a, 0xf5, 0x95, 0xb7, 0x75, 0x06, - 0xa7, 0xc3, 0x3b, 0x00, 0x4d, 0xf9, 0x81, 0x11, 0xe3, 0x77, 0xe3, 0x57, 0x9f, 0xce, 0x5a, 0xc2, - 0xdd, 0xc3, 0x98, 0x50, 0x56, 0x45, 0x11, 0xd6, 0xa8, 0xd9, 0xff, 0xd2, 0x82, 0x73, 0xa9, 0x2f, - 0xba, 0xe9, 0x46, 0x31, 0x7a, 0xb7, 0xeb, 0xab, 0xe6, 0x07, 0xfb, 0x2a, 0x5a, 0x9b, 0x7d, 0x93, - 0x5a, 0x73, 0xb2, 0x44, 0xfb, 0xa2, 0xeb, 0x30, 0xec, 0xc6, 0x64, 0x4f, 0x7e, 0xcc, 0x53, 0x3d, - 0x3f, 0x86, 0xf7, 0x2a, 0x99, 0x91, 0x1a, 0xad, 0x89, 0x39, 0x01, 0xfb, 0x7f, 0x59, 0x50, 0xe2, - 0xcb, 0x76, 0xcd, 0x69, 0x9f, 0xc1, 0x5c, 0xd4, 0x60, 0x88, 0x51, 0xe7, 0x1d, 0x7f, 0x26, 0xbb, - 0xe3, 0xa2, 0x3b, 0xf3, 0x54, 0xf4, 0xe3, 0x22, 0xb6, 0x62, 0x66, 0xb4, 0x08, 0x33, 0x12, 0x73, - 0xaf, 0x41, 0x49, 0x21, 0xa0, 0x19, 0x28, 0xee, 0x12, 0x7e, 0xad, 0x2a, 0x61, 0xfa, 0x13, 0x9d, - 0x87, 0xe1, 0x7d, 0xc7, 0xeb, 0x88, 0xcd, 0x8e, 0xf9, 0x9f, 0xcf, 0x17, 0x5e, 0xb7, 0xec, 0x5f, - 0x62, 0x7b, 0x4c, 0x34, 0xb2, 0xe2, 0xef, 0x0b, 0x66, 0xf2, 0x01, 0x9c, 0xf7, 0x32, 0x78, 0x98, - 0x18, 0x88, 0xc1, 0x79, 0xde, 0xe3, 0xa2, 0xaf, 0xe7, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x7a, 0x0c, - 0x04, 0x6d, 0xba, 0xa2, 0x1c, 0x8f, 0xf5, 0x57, 0x88, 0xcb, 0xb7, 0x44, 0x19, 0x56, 0x50, 0xca, - 0x20, 0xce, 0xab, 0xce, 0xdf, 0x20, 0x07, 0x0d, 0xe2, 0x91, 0x66, 0x1c, 0x84, 0x1f, 0x6b, 0xf7, - 0x9f, 0xe0, 0xa3, 0xcf, 0xf9, 0xcb, 0xb8, 0x20, 0x50, 0xbc, 0x41, 0x0e, 0xf8, 0x54, 0xe8, 0x5f, - 0x57, 0xec, 0xf9, 0x75, 0x3f, 0x63, 0xc1, 0xa4, 0xfa, 0xba, 0x33, 0xd8, 0x48, 0x4b, 0xe6, 0x46, - 0x7a, 0xa2, 0xe7, 0x7a, 0xcc, 0xd9, 0x42, 0x7f, 0xc6, 0x58, 0x80, 0xc0, 0xa9, 0x87, 0x01, 0x1d, - 0x1a, 0xca, 0xb3, 0x3f, 0xce, 0x09, 0x19, 0xe4, 0xbb, 0x6e, 0x90, 0x83, 0x8d, 0x80, 0x8a, 0x0f, - 0xd9, 0xdf, 0x65, 0xcc, 0xda, 0x50, 0xcf, 0x59, 0xfb, 0xb9, 0x02, 0x5c, 0x50, 0x23, 0x60, 0x1c, - 0xd0, 0x7f, 0xde, 0xc7, 0xe0, 0x25, 0x18, 0x6f, 0x91, 0x2d, 0xa7, 0xe3, 0xc5, 0xea, 0xe6, 0x3c, - 0xcc, 0xb5, 0x27, 0xd5, 0xa4, 0x18, 0xeb, 0x38, 0x27, 0x18, 0xb6, 0x9f, 0x18, 0x67, 0xbc, 0x37, - 0x76, 0xe8, 0x0a, 0xa6, 0xd2, 0x9b, 0xa6, 0xff, 0x98, 0xd0, 0xf5, 0x1f, 0x42, 0xd7, 0xf1, 0x14, - 0x0c, 0xbb, 0x7b, 0xf4, 0x2c, 0x2e, 0x98, 0x47, 0x6c, 0x8d, 0x16, 0x62, 0x0e, 0x43, 0x9f, 0x81, - 0xd1, 0x66, 0xb0, 0xb7, 0xe7, 0xf8, 0xad, 0x72, 0x91, 0xc9, 0x93, 0xe3, 0xf4, 0xb8, 0x5e, 0xe6, - 0x45, 0x58, 0xc2, 0xd0, 0xe3, 0x30, 0xe4, 0x84, 0xdb, 0x51, 0x79, 0x88, 0xe1, 0x8c, 0xd1, 0x96, - 0x16, 0xc3, 0xed, 0x08, 0xb3, 0x52, 0x2a, 0x27, 0xde, 0x0b, 0xc2, 0x5d, 0xd7, 0xdf, 0xae, 0xba, - 0x21, 0x13, 0xfa, 0x34, 0x39, 0xf1, 0xae, 0x82, 0x60, 0x0d, 0x0b, 0xad, 0xc2, 0x70, 0x3b, 0x08, - 0xe3, 0xa8, 0x3c, 0xc2, 0x86, 0xfb, 0xc9, 0x9c, 0xad, 0xc4, 0xbf, 0xb6, 0x1e, 0x84, 0x71, 0xf2, - 0x01, 0xf4, 0x5f, 0x84, 0x79, 0x75, 0xf4, 0x6d, 0x50, 0x24, 0xfe, 0x7e, 0x79, 0x94, 0x51, 0x99, - 0xcb, 0xa2, 0xb2, 0xe2, 0xef, 0xdf, 0x71, 0xc2, 0x84, 0xcf, 0xac, 0xf8, 0xfb, 0x98, 0xd6, 0x41, - 0x5f, 0x86, 0x92, 0xd4, 0x9d, 0x46, 0xe5, 0xb1, 0xfc, 0x25, 0x86, 0x05, 0x12, 0x26, 0xef, 0x77, - 0xdc, 0x90, 0xec, 0x11, 0x3f, 0x8e, 0x92, 0xdb, 0xaf, 0x84, 0x46, 0x38, 0xa1, 0x86, 0xbe, 0x2c, - 0xaf, 0x73, 0x6b, 0x41, 0xc7, 0x8f, 0xa3, 0x72, 0x89, 0x75, 0x2f, 0x53, 0xd1, 0x76, 0x27, 0xc1, - 0x4b, 0xdf, 0xf7, 0x78, 0x65, 0x6c, 0x90, 0x42, 0x18, 0x26, 
0x3d, 0x77, 0x9f, 0xf8, 0x24, 0x8a, - 0xea, 0x61, 0xb0, 0x49, 0xca, 0xc0, 0x7a, 0x7e, 0x31, 0x5b, 0xff, 0x14, 0x6c, 0x92, 0xa5, 0xd9, - 0xa3, 0xc3, 0xca, 0xe4, 0x4d, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0xdb, 0x30, 0x45, 0x05, 0x54, 0x37, - 0x21, 0x3a, 0xde, 0x8f, 0x28, 0x93, 0x4e, 0xb1, 0x51, 0x09, 0xa7, 0x88, 0xa0, 0xb7, 0xa0, 0xe4, - 0xb9, 0x5b, 0xa4, 0x79, 0xd0, 0xf4, 0x48, 0x79, 0x82, 0x51, 0xcc, 0xdc, 0x56, 0x37, 0x25, 0x12, - 0xbf, 0x00, 0xa8, 0xbf, 0x38, 0xa9, 0x8e, 0xee, 0xc0, 0x23, 0x31, 0x09, 0xf7, 0x5c, 0xdf, 0xa1, - 0xdb, 0x41, 0xc8, 0x93, 0x4c, 0x8b, 0x37, 0xc9, 0xd6, 0xdb, 0x25, 0x31, 0x74, 0x8f, 0x6c, 0x64, - 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x16, 0x4c, 0xb3, 0x9d, 0x50, 0xef, 0x78, 0x5e, 0x3d, 0xf0, 0xdc, - 0xe6, 0x41, 0x79, 0x8a, 0x11, 0xfc, 0x8c, 0x54, 0xd3, 0xd5, 0x4c, 0x30, 0xbd, 0xf1, 0x26, 0xff, - 0x70, 0xba, 0x36, 0xda, 0x64, 0x6a, 0x9b, 0x4e, 0xe8, 0xc6, 0x07, 0x74, 0xfd, 0x92, 0xfb, 0x71, - 0x79, 0xba, 0xe7, 0xfd, 0x51, 0x47, 0x55, 0xba, 0x1d, 0xbd, 0x10, 0xa7, 0x09, 0xd2, 0xad, 0x1d, - 0xc5, 0x2d, 0xd7, 0x2f, 0xcf, 0x30, 0x8e, 0xa1, 0x76, 0x46, 0x83, 0x16, 0x62, 0x0e, 0x63, 0x2a, - 0x1b, 0xfa, 0xe3, 0x16, 0xe5, 0xa0, 0xb3, 0x0c, 0x31, 0x51, 0xd9, 0x48, 0x00, 0x4e, 0x70, 0xe8, - 0xb1, 0x1c, 0xc7, 0x07, 0x65, 0xc4, 0x50, 0xd5, 0x76, 0xd9, 0xd8, 0xf8, 0x32, 0xa6, 0xe5, 0xe8, - 0x26, 0x8c, 0x12, 0x7f, 0x7f, 0x35, 0x0c, 0xf6, 0xca, 0xe7, 0xf2, 0xf7, 0xec, 0x0a, 0x47, 0xe1, - 0x0c, 0x3d, 0xb9, 0x00, 0x88, 0x62, 0x2c, 0x49, 0xa0, 0xfb, 0x50, 0xce, 0x98, 0x11, 0x3e, 0x01, - 0xe7, 0xd9, 0x04, 0x7c, 0x41, 0xd4, 0x2d, 0x6f, 0xe4, 0xe0, 0x1d, 0xf7, 0x80, 0xe1, 0x5c, 0xea, - 0xe8, 0xbb, 0x60, 0x92, 0x6f, 0x28, 0xae, 0xef, 0x8d, 0xca, 0x17, 0xd8, 0xd7, 0x5c, 0xce, 0xdf, - 0x9c, 0x1c, 0x71, 0xe9, 0x82, 0xe8, 0xd0, 0xa4, 0x5e, 0x1a, 0x61, 0x93, 0x9a, 0xbd, 0x09, 0x53, - 0x8a, 0x6f, 0xb1, 0xa5, 0x83, 0x2a, 0x30, 0x4c, 0x19, 0xb2, 0xbc, 0xb1, 0x97, 0xe8, 0x4c, 0x31, - 0x3d, 0x1d, 0xe6, 0xe5, 0x6c, 0xa6, 0xdc, 0x0f, 0xc8, 0xd2, 0x41, 0x4c, 0xf8, 0xad, 0xab, 0xa8, - 0xcd, 0x94, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x7f, 0x5c, 0xee, 0x49, 0x98, 0xe3, 0x00, 0xc7, 0xc1, - 0x0b, 0x30, 0xb6, 0x13, 0x44, 0x31, 0xc5, 0x66, 0x6d, 0x0c, 0x27, 0x92, 0xce, 0x75, 0x51, 0x8e, - 0x15, 0x06, 0x7a, 0x03, 0x26, 0x9b, 0x7a, 0x03, 0xe2, 0x2c, 0x53, 0x43, 0x60, 0xb4, 0x8e, 0x4d, - 0x5c, 0xf4, 0x3a, 0x8c, 0xb1, 0xd7, 0x9a, 0x66, 0xe0, 0x89, 0xfb, 0x9d, 0x3c, 0x90, 0xc7, 0xea, - 0xa2, 0xfc, 0x58, 0xfb, 0x8d, 0x15, 0x36, 0xbd, 0x73, 0xd3, 0x2e, 0xd4, 0xea, 0xe2, 0x14, 0x51, - 0x77, 0xee, 0xeb, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0x37, 0x0a, 0xda, 0x28, 0xd3, 0x1b, 0x0b, 0x41, - 0x75, 0x18, 0xbd, 0xe7, 0xb8, 0xb1, 0xeb, 0x6f, 0x0b, 0x71, 0xe1, 0xd9, 0x9e, 0x47, 0x0a, 0xab, - 0x74, 0x97, 0x57, 0xe0, 0x87, 0x9e, 0xf8, 0x83, 0x25, 0x19, 0x4a, 0x31, 0xec, 0xf8, 0x3e, 0xa5, - 0x58, 0x18, 0x94, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, 0x4b, 0x32, 0xe8, 0x5d, 0x00, 0xb9, - 0x2c, 0x49, 0x4b, 0xbc, 0x92, 0xbc, 0xd0, 0x9f, 0xe8, 0x86, 0xaa, 0xb3, 0x34, 0x45, 0x8f, 0xd4, - 0xe4, 0x3f, 0xd6, 0xe8, 0xd9, 0x31, 0x13, 0xab, 0xba, 0x3b, 0x83, 0xbe, 0x93, 0x72, 0x02, 0x27, - 0x8c, 0x49, 0x6b, 0x31, 0x16, 0x83, 0xf3, 0xdc, 0x60, 0x52, 0xf1, 0x86, 0xbb, 0x47, 0x74, 0xae, - 0x21, 0x88, 0xe0, 0x84, 0x9e, 0xfd, 0x0b, 0x45, 0x28, 0xe7, 0x75, 0x97, 0x2e, 0x3a, 0x72, 0xdf, - 0x8d, 0x97, 0xa9, 0x34, 0x64, 0x99, 0x8b, 0x6e, 0x45, 0x94, 0x63, 0x85, 0x41, 0x67, 0x3f, 0x72, - 0xb7, 0xe5, 0xa5, 0x66, 0x38, 0x99, 0xfd, 0x06, 0x2b, 0xc5, 0x02, 0x4a, 0xf1, 0x42, 0xe2, 0x44, - 0xe2, 0x19, 0x4e, 0x5b, 0x25, 0x98, 0x95, 0x62, 0x01, 0xd5, 0xf5, 0x11, 0x43, 0x7d, 
0xf4, 0x11, - 0xc6, 0x10, 0x0d, 0x9f, 0xee, 0x10, 0xa1, 0xaf, 0x00, 0x6c, 0xb9, 0xbe, 0x1b, 0xed, 0x30, 0xea, - 0x23, 0x27, 0xa6, 0xae, 0x64, 0xa9, 0x55, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0xab, 0x30, 0xae, 0x36, - 0x60, 0xad, 0x5a, 0x1e, 0x35, 0xdf, 0x78, 0x12, 0x6e, 0x54, 0xc5, 0x3a, 0x9e, 0xfd, 0x5e, 0x7a, - 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0x35, 0xe8, 0xf8, 0x16, 0x7a, 0x8f, 0xaf, 0xfd, 0x6b, 0x45, - 0x98, 0x36, 0x1a, 0xeb, 0x44, 0x03, 0xf0, 0xac, 0x6b, 0xf4, 0x9c, 0x73, 0x62, 0x22, 0xf6, 0x9f, - 0xdd, 0x7f, 0xab, 0xe8, 0x67, 0x21, 0xdd, 0x01, 0xbc, 0x3e, 0xfa, 0x0a, 0x94, 0x3c, 0x27, 0x62, - 0xba, 0x0d, 0x22, 0xf6, 0xdd, 0x20, 0xc4, 0x92, 0x7b, 0x84, 0x13, 0xc5, 0xda, 0x51, 0xc3, 0x69, - 0x27, 0x24, 0xe9, 0x81, 0x4c, 0x65, 0x1f, 0xf9, 0xce, 0xab, 0x3a, 0x41, 0x05, 0xa4, 0x03, 0xcc, - 0x61, 0xe8, 0x75, 0x98, 0x08, 0x09, 0x5b, 0x15, 0xcb, 0x54, 0x94, 0x63, 0xcb, 0x6c, 0x38, 0x91, - 0xf9, 0xb0, 0x06, 0xc3, 0x06, 0x66, 0x22, 0xca, 0x8f, 0xf4, 0x10, 0xe5, 0x9f, 0x85, 0x51, 0xf6, - 0x43, 0xad, 0x00, 0x35, 0x1b, 0x35, 0x5e, 0x8c, 0x25, 0x3c, 0xbd, 0x60, 0xc6, 0x06, 0x5c, 0x30, - 0xcf, 0xc1, 0x54, 0xd5, 0x21, 0x7b, 0x81, 0xbf, 0xe2, 0xb7, 0xda, 0x81, 0xeb, 0xc7, 0xa8, 0x0c, - 0x43, 0xec, 0x74, 0xe0, 0x7b, 0x7b, 0x88, 0x52, 0xc0, 0x43, 0x54, 0x30, 0xb7, 0x7f, 0xbb, 0x00, - 0x93, 0x55, 0xe2, 0x91, 0x98, 0xf0, 0xab, 0x4c, 0x84, 0x56, 0x01, 0x6d, 0x87, 0x4e, 0x93, 0xd4, - 0x49, 0xe8, 0x06, 0x2d, 0x5d, 0xd7, 0x59, 0x64, 0xef, 0x09, 0xe8, 0x5a, 0x17, 0x14, 0x67, 0xd4, - 0x40, 0xef, 0xc0, 0x64, 0x3b, 0x24, 0x86, 0x8a, 0xce, 0xca, 0x93, 0x46, 0xea, 0x3a, 0x22, 0x17, - 0x84, 0x8d, 0x22, 0x6c, 0x92, 0x42, 0xdf, 0x01, 0x33, 0x41, 0xd8, 0xde, 0x71, 0xfc, 0x2a, 0x69, - 0x13, 0xbf, 0x45, 0x25, 0x7d, 0xa1, 0x82, 0x38, 0x7f, 0x74, 0x58, 0x99, 0xb9, 0x95, 0x82, 0xe1, - 0x2e, 0x6c, 0xf4, 0x0e, 0xcc, 0xb6, 0xc3, 0xa0, 0xed, 0x6c, 0xb3, 0x85, 0x22, 0x04, 0x1a, 0xce, - 0x7d, 0x5e, 0x38, 0x3a, 0xac, 0xcc, 0xd6, 0xd3, 0xc0, 0xe3, 0xc3, 0xca, 0x39, 0x36, 0x50, 0xb4, - 0x24, 0x01, 0xe2, 0x6e, 0x32, 0xf6, 0x36, 0x5c, 0xa8, 0x06, 0xf7, 0xfc, 0x7b, 0x4e, 0xd8, 0x5a, - 0xac, 0xd7, 0x34, 0xdd, 0xc1, 0xba, 0xbc, 0xbb, 0xf2, 0xb7, 0xe8, 0xcc, 0x73, 0x4a, 0xab, 0xc9, - 0xe5, 0x97, 0x55, 0xd7, 0x23, 0x39, 0x3a, 0x8a, 0xbf, 0x5d, 0x30, 0x5a, 0x4a, 0xf0, 0xd5, 0xb3, - 0x82, 0x95, 0xfb, 0xac, 0xf0, 0x36, 0x8c, 0x6d, 0xb9, 0xc4, 0x6b, 0x61, 0xb2, 0x25, 0x66, 0xe6, - 0x99, 0xfc, 0xe7, 0xb5, 0x55, 0x8a, 0x29, 0x75, 0x52, 0xfc, 0xe6, 0xbb, 0x2a, 0x2a, 0x63, 0x45, - 0x06, 0xed, 0xc2, 0x8c, 0xbc, 0x5a, 0x49, 0xa8, 0xd8, 0xc4, 0xcf, 0xf6, 0xba, 0xaf, 0x99, 0xc4, - 0xd9, 0x04, 0xe2, 0x14, 0x19, 0xdc, 0x45, 0x98, 0x5e, 0x75, 0xf7, 0xe8, 0x71, 0x35, 0xc4, 0x96, - 0x34, 0xbb, 0xea, 0xb2, 0x5b, 0x3b, 0x2b, 0xb5, 0x7f, 0xcc, 0x82, 0x47, 0xbb, 0x46, 0x46, 0x68, - 0x2f, 0x4e, 0x79, 0x16, 0xd2, 0xda, 0x84, 0x42, 0x7f, 0x6d, 0x82, 0xfd, 0x8f, 0x2c, 0x38, 0xbf, - 0xb2, 0xd7, 0x8e, 0x0f, 0xaa, 0xae, 0xf9, 0xf4, 0xf1, 0x1a, 0x8c, 0xec, 0x91, 0x96, 0xdb, 0xd9, - 0x13, 0x33, 0x57, 0x91, 0x2c, 0x7d, 0x8d, 0x95, 0x1e, 0x1f, 0x56, 0x26, 0x1b, 0x71, 0x10, 0x3a, - 0xdb, 0x84, 0x17, 0x60, 0x81, 0xce, 0x0e, 0x46, 0xf7, 0x03, 0x72, 0xd3, 0xdd, 0x73, 0xe5, 0x73, - 0x69, 0x4f, 0x8d, 0xda, 0xbc, 0x1c, 0xd0, 0xf9, 0xb7, 0x3b, 0x8e, 0x1f, 0xbb, 0xf1, 0x81, 0x78, - 0xd5, 0x91, 0x44, 0x70, 0x42, 0xcf, 0xfe, 0x86, 0x05, 0xd3, 0x92, 0x97, 0x2c, 0xb6, 0x5a, 0x21, - 0x89, 0x22, 0x34, 0x07, 0x05, 0xb7, 0x2d, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0x5a, 0x1d, 0x17, 0xdc, - 0x36, 0xaa, 0x43, 0x89, 0xbf, 0xba, 0x26, 0x8b, 0x6b, 0xa0, 0xb7, 0x5b, 0xd6, 0x83, 0x0d, 0x59, - 0x13, 0x27, 
0x44, 0xa4, 0x54, 0xcc, 0xce, 0xa1, 0xa2, 0xf9, 0x24, 0x74, 0x5d, 0x94, 0x63, 0x85, - 0x81, 0xae, 0xc0, 0x98, 0x1f, 0xb4, 0xf8, 0x23, 0x38, 0xdf, 0xd3, 0x6c, 0xc9, 0xae, 0x8b, 0x32, - 0xac, 0xa0, 0xf6, 0x0f, 0x5a, 0x30, 0x21, 0xbf, 0x6c, 0x40, 0x01, 0x9d, 0x6e, 0xad, 0x44, 0x38, - 0x4f, 0xb6, 0x16, 0x15, 0xb0, 0x19, 0xc4, 0x90, 0xab, 0x8b, 0x27, 0x91, 0xab, 0xed, 0x1f, 0x2d, - 0xc0, 0x94, 0xec, 0x4e, 0xa3, 0xb3, 0x19, 0x91, 0x18, 0x6d, 0x40, 0xc9, 0xe1, 0x43, 0x4e, 0xe4, - 0x8a, 0x7d, 0x2a, 0xfb, 0x42, 0x67, 0xcc, 0x4f, 0x22, 0xea, 0x2c, 0xca, 0xda, 0x38, 0x21, 0x84, - 0x3c, 0x98, 0xf5, 0x83, 0x98, 0x1d, 0x7b, 0x0a, 0xde, 0xeb, 0xd9, 0x21, 0x4d, 0xfd, 0xa2, 0xa0, - 0x3e, 0xbb, 0x9e, 0xa6, 0x82, 0xbb, 0x09, 0xa3, 0x15, 0xa9, 0x44, 0x2a, 0xe6, 0x5f, 0xe1, 0xf4, - 0x59, 0xc8, 0xd6, 0x21, 0xd9, 0xbf, 0x62, 0x41, 0x49, 0xa2, 0x9d, 0xc5, 0x0b, 0xd3, 0x1a, 0x8c, - 0x46, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xbd, 0x3a, 0xce, 0xe7, 0x2b, 0x39, 0xcd, 0xf9, 0xff, 0x08, - 0x4b, 0x1a, 0x4c, 0x0b, 0xae, 0xba, 0xff, 0x09, 0xd1, 0x82, 0xab, 0xfe, 0xe4, 0x9c, 0x30, 0xff, - 0x8d, 0xf5, 0x59, 0x53, 0x15, 0x50, 0xa1, 0xb3, 0x1d, 0x92, 0x2d, 0xf7, 0x7e, 0x5a, 0xe8, 0xac, - 0xb3, 0x52, 0x2c, 0xa0, 0xe8, 0x5d, 0x98, 0x68, 0x4a, 0xe5, 0x71, 0xc2, 0x06, 0x9e, 0xee, 0xa9, - 0x8a, 0x57, 0xaf, 0x36, 0xdc, 0x40, 0x6e, 0x59, 0xab, 0x8f, 0x0d, 0x6a, 0xe6, 0xbb, 0x7f, 0xb1, - 0xdf, 0xbb, 0x7f, 0x42, 0x37, 0xf7, 0xe5, 0xda, 0xfe, 0x71, 0x0b, 0x46, 0xb8, 0x0a, 0x72, 0x30, - 0x9d, 0xad, 0xf6, 0x0a, 0x95, 0x8c, 0xdd, 0x1d, 0x5a, 0x28, 0x1e, 0xa5, 0xd0, 0x1a, 0x94, 0xd8, - 0x0f, 0xa6, 0x8a, 0x29, 0xe6, 0x5b, 0x06, 0xf2, 0x56, 0xf5, 0x0e, 0xde, 0x91, 0xd5, 0x70, 0x42, - 0xc1, 0xfe, 0xa1, 0x22, 0x65, 0x55, 0x09, 0xaa, 0x71, 0x82, 0x5b, 0x0f, 0xef, 0x04, 0x2f, 0x3c, - 0xac, 0x13, 0x7c, 0x1b, 0xa6, 0x9b, 0xda, 0x93, 0x57, 0x32, 0x93, 0x57, 0x7a, 0x2e, 0x12, 0xed, - 0x75, 0x8c, 0xab, 0xe1, 0x96, 0x4d, 0x22, 0x38, 0x4d, 0x15, 0x7d, 0x27, 0x4c, 0xf0, 0x79, 0x16, - 0xad, 0x0c, 0xb1, 0x56, 0x3e, 0x93, 0xbf, 0x5e, 0xf4, 0x26, 0xd8, 0x4a, 0x6c, 0x68, 0xd5, 0xb1, - 0x41, 0xcc, 0xfe, 0x85, 0x31, 0x18, 0x5e, 0xd9, 0x27, 0x7e, 0x7c, 0x06, 0x0c, 0xa9, 0x09, 0x53, - 0xae, 0xbf, 0x1f, 0x78, 0xfb, 0xa4, 0xc5, 0xe1, 0x27, 0x39, 0x5c, 0x1f, 0x11, 0xa4, 0xa7, 0x6a, - 0x06, 0x09, 0x9c, 0x22, 0xf9, 0x30, 0x6e, 0xed, 0xd7, 0x60, 0x84, 0xcf, 0xbd, 0xb8, 0xb2, 0x67, - 0x2a, 0xd8, 0xd9, 0x20, 0x8a, 0x5d, 0x90, 0x68, 0x14, 0xb8, 0x46, 0x5f, 0x54, 0x47, 0xef, 0xc1, - 0xd4, 0x96, 0x1b, 0x46, 0x31, 0xbd, 0x6e, 0x47, 0xb1, 0xb3, 0xd7, 0x7e, 0x80, 0x5b, 0xba, 0x1a, - 0x87, 0x55, 0x83, 0x12, 0x4e, 0x51, 0x46, 0xdb, 0x30, 0x49, 0x2f, 0x8e, 0x49, 0x53, 0xa3, 0x27, - 0x6e, 0x4a, 0xa9, 0xe1, 0x6e, 0xea, 0x84, 0xb0, 0x49, 0x97, 0x32, 0x93, 0x26, 0xbb, 0x68, 0x8e, - 0x31, 0x89, 0x42, 0x31, 0x13, 0x7e, 0xc3, 0xe4, 0x30, 0xca, 0x93, 0x98, 0xa9, 0x48, 0xc9, 0xe4, - 0x49, 0x9a, 0x41, 0xc8, 0x57, 0xa1, 0x44, 0xe8, 0x10, 0x52, 0xc2, 0xe2, 0xb1, 0x61, 0x61, 0xb0, - 0xbe, 0xae, 0xb9, 0xcd, 0x30, 0x30, 0xf5, 0x23, 0x2b, 0x92, 0x12, 0x4e, 0x88, 0xa2, 0x65, 0x18, - 0x89, 0x48, 0xe8, 0x92, 0x48, 0x3c, 0x3b, 0xf4, 0x98, 0x46, 0x86, 0xc6, 0x4d, 0x48, 0xf9, 0x6f, - 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0xb0, 0xdb, 0x10, 0x7b, 0x69, 0xd0, 0x96, 0xd7, 0x22, 0x2b, 0xc5, - 0x02, 0x8a, 0xde, 0x82, 0xd1, 0x90, 0x78, 0x4c, 0x01, 0x37, 0x39, 0xf8, 0x22, 0xe7, 0xfa, 0x3c, - 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x06, 0xa0, 0x90, 0x50, 0x19, 0xc2, 0xf5, 0xb7, 0x95, 0x01, 0x85, - 0x78, 0x3f, 0x78, 0x4c, 0xb4, 0x7f, 0x0e, 0x27, 0x18, 0x7e, 0x1c, 0x06, 0x9e, 0x47, 0x42, 0x9c, - 0x51, 0x0d, 0x5d, 0x83, 0x59, 0x55, 
0x5a, 0xf3, 0xa3, 0xd8, 0xf1, 0x9b, 0x84, 0x3d, 0x1d, 0x94, - 0x12, 0xa9, 0x08, 0xa7, 0x11, 0x70, 0x77, 0x1d, 0xfb, 0xeb, 0x54, 0x9c, 0xa1, 0xa3, 0x75, 0x06, - 0xb2, 0xc0, 0x9b, 0xa6, 0x2c, 0x70, 0x31, 0x77, 0xe6, 0x72, 0xe4, 0x80, 0x23, 0x0b, 0xc6, 0xb5, - 0x99, 0x4d, 0xd6, 0xac, 0xd5, 0x63, 0xcd, 0x76, 0x60, 0x86, 0xae, 0xf4, 0x5b, 0x9b, 0xcc, 0x9b, - 0xa2, 0xc5, 0x16, 0x66, 0xe1, 0xc1, 0x16, 0x66, 0x59, 0x34, 0x30, 0x73, 0x33, 0x45, 0x10, 0x77, - 0x35, 0x81, 0x5e, 0x93, 0xda, 0xa8, 0xa2, 0x61, 0x18, 0xc5, 0x35, 0x4d, 0xc7, 0x87, 0x95, 0x19, - 0xed, 0x43, 0x74, 0xed, 0x93, 0xfd, 0x55, 0xf9, 0x8d, 0x9c, 0xd9, 0x2c, 0x40, 0xa9, 0xa9, 0x16, - 0x8b, 0x65, 0xda, 0xd2, 0xaa, 0xe5, 0x80, 0x13, 0x1c, 0xba, 0x47, 0xe9, 0x15, 0x24, 0x6d, 0xcb, - 0x47, 0x2f, 0x28, 0x98, 0x41, 0xec, 0x97, 0x01, 0x56, 0xee, 0x93, 0x26, 0x5f, 0xea, 0xfa, 0xa3, - 0xae, 0x95, 0xff, 0xa8, 0x6b, 0xff, 0x47, 0x0b, 0xa6, 0x56, 0x97, 0x8d, 0x6b, 0xe2, 0x3c, 0x00, - 0xbf, 0x1b, 0xdd, 0xbd, 0xbb, 0x2e, 0xdf, 0x2b, 0xb8, 0xca, 0x59, 0x95, 0x62, 0x0d, 0x03, 0x5d, - 0x84, 0xa2, 0xd7, 0xf1, 0xc5, 0x95, 0x65, 0xf4, 0xe8, 0xb0, 0x52, 0xbc, 0xd9, 0xf1, 0x31, 0x2d, - 0xd3, 0xcc, 0xe7, 0x8a, 0x03, 0x9b, 0xcf, 0xf5, 0xf5, 0x92, 0x40, 0x15, 0x18, 0xbe, 0x77, 0xcf, - 0x6d, 0x45, 0xe5, 0xe1, 0xe4, 0x2d, 0xe5, 0xee, 0xdd, 0x5a, 0x35, 0xc2, 0xbc, 0xdc, 0xfe, 0x5a, - 0x11, 0xe6, 0x56, 0x3d, 0x72, 0xff, 0x23, 0xda, 0xe3, 0x0e, 0x6a, 0xfc, 0x77, 0x32, 0x79, 0xf1, - 0xa4, 0x96, 0x8e, 0xfd, 0xc7, 0x63, 0x0b, 0x46, 0xb9, 0x81, 0x00, 0x1f, 0x91, 0xf1, 0xab, 0x6f, - 0x64, 0xb5, 0x9e, 0x3f, 0x20, 0xf3, 0x42, 0x3b, 0xc7, 0xed, 0xa6, 0xd4, 0x49, 0x2b, 0x4a, 0xb1, - 0x24, 0x3e, 0xf7, 0x79, 0x98, 0xd0, 0x31, 0x4f, 0x64, 0x40, 0xf5, 0x97, 0x8a, 0x30, 0x43, 0x7b, - 0xf0, 0x50, 0x27, 0xe2, 0x76, 0xf7, 0x44, 0x9c, 0xb6, 0xdd, 0x69, 0xff, 0xd9, 0x78, 0x37, 0x3d, - 0x1b, 0x2f, 0xe5, 0xcd, 0xc6, 0x59, 0xcf, 0xc1, 0x5f, 0xb6, 0xe0, 0xdc, 0xaa, 0x17, 0x34, 0x77, - 0x53, 0x26, 0xb1, 0xaf, 0xc2, 0x38, 0xe5, 0xe3, 0x91, 0xe1, 0x0c, 0x60, 0xb8, 0x87, 0x08, 0x10, - 0xd6, 0xf1, 0xb4, 0x6a, 0xb7, 0x6f, 0xd7, 0xaa, 0x59, 0x5e, 0x25, 0x02, 0x84, 0x75, 0x3c, 0xfb, - 0x37, 0x2d, 0x78, 0xe2, 0xda, 0xf2, 0x4a, 0xb2, 0x14, 0xbb, 0x1c, 0x5b, 0xe8, 0x2d, 0xb0, 0xa5, - 0x75, 0x25, 0xb9, 0x05, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0x93, 0xe2, 0xb4, 0xf5, 0x53, 0x16, 0x9c, - 0xbb, 0xe6, 0xc6, 0xf4, 0x58, 0x4e, 0xbb, 0x58, 0xd0, 0x73, 0x39, 0x72, 0xe3, 0x20, 0x3c, 0x48, - 0xbb, 0x58, 0x60, 0x05, 0xc1, 0x1a, 0x16, 0x6f, 0x79, 0xdf, 0x8d, 0x68, 0x4f, 0x0b, 0xa6, 0x2a, - 0x0a, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x87, 0xb5, 0xdc, 0x90, 0x5d, 0x25, 0x0e, 0x04, 0x87, 0x55, - 0x1f, 0x56, 0x95, 0x00, 0x9c, 0xe0, 0xd8, 0x3f, 0x66, 0xc1, 0x85, 0x6b, 0x5e, 0x27, 0x8a, 0x49, - 0xb8, 0x15, 0x19, 0x9d, 0x7d, 0x19, 0x4a, 0x44, 0x5e, 0xd7, 0x45, 0x5f, 0x95, 0x80, 0xa9, 0xee, - 0xf1, 0xdc, 0xbf, 0x43, 0xe1, 0x0d, 0x60, 0x5f, 0x7e, 0x32, 0xbb, 0xe8, 0x9f, 0x2d, 0xc0, 0xe4, - 0xf5, 0x8d, 0x8d, 0xfa, 0x35, 0x12, 0x8b, 0x53, 0xac, 0xbf, 0xaa, 0x19, 0x6b, 0x1a, 0xb3, 0x5e, - 0x97, 0xa2, 0x4e, 0xec, 0x7a, 0xf3, 0xdc, 0xa1, 0x70, 0xbe, 0xe6, 0xc7, 0xb7, 0xc2, 0x46, 0x1c, - 0xba, 0xfe, 0x76, 0xa6, 0x8e, 0x4d, 0x9e, 0xb5, 0xc5, 0xbc, 0xb3, 0x16, 0xbd, 0x0c, 0x23, 0xcc, - 0xa3, 0x51, 0x5e, 0x4f, 0x1e, 0x53, 0x77, 0x0a, 0x56, 0x7a, 0x7c, 0x58, 0x29, 0xdd, 0xc6, 0x35, - 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x1b, 0xc6, 0x77, 0xe2, 0xb8, 0x7d, 0x9d, 0x38, 0x2d, 0x12, 0x4a, - 0xee, 0x70, 0x29, 0x8b, 0x3b, 0xd0, 0x41, 0xe0, 0x68, 0xc9, 0x86, 0x4a, 0xca, 0x22, 0xac, 0xd3, - 0xb1, 0x1b, 0x00, 0x09, 0xec, 0x94, 0xf4, 0x0b, 0xf6, 0x1f, 
0x58, 0x30, 0xca, 0x9d, 0x4b, 0x42, - 0xf4, 0x05, 0x18, 0x22, 0xf7, 0x49, 0x53, 0x48, 0x8e, 0x99, 0x1d, 0x4e, 0x04, 0x0f, 0xae, 0x2d, - 0xa7, 0xff, 0x31, 0xab, 0x85, 0xae, 0xc3, 0x28, 0xed, 0xed, 0x35, 0xe5, 0x69, 0xf3, 0x64, 0xde, - 0x17, 0xab, 0x69, 0xe7, 0xb2, 0x8a, 0x28, 0xc2, 0xb2, 0x3a, 0xd3, 0xfc, 0x36, 0xdb, 0x0d, 0xca, - 0xc0, 0xe2, 0x5e, 0xe7, 0xec, 0xc6, 0x72, 0x9d, 0x23, 0x09, 0x6a, 0x5c, 0xf3, 0x2b, 0x0b, 0x71, - 0x42, 0xc4, 0xde, 0x80, 0x12, 0x9d, 0xd4, 0x45, 0xcf, 0x75, 0x7a, 0x2b, 0x9d, 0x9f, 0x87, 0x92, - 0x54, 0x00, 0x47, 0xc2, 0xfd, 0x85, 0x51, 0x95, 0xfa, 0xe1, 0x08, 0x27, 0x70, 0x7b, 0x0b, 0xce, - 0x33, 0x6b, 0x0a, 0x27, 0xde, 0x31, 0xf6, 0x58, 0xff, 0xc5, 0xfc, 0x82, 0xb8, 0x88, 0xf1, 0x99, - 0x29, 0x6b, 0xf6, 0xfa, 0x13, 0x92, 0x62, 0x72, 0x29, 0xb3, 0xff, 0x68, 0x08, 0x1e, 0xab, 0x35, - 0xf2, 0xfd, 0x8e, 0x5e, 0x87, 0x09, 0x2e, 0xa6, 0xd1, 0xa5, 0xed, 0x78, 0xa2, 0x5d, 0xf5, 0xd6, - 0xb8, 0xa1, 0xc1, 0xb0, 0x81, 0x89, 0x9e, 0x80, 0xa2, 0xfb, 0xbe, 0x9f, 0x36, 0xce, 0xad, 0xbd, - 0xbd, 0x8e, 0x69, 0x39, 0x05, 0x53, 0x89, 0x8f, 0xb3, 0x52, 0x05, 0x56, 0x52, 0xdf, 0x9b, 0x30, - 0xe5, 0x46, 0xcd, 0xc8, 0xad, 0xf9, 0x94, 0xcf, 0x24, 0x3e, 0x6b, 0x89, 0x92, 0x80, 0x76, 0x5a, - 0x41, 0x71, 0x0a, 0x5b, 0xe3, 0xeb, 0xc3, 0x03, 0x4b, 0x8d, 0x7d, 0xfd, 0x41, 0xa8, 0x40, 0xdc, - 0x66, 0x5f, 0x17, 0x31, 0x43, 0x41, 0x21, 0x10, 0xf3, 0x0f, 0x8e, 0xb0, 0x84, 0xd1, 0x1b, 0x58, - 0x73, 0xc7, 0x69, 0x2f, 0x76, 0xe2, 0x9d, 0xaa, 0x1b, 0x35, 0x83, 0x7d, 0x12, 0x1e, 0xb0, 0xcb, - 0xf3, 0x58, 0x72, 0x03, 0x53, 0x80, 0xe5, 0xeb, 0x8b, 0x75, 0x8a, 0x89, 0xbb, 0xeb, 0x98, 0x52, - 0x21, 0x9c, 0x86, 0x54, 0xb8, 0x08, 0xd3, 0xb2, 0x99, 0x06, 0x89, 0xd8, 0x19, 0x31, 0xce, 0x3a, - 0xa6, 0xbc, 0x49, 0x45, 0xb1, 0xea, 0x56, 0x1a, 0x1f, 0xbd, 0x06, 0x93, 0xae, 0xef, 0xc6, 0xae, - 0x13, 0x07, 0x21, 0x3b, 0x61, 0xf9, 0x3d, 0x99, 0x3d, 0x8a, 0xd6, 0x74, 0x00, 0x36, 0xf1, 0xec, - 0x3f, 0x1c, 0x82, 0x59, 0x36, 0x6d, 0xdf, 0x5a, 0x61, 0x9f, 0x98, 0x15, 0x76, 0xbb, 0x7b, 0x85, - 0x9d, 0x86, 0xb8, 0xfb, 0x71, 0x2e, 0xb3, 0xf7, 0xa0, 0xa4, 0xec, 0xab, 0xa5, 0x8b, 0x80, 0x95, - 0xe3, 0x22, 0xd0, 0x5f, 0xfa, 0x90, 0xcf, 0xb8, 0xc5, 0xcc, 0x67, 0xdc, 0xbf, 0x63, 0x41, 0x62, - 0x66, 0x8a, 0xae, 0x43, 0xa9, 0x1d, 0x30, 0x53, 0x8e, 0x50, 0xda, 0x47, 0x3d, 0x96, 0x79, 0x50, - 0xf1, 0x43, 0x91, 0x8f, 0x5f, 0x5d, 0xd6, 0xc0, 0x49, 0x65, 0xb4, 0x04, 0xa3, 0xed, 0x90, 0x34, - 0x62, 0xe6, 0x28, 0xd9, 0x97, 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0xcf, 0x59, 0x00, - 0xfc, 0xa5, 0xd4, 0xf1, 0xb7, 0xc9, 0x19, 0x68, 0x7f, 0xab, 0x30, 0x14, 0xb5, 0x49, 0xb3, 0x97, - 0x91, 0x4d, 0xd2, 0x9f, 0x46, 0x9b, 0x34, 0x93, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0x7f, 0x1f, - 0xc0, 0x54, 0x82, 0x56, 0x8b, 0xc9, 0x1e, 0x7a, 0xd1, 0x70, 0x43, 0xbb, 0x98, 0x72, 0x43, 0x2b, - 0x31, 0x6c, 0x4d, 0xd1, 0xf8, 0x1e, 0x14, 0xf7, 0x9c, 0xfb, 0x42, 0x93, 0xf4, 0x7c, 0xef, 0x6e, - 0x50, 0xfa, 0xf3, 0x6b, 0xce, 0x7d, 0x7e, 0x67, 0x7a, 0x5e, 0x2e, 0x90, 0x35, 0xe7, 0xfe, 0x31, - 0x37, 0xa5, 0x61, 0x4c, 0xea, 0xa6, 0x1b, 0xc5, 0x1f, 0xfe, 0x97, 0xe4, 0x3f, 0x5b, 0x76, 0xb4, - 0x11, 0xd6, 0x96, 0xeb, 0x8b, 0x77, 0xc3, 0x81, 0xda, 0x72, 0xfd, 0x74, 0x5b, 0xae, 0x3f, 0x40, - 0x5b, 0xae, 0x8f, 0x3e, 0x80, 0x51, 0xf1, 0x46, 0xcf, 0xec, 0xe7, 0x4d, 0x2d, 0x55, 0x5e, 0x7b, - 0xe2, 0x89, 0x9f, 0xb7, 0xb9, 0x20, 0xef, 0x84, 0xa2, 0xb4, 0x6f, 0xbb, 0xb2, 0x41, 0xf4, 0xb7, - 0x2c, 0x98, 0x12, 0xbf, 0x31, 0x79, 0xbf, 0x43, 0xa2, 0x58, 0xc8, 0x9e, 0x9f, 0x1b, 0xbc, 0x0f, - 0xa2, 0x22, 0xef, 0xca, 0xe7, 0x24, 0x9b, 0x35, 0x81, 0x7d, 0x7b, 0x94, 0xea, 0x05, 
0xfa, 0x27, - 0x16, 0x9c, 0xdf, 0x73, 0xee, 0xf3, 0x16, 0x79, 0x19, 0x76, 0x62, 0x37, 0x10, 0xfe, 0x00, 0x5f, - 0x18, 0x6c, 0xfa, 0xbb, 0xaa, 0xf3, 0x4e, 0x4a, 0xd3, 0xe1, 0xf3, 0x59, 0x28, 0x7d, 0xbb, 0x9a, - 0xd9, 0xaf, 0xb9, 0x2d, 0x18, 0x93, 0xeb, 0x2d, 0xe3, 0xe6, 0x5d, 0xd5, 0x05, 0xeb, 0x13, 0x9b, - 0x48, 0x68, 0x37, 0x75, 0xd6, 0x8e, 0x58, 0x6b, 0x0f, 0xb5, 0x9d, 0xf7, 0x60, 0x42, 0x5f, 0x63, - 0x0f, 0xb5, 0xad, 0xf7, 0xe1, 0x5c, 0xc6, 0x5a, 0x7a, 0xa8, 0x4d, 0xde, 0x83, 0x8b, 0xb9, 0xeb, - 0xe3, 0x61, 0x36, 0x6c, 0xff, 0xac, 0xa5, 0xf3, 0xc1, 0x33, 0x50, 0xc1, 0x2f, 0x9b, 0x2a, 0xf8, - 0x4b, 0xbd, 0x77, 0x4e, 0x8e, 0x1e, 0xfe, 0x5d, 0xbd, 0xd3, 0x94, 0xab, 0xa3, 0xb7, 0x60, 0xc4, - 0xa3, 0x25, 0xd2, 0x38, 0xc4, 0xee, 0xbf, 0x23, 0x13, 0x59, 0x8a, 0x95, 0x47, 0x58, 0x50, 0xb0, - 0x7f, 0xd1, 0x82, 0xa1, 0x33, 0x18, 0x09, 0x6c, 0x8e, 0xc4, 0x8b, 0xb9, 0xa4, 0x45, 0x64, 0xa2, - 0x79, 0xec, 0xdc, 0x5b, 0x91, 0xd1, 0x97, 0x72, 0x06, 0xe6, 0xff, 0x16, 0x60, 0x9c, 0x36, 0x25, - 0xad, 0x18, 0xdf, 0x80, 0x49, 0xcf, 0xd9, 0x24, 0x9e, 0x7c, 0xc7, 0x4d, 0x2b, 0x4c, 0x6e, 0xea, - 0x40, 0x6c, 0xe2, 0xd2, 0xca, 0x5b, 0xfa, 0x93, 0xb6, 0x90, 0x5f, 0x54, 0x65, 0xe3, 0xbd, 0x1b, - 0x9b, 0xb8, 0xf4, 0xee, 0x7e, 0xcf, 0x89, 0x9b, 0x3b, 0x42, 0x99, 0xa2, 0xba, 0x7b, 0x97, 0x16, - 0x62, 0x0e, 0xa3, 0x02, 0x9c, 0x5c, 0x9d, 0x77, 0xe8, 0xcd, 0x30, 0xf0, 0x85, 0x78, 0xac, 0x04, - 0x38, 0x6c, 0x82, 0x71, 0x1a, 0x3f, 0xc3, 0x1f, 0x7d, 0x98, 0xd9, 0x68, 0x0e, 0xe0, 0x8f, 0x8e, - 0xea, 0x70, 0xde, 0xf5, 0x9b, 0x5e, 0xa7, 0x45, 0x6e, 0xfb, 0x5c, 0xba, 0xf3, 0xdc, 0x0f, 0x48, - 0x4b, 0x08, 0xd0, 0xca, 0x9c, 0xb6, 0x96, 0x81, 0x83, 0x33, 0x6b, 0xda, 0x7f, 0x01, 0xce, 0xdd, - 0x0c, 0x9c, 0xd6, 0x92, 0xe3, 0x39, 0x7e, 0x93, 0x84, 0x35, 0x7f, 0xbb, 0xaf, 0x95, 0x98, 0x6e, - 0xd3, 0x55, 0xe8, 0x67, 0xd3, 0x65, 0xef, 0x00, 0xd2, 0x1b, 0x10, 0xb6, 0xc9, 0x18, 0x46, 0x5d, - 0xde, 0x94, 0x58, 0xfe, 0xcf, 0x64, 0x4b, 0xd7, 0x5d, 0x3d, 0xd3, 0xac, 0x6e, 0x79, 0x01, 0x96, - 0x84, 0xec, 0xd7, 0x21, 0xd3, 0x1f, 0xb1, 0xbf, 0xda, 0xc6, 0x7e, 0x15, 0x66, 0x59, 0xcd, 0x93, - 0xa9, 0x14, 0xec, 0xbf, 0x66, 0xc1, 0xf4, 0x7a, 0x2a, 0x82, 0xc4, 0xd3, 0xec, 0xad, 0x35, 0x43, - 0xef, 0xde, 0x60, 0xa5, 0x58, 0x40, 0x4f, 0x5d, 0xbf, 0xf7, 0x67, 0x16, 0x94, 0x54, 0x70, 0x9a, - 0x33, 0x10, 0x6a, 0x97, 0x0d, 0xa1, 0x36, 0x53, 0xef, 0xa4, 0xba, 0x93, 0x27, 0xd3, 0xa2, 0x1b, - 0x2a, 0x16, 0x42, 0x0f, 0x95, 0x53, 0x42, 0x86, 0x7b, 0xce, 0x4f, 0x99, 0x01, 0x13, 0x64, 0x74, - 0x04, 0x66, 0xa6, 0xa5, 0x70, 0x3f, 0x21, 0x66, 0x5a, 0xaa, 0x3f, 0x39, 0xdc, 0xaf, 0xae, 0x75, - 0x99, 0x9d, 0x0a, 0xdf, 0xce, 0x5c, 0x19, 0xd8, 0xde, 0x54, 0x21, 0x48, 0x2a, 0xc2, 0x35, 0x41, - 0x94, 0x1e, 0x33, 0x46, 0x26, 0xfe, 0xf1, 0x40, 0x42, 0x49, 0x15, 0xfb, 0x3a, 0x4c, 0xa7, 0x06, - 0x0c, 0xbd, 0x0a, 0xc3, 0xed, 0x1d, 0x27, 0x22, 0x29, 0xd3, 0xd4, 0xe1, 0x3a, 0x2d, 0x3c, 0x3e, - 0xac, 0x4c, 0xa9, 0x0a, 0xac, 0x04, 0x73, 0x6c, 0xfb, 0x7f, 0x5a, 0x30, 0xb4, 0x1e, 0xb4, 0xce, - 0x62, 0x31, 0xbd, 0x69, 0x2c, 0xa6, 0xc7, 0xf3, 0xc2, 0xb0, 0xe5, 0xae, 0xa3, 0xd5, 0xd4, 0x3a, - 0xba, 0x94, 0x4b, 0xa1, 0xf7, 0x12, 0xda, 0x83, 0x71, 0x16, 0xdc, 0x4d, 0x98, 0xca, 0xbe, 0x6c, - 0xdc, 0xaf, 0x2a, 0xa9, 0xfb, 0xd5, 0xb4, 0x86, 0xaa, 0xdd, 0xb2, 0x9e, 0x85, 0x51, 0x61, 0xae, - 0x99, 0x76, 0xda, 0x10, 0xb8, 0x58, 0xc2, 0xed, 0x1f, 0x2f, 0x82, 0x11, 0x4c, 0x0e, 0xfd, 0x8a, - 0x05, 0xf3, 0x21, 0xf7, 0x82, 0x6d, 0x55, 0x3b, 0xa1, 0xeb, 0x6f, 0x37, 0x9a, 0x3b, 0xa4, 0xd5, - 0xf1, 0x5c, 0x7f, 0xbb, 0xb6, 0xed, 0x07, 0xaa, 0x78, 0xe5, 0x3e, 0x69, 0x76, 0xd8, 0x9b, 0x4b, - 0x9f, 0xc8, 
0x75, 0xca, 0x1c, 0xea, 0xea, 0xd1, 0x61, 0x65, 0x1e, 0x9f, 0x88, 0x36, 0x3e, 0x61, - 0x5f, 0xd0, 0x6f, 0x5a, 0xb0, 0xc0, 0x63, 0xac, 0x0d, 0xde, 0xff, 0x1e, 0xb7, 0xd1, 0xba, 0x24, - 0x95, 0x10, 0xd9, 0x20, 0xe1, 0xde, 0xd2, 0x6b, 0x62, 0x40, 0x17, 0xea, 0x27, 0x6b, 0x0b, 0x9f, - 0xb4, 0x73, 0xf6, 0xbf, 0x29, 0xc2, 0x24, 0x1d, 0xc5, 0x24, 0xf2, 0xcb, 0xab, 0xc6, 0x92, 0x78, - 0x32, 0xb5, 0x24, 0x66, 0x0d, 0xe4, 0xd3, 0x09, 0xfa, 0x12, 0xc1, 0xac, 0xe7, 0x44, 0xf1, 0x75, - 0xe2, 0x84, 0xf1, 0x26, 0x71, 0xb8, 0x99, 0x50, 0xf1, 0xc4, 0x26, 0x4d, 0x4a, 0xfd, 0x75, 0x33, - 0x4d, 0x0c, 0x77, 0xd3, 0x47, 0xfb, 0x80, 0x98, 0xad, 0x53, 0xe8, 0xf8, 0x11, 0xff, 0x16, 0x57, - 0xbc, 0xc7, 0x9c, 0xac, 0xd5, 0x39, 0xd1, 0x2a, 0xba, 0xd9, 0x45, 0x0d, 0x67, 0xb4, 0xa0, 0xd9, - 0xb0, 0x0d, 0x0f, 0x6a, 0xc3, 0x36, 0xd2, 0xc7, 0x33, 0x6a, 0x0f, 0x66, 0xc4, 0xac, 0x6c, 0xb9, - 0xdb, 0xe2, 0x90, 0xfe, 0x72, 0xca, 0xc6, 0xd5, 0x1a, 0xdc, 0x50, 0xa9, 0x8f, 0x81, 0xab, 0xfd, - 0xdd, 0x70, 0x8e, 0x36, 0x67, 0xfa, 0xf1, 0x44, 0x88, 0xc0, 0xf4, 0x6e, 0x67, 0x93, 0x78, 0x24, - 0x96, 0x65, 0xa2, 0xd1, 0x4c, 0xb1, 0xdf, 0xac, 0x9d, 0xc8, 0x96, 0x37, 0x4c, 0x12, 0x38, 0x4d, - 0xd3, 0xfe, 0x49, 0x0b, 0x98, 0xb5, 0xfc, 0x19, 0x1c, 0x7f, 0x5f, 0x34, 0x8f, 0xbf, 0x72, 0x1e, - 0x07, 0xca, 0x39, 0xf9, 0x5e, 0xe1, 0xd3, 0x52, 0x0f, 0x83, 0xfb, 0x07, 0x52, 0xf6, 0xef, 0x2f, - 0x71, 0xfd, 0x1f, 0x8b, 0x6f, 0x48, 0x15, 0x14, 0x00, 0x7d, 0x0f, 0x8c, 0x35, 0x9d, 0xb6, 0xd3, - 0xe4, 0x51, 0x3c, 0x73, 0xb5, 0x3f, 0x46, 0xa5, 0xf9, 0x65, 0x51, 0x83, 0x6b, 0x33, 0x3e, 0x2b, - 0xbf, 0x52, 0x16, 0xf7, 0xd5, 0x60, 0xa8, 0x26, 0xe7, 0x76, 0x61, 0xd2, 0x20, 0xf6, 0x50, 0xaf, - 0xbe, 0xdf, 0xc3, 0x8f, 0x0b, 0x75, 0x63, 0xd9, 0x83, 0x59, 0x5f, 0xfb, 0x4f, 0x99, 0xa3, 0x14, - 0xa7, 0x3f, 0xdd, 0xef, 0x40, 0x60, 0x9c, 0x54, 0xf3, 0x06, 0x48, 0x91, 0xc1, 0xdd, 0x94, 0xed, - 0xbf, 0x67, 0xc1, 0xa3, 0x3a, 0xa2, 0x16, 0xaf, 0xa1, 0x9f, 0x3e, 0xb9, 0x0a, 0x63, 0x41, 0x9b, - 0x84, 0x4e, 0x72, 0x27, 0xbb, 0x22, 0x07, 0xfd, 0x96, 0x28, 0x3f, 0x3e, 0xac, 0x9c, 0xd7, 0xa9, - 0xcb, 0x72, 0xac, 0x6a, 0x22, 0x1b, 0x46, 0xd8, 0x60, 0x44, 0x22, 0x96, 0x06, 0x33, 0x53, 0x64, - 0x4f, 0xab, 0x11, 0x16, 0x10, 0xfb, 0xfb, 0x2c, 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0xf7, 0x61, 0x66, - 0x8f, 0x5e, 0xdf, 0x56, 0xee, 0xb7, 0x43, 0xae, 0x46, 0x97, 0xe3, 0xf4, 0x7c, 0xbf, 0x71, 0xd2, - 0x3e, 0x32, 0x31, 0x66, 0x5b, 0x4b, 0x11, 0xc3, 0x5d, 0xe4, 0xed, 0x3f, 0x29, 0xf0, 0x9d, 0xc8, - 0xa4, 0xba, 0x67, 0x61, 0xb4, 0x1d, 0xb4, 0x96, 0x6b, 0x55, 0x2c, 0x46, 0x48, 0xb1, 0xab, 0x3a, - 0x2f, 0xc6, 0x12, 0x8e, 0xae, 0x02, 0x90, 0xfb, 0x31, 0x09, 0x7d, 0xc7, 0x53, 0x86, 0x1f, 0x4a, - 0x78, 0x5a, 0x51, 0x10, 0xac, 0x61, 0xd1, 0x3a, 0xed, 0x30, 0xd8, 0x77, 0x5b, 0xcc, 0xdb, 0xb0, - 0x68, 0xd6, 0xa9, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0x55, 0xb9, 0xe3, 0x47, 0xfc, 0x00, 0x74, 0x36, - 0x45, 0xf8, 0xb9, 0xb1, 0xe4, 0xaa, 0x7c, 0x5b, 0x07, 0x62, 0x13, 0x17, 0x2d, 0xc2, 0x48, 0xec, - 0x30, 0x73, 0x86, 0xe1, 0x7c, 0xb3, 0xc4, 0x0d, 0x8a, 0xa1, 0x87, 0x75, 0xa4, 0x15, 0xb0, 0xa8, - 0x88, 0xde, 0x91, 0x2c, 0x98, 0xb3, 0x64, 0x61, 0x0f, 0x9c, 0xbb, 0x6c, 0x75, 0xf6, 0xad, 0xf3, - 0x60, 0x61, 0x67, 0x6c, 0xd0, 0xb2, 0xbf, 0xb7, 0x04, 0x90, 0x48, 0x7b, 0xe8, 0x83, 0x2e, 0x16, - 0xf1, 0x42, 0x6f, 0xf9, 0xf0, 0xf4, 0xf8, 0x03, 0xfa, 0x7e, 0x0b, 0xc6, 0x1d, 0xcf, 0x0b, 0x9a, - 0x4e, 0xcc, 0x46, 0xb9, 0xd0, 0x9b, 0x45, 0x89, 0xf6, 0x17, 0x93, 0x1a, 0xbc, 0x0b, 0x2f, 0x4b, - 0x4b, 0x05, 0x0d, 0xd2, 0xb7, 0x17, 0x7a, 0xc3, 0xe8, 0xb3, 0xf2, 0x12, 0xc0, 0x97, 0xc7, 0x5c, - 0xfa, 0x12, 0x50, 0x62, 0xdc, 0x58, 
0x93, 0xff, 0xd1, 0x6d, 0x23, 0x4e, 0xdb, 0x50, 0x7e, 0x48, - 0x0a, 0x43, 0xe8, 0xe9, 0x17, 0xa2, 0x0d, 0xd5, 0x75, 0xbf, 0xa8, 0xe1, 0xfc, 0xb8, 0x2d, 0x9a, - 0x74, 0xdd, 0xc7, 0x27, 0xea, 0x3d, 0x98, 0x6e, 0x99, 0xc7, 0xad, 0x58, 0x4d, 0xcf, 0xe4, 0xd1, - 0x4d, 0x9d, 0xce, 0xc9, 0x01, 0x9b, 0x02, 0xe0, 0x34, 0x61, 0x54, 0xe7, 0x1e, 0x6a, 0x35, 0x7f, - 0x2b, 0x10, 0x76, 0xe5, 0x76, 0xee, 0x5c, 0x1e, 0x44, 0x31, 0xd9, 0xa3, 0x98, 0xc9, 0x39, 0xba, - 0x2e, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x05, 0x23, 0xcc, 0x6d, 0x38, 0x2a, 0x8f, 0xe5, 0xeb, 0x01, - 0xcd, 0x88, 0x17, 0xc9, 0xa6, 0x62, 0x7f, 0x23, 0x2c, 0x28, 0xa0, 0xeb, 0x32, 0x2c, 0x4e, 0x54, - 0xf3, 0x6f, 0x47, 0x84, 0x85, 0xc5, 0x29, 0x2d, 0x7d, 0x3a, 0x89, 0x78, 0xc3, 0xcb, 0x33, 0x03, - 0x38, 0x1b, 0x35, 0xa9, 0xbc, 0x22, 0xfe, 0xcb, 0xb8, 0xd0, 0x65, 0xc8, 0xef, 0x9e, 0x19, 0x3b, - 0x3a, 0x19, 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x79, 0xa6, 0xc7, 0xe7, 0x9c, 0x0f, 0x33, 0xe9, - 0x8d, 0xf5, 0x50, 0x8f, 0xeb, 0x3f, 0x18, 0x82, 0x29, 0x73, 0x21, 0xa0, 0x05, 0x28, 0x09, 0x22, - 0x2a, 0x44, 0xa6, 0x5a, 0xdb, 0x6b, 0x12, 0x80, 0x13, 0x1c, 0x16, 0x22, 0x94, 0x55, 0xd7, 0xec, - 0x00, 0x93, 0x10, 0xa1, 0x0a, 0x82, 0x35, 0x2c, 0x2a, 0x44, 0x6f, 0x06, 0x41, 0xac, 0x8e, 0x02, - 0xb5, 0x5a, 0x96, 0x58, 0x29, 0x16, 0x50, 0x7a, 0x04, 0xec, 0x92, 0xd0, 0x27, 0x9e, 0xa9, 0xc9, - 0x54, 0x47, 0xc0, 0x0d, 0x1d, 0x88, 0x4d, 0x5c, 0x7a, 0xa4, 0x05, 0x11, 0x5b, 0x7e, 0x42, 0x54, - 0x4f, 0xec, 0x2a, 0x1b, 0xdc, 0x6d, 0x5e, 0xc2, 0xd1, 0x97, 0xe1, 0x51, 0xe5, 0xe5, 0x8e, 0xb9, - 0x66, 0x58, 0xb6, 0x38, 0x62, 0xdc, 0xac, 0x1f, 0x5d, 0xce, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0x9b, - 0x30, 0x25, 0x44, 0x60, 0x49, 0x71, 0xd4, 0x34, 0x56, 0xb8, 0x61, 0x40, 0x71, 0x0a, 0x1b, 0x55, - 0x61, 0x86, 0x96, 0x30, 0x29, 0x54, 0x52, 0xe0, 0xde, 0xfa, 0xea, 0xac, 0xbf, 0x91, 0x82, 0xe3, - 0xae, 0x1a, 0x68, 0x11, 0xa6, 0xb9, 0x8c, 0x42, 0xef, 0x94, 0x6c, 0x1e, 0x84, 0xbb, 0x87, 0xda, - 0x08, 0xb7, 0x4c, 0x30, 0x4e, 0xe3, 0xa3, 0xd7, 0x61, 0xc2, 0x09, 0x9b, 0x3b, 0x6e, 0x4c, 0x9a, - 0x71, 0x27, 0xe4, 0x7e, 0x20, 0x9a, 0xb5, 0xc7, 0xa2, 0x06, 0xc3, 0x06, 0xa6, 0xfd, 0x01, 0x9c, - 0xcb, 0xf0, 0x14, 0xa3, 0x0b, 0xc7, 0x69, 0xbb, 0xf2, 0x9b, 0x52, 0x16, 0x92, 0x8b, 0xf5, 0x9a, - 0xfc, 0x1a, 0x0d, 0x8b, 0xae, 0x4e, 0xa6, 0x12, 0xd7, 0x82, 0xb7, 0xab, 0xd5, 0xb9, 0x2a, 0x01, - 0x38, 0xc1, 0xb1, 0x7f, 0x03, 0x40, 0x53, 0xe8, 0x0c, 0x60, 0x1f, 0xf7, 0x3a, 0x4c, 0xc8, 0x8c, - 0x03, 0x5a, 0xa4, 0x6b, 0xf5, 0x99, 0xd7, 0x34, 0x18, 0x36, 0x30, 0x69, 0xdf, 0x7c, 0x15, 0xa7, - 0x3b, 0x65, 0x8f, 0x99, 0x44, 0xe9, 0x4e, 0x70, 0xd0, 0x0b, 0x30, 0x16, 0x11, 0x6f, 0xeb, 0xa6, - 0xeb, 0xef, 0x8a, 0x85, 0xad, 0xb8, 0x70, 0x43, 0x94, 0x63, 0x85, 0x81, 0x96, 0xa0, 0xd8, 0x71, - 0x5b, 0x62, 0x29, 0xcb, 0x03, 0xbf, 0x78, 0xbb, 0x56, 0x3d, 0x3e, 0xac, 0x3c, 0x99, 0x97, 0x48, - 0x81, 0x5e, 0xed, 0xa3, 0x79, 0xba, 0xfd, 0x68, 0xe5, 0xac, 0xb7, 0x81, 0x91, 0x13, 0xbe, 0x0d, - 0x5c, 0x05, 0x10, 0x5f, 0x2d, 0xd7, 0x72, 0x31, 0x99, 0xb5, 0x6b, 0x0a, 0x82, 0x35, 0x2c, 0x14, - 0xc1, 0x6c, 0x33, 0x24, 0x8e, 0xbc, 0x43, 0x73, 0x9f, 0xa7, 0xb1, 0x07, 0x57, 0x10, 0x2c, 0xa7, - 0x89, 0xe1, 0x6e, 0xfa, 0x28, 0x80, 0xd9, 0x96, 0x08, 0xaa, 0x90, 0x34, 0x5a, 0x3a, 0xb9, 0xa3, - 0x15, 0x33, 0xc8, 0x49, 0x13, 0xc2, 0xdd, 0xb4, 0xd1, 0x57, 0x60, 0x4e, 0x16, 0x76, 0xc7, 0xb1, - 0x60, 0xdb, 0xa5, 0xb8, 0x74, 0xe9, 0xe8, 0xb0, 0x32, 0x57, 0xcd, 0xc5, 0xc2, 0x3d, 0x28, 0x20, - 0x0c, 0x23, 0xec, 0x2d, 0x29, 0x2a, 0x8f, 0xb3, 0x73, 0xee, 0xb9, 0x7c, 0x65, 0x00, 0x5d, 0xeb, - 0xf3, 0xec, 0x1d, 0x4a, 0x98, 0x94, 0x27, 0xcf, 0x72, 0xac, 
0x10, 0x0b, 0x4a, 0x68, 0x0b, 0xc6, - 0x1d, 0xdf, 0x0f, 0x62, 0x87, 0x8b, 0x50, 0x13, 0xf9, 0xb2, 0x9f, 0x46, 0x78, 0x31, 0xa9, 0xc1, - 0xa9, 0x2b, 0x2b, 0x55, 0x0d, 0x82, 0x75, 0xc2, 0xe8, 0x1e, 0x4c, 0x07, 0xf7, 0x28, 0x73, 0x94, - 0x5a, 0x8a, 0xa8, 0x3c, 0xc9, 0xda, 0x7a, 0x65, 0x40, 0x3d, 0xad, 0x51, 0x59, 0xe3, 0x5a, 0x26, - 0x51, 0x9c, 0x6e, 0x05, 0xcd, 0x1b, 0xda, 0xea, 0xa9, 0xc4, 0x9d, 0x25, 0xd1, 0x56, 0xeb, 0xca, - 0x69, 0x16, 0x17, 0x85, 0x9b, 0x48, 0xb3, 0xdd, 0x3f, 0x9d, 0x8a, 0x8b, 0x92, 0x80, 0xb0, 0x8e, - 0x87, 0x76, 0x60, 0x22, 0x79, 0xb2, 0x0a, 0x23, 0x16, 0x95, 0x6d, 0xfc, 0xea, 0xd5, 0xc1, 0x3e, - 0xae, 0xa6, 0xd5, 0xe4, 0x37, 0x07, 0xbd, 0x04, 0x1b, 0x94, 0xe7, 0xbe, 0x0d, 0xc6, 0xb5, 0x89, - 0x3d, 0x89, 0x07, 0xc0, 0xdc, 0x9b, 0x30, 0x93, 0x9e, 0xba, 0x13, 0x79, 0x10, 0xfc, 0xef, 0x02, - 0x4c, 0x67, 0xbc, 0x5c, 0xb1, 0x64, 0x0c, 0x29, 0x86, 0x9a, 0xe4, 0x5e, 0x30, 0xd9, 0x62, 0x61, - 0x00, 0xb6, 0x28, 0x79, 0x74, 0x31, 0x97, 0x47, 0x0b, 0x56, 0x38, 0xf4, 0x51, 0x58, 0xa1, 0x79, - 0xfa, 0x0c, 0x0f, 0x74, 0xfa, 0x9c, 0x02, 0xfb, 0x34, 0x0e, 0xb0, 0xd1, 0x01, 0x0e, 0xb0, 0x1f, - 0x2a, 0xc0, 0x4c, 0xda, 0xc2, 0xf7, 0x0c, 0xde, 0x3b, 0xde, 0x32, 0xde, 0x3b, 0xb2, 0x53, 0x9b, - 0xa4, 0xed, 0x8e, 0xf3, 0xde, 0x3e, 0x70, 0xea, 0xed, 0xe3, 0xb9, 0x81, 0xa8, 0xf5, 0x7e, 0x07, - 0xf9, 0xfb, 0x05, 0xb8, 0x90, 0xae, 0xb2, 0xec, 0x39, 0xee, 0xde, 0x19, 0x8c, 0xcd, 0x2d, 0x63, - 0x6c, 0x5e, 0x1c, 0xe4, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e, 0x6a, 0x80, 0x16, 0x06, 0x27, - 0xd9, 0x7b, 0x94, 0xbe, 0x51, 0x84, 0x4b, 0x99, 0xf5, 0x92, 0xe7, 0x82, 0x55, 0xe3, 0xb9, 0xe0, - 0x6a, 0xea, 0xb9, 0xc0, 0xee, 0x5d, 0xfb, 0x74, 0xde, 0x0f, 0x84, 0x3b, 0x34, 0x8b, 0x18, 0xfa, - 0x80, 0x6f, 0x07, 0x86, 0x3b, 0xb4, 0x22, 0x84, 0x4d, 0xba, 0xdf, 0x4c, 0x6f, 0x06, 0xbf, 0x61, - 0xc1, 0xc5, 0xcc, 0xb9, 0x39, 0x03, 0xbd, 0xfa, 0xba, 0xa9, 0x57, 0x7f, 0x76, 0xe0, 0xd5, 0x9a, - 0xa3, 0x68, 0xff, 0xc3, 0x62, 0xce, 0xb7, 0x30, 0xcd, 0xe4, 0x2d, 0x18, 0x77, 0x9a, 0x4d, 0x12, - 0x45, 0x6b, 0x41, 0x4b, 0x45, 0xd0, 0x7c, 0x91, 0x49, 0x1b, 0x49, 0xf1, 0xf1, 0x61, 0x65, 0x2e, - 0x4d, 0x22, 0x01, 0x63, 0x9d, 0x82, 0x19, 0xf4, 0xb7, 0x70, 0xaa, 0x41, 0x7f, 0xaf, 0x02, 0xec, - 0x2b, 0x7d, 0x45, 0x5a, 0xcd, 0xa9, 0x69, 0x32, 0x34, 0x2c, 0xf4, 0x5d, 0xec, 0x16, 0xc0, 0x8d, - 0x81, 0xf8, 0x52, 0x7c, 0x79, 0xc0, 0xb9, 0xd2, 0x0d, 0x8b, 0x78, 0xdc, 0x0d, 0xa5, 0x12, 0x56, - 0x24, 0xd1, 0x77, 0xc0, 0x4c, 0xc4, 0xc3, 0x3a, 0x2d, 0x7b, 0x4e, 0xc4, 0x9c, 0xb8, 0xc4, 0x2a, - 0x64, 0xc1, 0x34, 0x1a, 0x29, 0x18, 0xee, 0xc2, 0x46, 0xab, 0xf2, 0xa3, 0x58, 0x0c, 0x2a, 0xbe, - 0x30, 0x9f, 0x4e, 0x3e, 0x48, 0xa4, 0x82, 0x3a, 0x9f, 0x1e, 0x7e, 0x36, 0xf0, 0x5a, 0x4d, 0xfb, - 0x87, 0x86, 0xe0, 0xb1, 0x1e, 0x4c, 0x0c, 0x2d, 0x9a, 0x46, 0x00, 0xcf, 0xa7, 0xf5, 0x7f, 0x73, - 0x99, 0x95, 0x0d, 0x85, 0x60, 0x6a, 0xad, 0x14, 0x3e, 0xf2, 0x5a, 0xf9, 0x01, 0x4b, 0xd3, 0xcc, - 0x72, 0x53, 0xe1, 0x2f, 0x9e, 0x90, 0x39, 0x9f, 0xa2, 0xaa, 0x76, 0x2b, 0x43, 0xdf, 0x79, 0x75, - 0xe0, 0xee, 0x0c, 0xac, 0x00, 0x3d, 0xdb, 0x27, 0xa3, 0x0f, 0x2d, 0x78, 0x32, 0xb3, 0xbf, 0x86, - 0xd1, 0xd2, 0x02, 0x94, 0x9a, 0xb4, 0x50, 0x73, 0x0c, 0x4d, 0x3c, 0xe6, 0x25, 0x00, 0x27, 0x38, - 0x86, 0x6d, 0x52, 0xa1, 0xaf, 0x6d, 0xd2, 0xbf, 0xb6, 0xa0, 0x6b, 0x01, 0x9f, 0x01, 0x27, 0xad, - 0x99, 0x9c, 0xf4, 0xd3, 0x83, 0xcc, 0x65, 0x0e, 0x13, 0xfd, 0xdd, 0x69, 0x78, 0x24, 0xc7, 0x13, - 0x6c, 0x1f, 0x66, 0xb7, 0x9b, 0xc4, 0x74, 0xb9, 0x15, 0x1f, 0x93, 0xe9, 0x9d, 0xdc, 0xd3, 0x3f, - 0x97, 0x5f, 0x88, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x43, 0x0b, 0xce, 0x3b, 0xf7, 
0xa2, 0xae, - 0x4c, 0x8d, 0x62, 0xcd, 0xbc, 0x92, 0xa9, 0xa7, 0xed, 0x93, 0xd9, 0x91, 0xb9, 0xc5, 0x9d, 0xcf, - 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x82, 0x1e, 0x53, 0x79, 0xbb, 0x87, 0x53, 0x78, 0x96, 0xcb, - 0x1e, 0xe7, 0xa9, 0x12, 0x82, 0x15, 0x1d, 0x74, 0x07, 0x4a, 0xdb, 0xd2, 0x8f, 0x56, 0xf0, 0xec, - 0xcc, 0x43, 0x30, 0xd3, 0xd9, 0x96, 0xfb, 0x8e, 0x28, 0x10, 0x4e, 0x48, 0xa1, 0x37, 0xa1, 0xe8, - 0x6f, 0x45, 0xbd, 0x92, 0x4d, 0xa5, 0x6c, 0xf9, 0x78, 0xc0, 0x85, 0xf5, 0xd5, 0x06, 0xa6, 0x15, - 0xd1, 0x75, 0x28, 0x86, 0x9b, 0x2d, 0xf1, 0xb4, 0x90, 0x29, 0x97, 0xe2, 0xa5, 0x6a, 0xf6, 0x22, - 0xe1, 0x94, 0xf0, 0x52, 0x15, 0x53, 0x12, 0xa8, 0x0e, 0xc3, 0xcc, 0x69, 0x4a, 0xbc, 0x20, 0x64, - 0x0a, 0xa4, 0x3d, 0x9c, 0x0f, 0x79, 0x54, 0x06, 0x86, 0x80, 0x39, 0x21, 0xf4, 0x16, 0x8c, 0x34, - 0x59, 0x3e, 0x26, 0xa1, 0xf8, 0xc9, 0x0e, 0xd7, 0xd5, 0x95, 0xb1, 0x89, 0xbf, 0xa0, 0xf2, 0x72, - 0x2c, 0x28, 0xa0, 0x0d, 0x18, 0x69, 0x92, 0xf6, 0xce, 0x56, 0x24, 0xf4, 0x39, 0x9f, 0xcd, 0xa4, - 0xd5, 0x23, 0xfd, 0x98, 0xa0, 0xca, 0x30, 0xb0, 0xa0, 0x85, 0x3e, 0x0f, 0x85, 0xad, 0xa6, 0xf0, - 0xa4, 0xca, 0x7c, 0x43, 0x30, 0x23, 0x65, 0x2c, 0x8d, 0x1c, 0x1d, 0x56, 0x0a, 0xab, 0xcb, 0xb8, - 0xb0, 0xd5, 0x44, 0xeb, 0x30, 0xba, 0xc5, 0x7d, 0xeb, 0x45, 0x00, 0x9b, 0x67, 0xb2, 0xdd, 0xfe, - 0xbb, 0xdc, 0xef, 0xb9, 0x07, 0x90, 0x00, 0x60, 0x49, 0x84, 0xc5, 0x0b, 0x56, 0x31, 0x02, 0x44, - 0xe0, 0xfc, 0xf9, 0x93, 0xc5, 0x75, 0x10, 0x6a, 0x0e, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x57, 0xa1, - 0xe4, 0xc8, 0xcc, 0x80, 0x22, 0x08, 0xce, 0xcb, 0x99, 0xdb, 0xb1, 0x77, 0xd2, 0x44, 0xbe, 0x96, - 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x5d, 0x98, 0xdc, 0x8f, 0xda, 0x3b, 0x44, 0x6e, 0x5f, 0x16, 0x13, - 0x27, 0xe7, 0xb8, 0xba, 0x23, 0x10, 0xdd, 0x30, 0xee, 0x38, 0x5e, 0x17, 0xc7, 0x61, 0x8e, 0x63, - 0x77, 0x74, 0x62, 0xd8, 0xa4, 0x4d, 0x87, 0xff, 0xfd, 0x4e, 0xb0, 0x79, 0x10, 0x13, 0x11, 0x69, - 0x3f, 0x73, 0xf8, 0xdf, 0xe6, 0x28, 0xdd, 0xc3, 0x2f, 0x00, 0x58, 0x12, 0xa1, 0x1b, 0xdc, 0x91, - 0x59, 0x37, 0x85, 0x2e, 0xe7, 0xd9, 0xdc, 0xe1, 0xe9, 0xea, 0x6f, 0x32, 0x28, 0x8c, 0x33, 0x26, - 0xa4, 0x18, 0x47, 0x6c, 0xef, 0x04, 0x71, 0xe0, 0xa7, 0xb8, 0xf1, 0x6c, 0x3e, 0x47, 0xac, 0x67, - 0xe0, 0x77, 0x73, 0xc4, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x6a, 0xc1, 0x54, 0x3b, 0x08, 0xe3, 0x7b, - 0x41, 0x28, 0xd7, 0x17, 0xea, 0x71, 0xc9, 0x37, 0x30, 0x45, 0x8b, 0xcc, 0x0e, 0xdc, 0x84, 0xe0, - 0x14, 0x4d, 0xf4, 0x25, 0x18, 0x8d, 0x9a, 0x8e, 0x47, 0x6a, 0xb7, 0xca, 0xe7, 0xf2, 0x8f, 0x9a, - 0x06, 0x47, 0xc9, 0x59, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, 0x0c, 0xb3, 0xf4, - 0x2d, 0x2c, 0x49, 0x40, 0x4e, 0xb0, 0xb5, 0x2e, 0x5b, 0x69, 0xce, 0x91, 0x58, 0x31, 0xe6, 0xd5, - 0xe9, 0x1e, 0x10, 0xb2, 0x6e, 0x10, 0x95, 0x2f, 0xe4, 0xef, 0x01, 0x21, 0x22, 0xdf, 0x6a, 0xf4, - 0xda, 0x03, 0x0a, 0x09, 0x27, 0x44, 0x29, 0x3f, 0xa6, 0x3c, 0xf4, 0x91, 0x7c, 0x7e, 0x9c, 0xcf, - 0x41, 0x19, 0x3f, 0xa6, 0xfc, 0x93, 0x92, 0xb0, 0x3f, 0x1c, 0xed, 0x96, 0x4f, 0xd8, 0xed, 0xe8, - 0x7b, 0xad, 0x2e, 0xd3, 0x81, 0xcf, 0x0d, 0xaa, 0xac, 0x39, 0x45, 0xc9, 0xf4, 0x43, 0x0b, 0x1e, - 0x69, 0x67, 0x7e, 0x88, 0x38, 0xec, 0x07, 0xd3, 0xf9, 0xf0, 0x4f, 0x57, 0x89, 0x3c, 0xb2, 0xe1, - 0x38, 0xa7, 0xa5, 0xb4, 0xf4, 0x5f, 0xfc, 0xc8, 0xd2, 0xff, 0x1a, 0x8c, 0x31, 0x81, 0x32, 0x89, - 0xec, 0x37, 0x90, 0x01, 0x1e, 0x13, 0x1b, 0x96, 0x45, 0x45, 0xac, 0x48, 0xa0, 0x1f, 0xb4, 0xe0, - 0x89, 0x74, 0xd7, 0x31, 0x61, 0x60, 0x11, 0x25, 0x9a, 0x5f, 0xcc, 0x56, 0xc5, 0xf7, 0x3f, 0x51, - 0xef, 0x85, 0x7c, 0xdc, 0x0f, 0x01, 0xf7, 0x6e, 0x0c, 0x55, 0x33, 0x6e, 0x86, 0x23, 0xe6, 0xcb, - 0xe2, 0x00, 
0xb7, 0xc3, 0x57, 0x60, 0x62, 0x2f, 0xe8, 0xf8, 0xd2, 0x3b, 0x46, 0xf8, 0x3e, 0x33, - 0x2d, 0xf6, 0x9a, 0x56, 0x8e, 0x0d, 0xac, 0xd4, 0x9d, 0x72, 0xec, 0x41, 0xef, 0x94, 0x67, 0x7b, - 0x53, 0xf9, 0xba, 0x95, 0x21, 0x62, 0xf3, 0xbb, 0xeb, 0x17, 0xcc, 0xbb, 0xeb, 0xd3, 0xe9, 0xbb, - 0x6b, 0x97, 0xae, 0xd2, 0xb8, 0xb6, 0x0e, 0x1e, 0x45, 0x7f, 0xd0, 0x10, 0x8a, 0xb6, 0x07, 0x97, - 0xfb, 0x1d, 0x1c, 0xcc, 0x98, 0xb1, 0xa5, 0x5e, 0xf9, 0x13, 0x63, 0xc6, 0x56, 0xad, 0x8a, 0x19, - 0x64, 0xd0, 0x18, 0x3b, 0xf6, 0x7f, 0xb7, 0xa0, 0x58, 0x0f, 0x5a, 0x67, 0xa0, 0x7b, 0xfd, 0xa2, - 0xa1, 0x7b, 0x7d, 0x2c, 0x27, 0xbf, 0x78, 0xae, 0xa6, 0x75, 0x25, 0xa5, 0x69, 0x7d, 0x22, 0x8f, - 0x40, 0x6f, 0xbd, 0xea, 0x4f, 0x14, 0x41, 0xcf, 0x86, 0x8e, 0xfe, 0xed, 0x83, 0x58, 0xc5, 0x17, - 0x7b, 0x25, 0x48, 0x17, 0x94, 0x99, 0x0d, 0xa4, 0x74, 0xb8, 0xfd, 0x73, 0x66, 0x1c, 0x7f, 0x97, - 0xb8, 0xdb, 0x3b, 0x31, 0x69, 0xa5, 0x3f, 0xe7, 0xec, 0x8c, 0xe3, 0xff, 0xab, 0x05, 0xd3, 0xa9, - 0xd6, 0x91, 0x97, 0xe5, 0xbd, 0xf7, 0x80, 0x3a, 0xb7, 0xd9, 0xbe, 0xee, 0x7e, 0xf3, 0x00, 0xea, - 0x61, 0x4b, 0xea, 0xa3, 0x98, 0x5c, 0xae, 0x5e, 0xbe, 0x22, 0xac, 0x61, 0xa0, 0x57, 0x61, 0x3c, - 0x0e, 0xda, 0x81, 0x17, 0x6c, 0x1f, 0xdc, 0x20, 0x32, 0xaa, 0x93, 0x7a, 0x7e, 0xdc, 0x48, 0x40, - 0x58, 0xc7, 0xb3, 0x7f, 0xaa, 0x08, 0xe9, 0x0c, 0xfa, 0xdf, 0x5a, 0x93, 0x9f, 0xcc, 0x35, 0xf9, - 0x0d, 0x0b, 0x66, 0x68, 0xeb, 0xcc, 0xbe, 0x4c, 0x1e, 0x87, 0x2a, 0x9b, 0x97, 0xd5, 0x23, 0x9b, - 0xd7, 0xd3, 0x94, 0x77, 0xb5, 0x82, 0x4e, 0x2c, 0xf4, 0x59, 0x1a, 0x73, 0xa2, 0xa5, 0x58, 0x40, - 0x05, 0x1e, 0x09, 0x43, 0xe1, 0x93, 0xa7, 0xe3, 0x91, 0x30, 0xc4, 0x02, 0x2a, 0x93, 0x7d, 0x0d, - 0xe5, 0x24, 0xfb, 0x62, 0x31, 0x2a, 0x85, 0x4d, 0x93, 0x10, 0x4c, 0xb4, 0x18, 0x95, 0xd2, 0xd8, - 0x29, 0xc1, 0xb1, 0x7f, 0xb6, 0x08, 0x13, 0xf5, 0xa0, 0x95, 0x3c, 0x2d, 0xbd, 0x62, 0x3c, 0x2d, - 0x5d, 0x4e, 0x3d, 0x2d, 0xcd, 0xe8, 0xb8, 0xdf, 0x7a, 0x48, 0xfa, 0xb8, 0x1e, 0x92, 0xfe, 0x95, - 0xc5, 0x66, 0xad, 0xba, 0xde, 0x10, 0xc9, 0xa8, 0x5f, 0x82, 0x71, 0xc6, 0x90, 0x98, 0x13, 0xa8, - 0x7c, 0x6f, 0x61, 0x39, 0x27, 0xd6, 0x93, 0x62, 0xac, 0xe3, 0xa0, 0x2b, 0x30, 0x16, 0x11, 0x27, - 0x6c, 0xee, 0x28, 0x1e, 0x27, 0x5e, 0x23, 0x78, 0x19, 0x56, 0x50, 0xf4, 0x76, 0x12, 0x1e, 0xb1, - 0x98, 0x9f, 0x56, 0x59, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x31, 0x11, 0xed, 0xbb, 0x80, 0xba, 0xf1, - 0x07, 0x30, 0xf4, 0xaa, 0x98, 0x81, 0xd0, 0x4a, 0x5d, 0x41, 0xd0, 0xfe, 0xd4, 0x82, 0xa9, 0x7a, - 0xd0, 0xa2, 0x5b, 0xf7, 0x9b, 0x69, 0x9f, 0xea, 0xb1, 0x61, 0x47, 0x7a, 0xc4, 0x86, 0xfd, 0x07, - 0x16, 0x8c, 0xd6, 0x83, 0xd6, 0x19, 0x68, 0xc1, 0xbf, 0x60, 0x6a, 0xc1, 0x1f, 0xcd, 0x59, 0x12, - 0x39, 0x8a, 0xef, 0x9f, 0x2f, 0xc2, 0x24, 0xed, 0x67, 0xb0, 0x2d, 0x67, 0xc9, 0x18, 0x11, 0x6b, - 0x80, 0x11, 0xa1, 0x62, 0x6e, 0xe0, 0x79, 0xc1, 0xbd, 0xf4, 0x8c, 0xad, 0xb2, 0x52, 0x2c, 0xa0, - 0xe8, 0x05, 0x18, 0x6b, 0x87, 0x64, 0xdf, 0x0d, 0x3a, 0x51, 0xda, 0xdf, 0xb9, 0x2e, 0xca, 0xb1, - 0xc2, 0xa0, 0x37, 0xa3, 0xc8, 0xf5, 0x9b, 0x44, 0x5a, 0x80, 0x0d, 0x31, 0x0b, 0x30, 0x1e, 0xf4, - 0x5d, 0x2b, 0xc7, 0x06, 0x16, 0xba, 0x0b, 0x25, 0xf6, 0x9f, 0x71, 0x94, 0x93, 0xa7, 0x21, 0x13, - 0x99, 0x56, 0x04, 0x01, 0x9c, 0xd0, 0x42, 0x57, 0x01, 0x62, 0x69, 0xab, 0x16, 0x09, 0x77, 0x7c, - 0x25, 0x6b, 0x2b, 0x2b, 0xb6, 0x08, 0x6b, 0x58, 0xe8, 0x79, 0x28, 0xc5, 0x8e, 0xeb, 0xdd, 0x74, - 0x7d, 0x12, 0x09, 0x5b, 0x3f, 0x91, 0x48, 0x45, 0x14, 0xe2, 0x04, 0x4e, 0x65, 0x1d, 0x16, 0xec, - 0x81, 0x27, 0x31, 0x1c, 0x63, 0xd8, 0x4c, 0xd6, 0xb9, 0xa9, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0e, - 0x17, 0xea, 0x41, 0xab, 0x1e, 0x84, 
0xf1, 0x6a, 0x10, 0xde, 0x73, 0xc2, 0x96, 0x9c, 0xbf, 0x8a, - 0xcc, 0xe9, 0x41, 0x79, 0xcf, 0x30, 0xdf, 0x99, 0x46, 0xb6, 0x8e, 0x97, 0x99, 0xb4, 0x73, 0x42, - 0xc7, 0xac, 0x7f, 0x5f, 0x60, 0x8c, 0x22, 0x95, 0x59, 0x13, 0x7d, 0x05, 0xa6, 0x22, 0x72, 0xd3, - 0xf5, 0x3b, 0xf7, 0xe5, 0x0d, 0xb6, 0x87, 0xd7, 0x5b, 0x63, 0x45, 0xc7, 0xe4, 0x7a, 0x30, 0xb3, - 0x0c, 0xa7, 0xa8, 0xd1, 0x21, 0x0c, 0x3b, 0xfe, 0x62, 0x74, 0x3b, 0x22, 0xa1, 0xc8, 0xec, 0xc8, - 0x86, 0x10, 0xcb, 0x42, 0x9c, 0xc0, 0xe9, 0x92, 0x61, 0x7f, 0xd6, 0x03, 0x1f, 0x07, 0x41, 0x2c, - 0x17, 0x19, 0xcb, 0x0d, 0xa6, 0x95, 0x63, 0x03, 0x0b, 0xad, 0x02, 0x8a, 0x3a, 0xed, 0xb6, 0xc7, - 0x9e, 0xa8, 0x1d, 0xef, 0x5a, 0x18, 0x74, 0xda, 0xfc, 0x79, 0x50, 0xa4, 0xd5, 0x6a, 0x74, 0x41, - 0x71, 0x46, 0x0d, 0xca, 0x18, 0xb6, 0x22, 0xf6, 0x5b, 0xc4, 0x7b, 0xe0, 0xba, 0xe9, 0x06, 0x2b, - 0xc2, 0x12, 0x66, 0x7f, 0x0f, 0x3b, 0x30, 0x58, 0x42, 0xbe, 0xb8, 0x13, 0x12, 0xb4, 0x07, 0x93, - 0x6d, 0x76, 0x94, 0x8b, 0xd0, 0xe6, 0x62, 0x00, 0x1f, 0xcc, 0xb2, 0x8f, 0x27, 0xe8, 0xd2, 0xc9, - 0x61, 0x93, 0xba, 0xfd, 0x9f, 0xa6, 0x19, 0x5f, 0x6a, 0xf0, 0xeb, 0xdc, 0xa8, 0xb0, 0xd7, 0x17, - 0xb2, 0xeb, 0x5c, 0x7e, 0x0a, 0xcf, 0xe4, 0x08, 0x11, 0x36, 0xff, 0x58, 0xd6, 0x45, 0x6f, 0xb3, - 0x77, 0x55, 0xce, 0x0c, 0xfa, 0x65, 0xf6, 0xe6, 0x58, 0xc6, 0x13, 0xaa, 0xa8, 0x88, 0x35, 0x22, - 0xe8, 0x26, 0x4c, 0x8a, 0xfc, 0x6d, 0x42, 0xb5, 0x53, 0x34, 0x14, 0x03, 0x93, 0x58, 0x07, 0x1e, - 0xa7, 0x0b, 0xb0, 0x59, 0x19, 0x6d, 0xc3, 0x13, 0x5a, 0x32, 0xd3, 0x0c, 0xeb, 0x52, 0xce, 0x5b, - 0x9e, 0x3c, 0x3a, 0xac, 0x3c, 0xb1, 0xd1, 0x0b, 0x11, 0xf7, 0xa6, 0x83, 0x6e, 0xc1, 0x05, 0xa7, - 0x19, 0xbb, 0xfb, 0xa4, 0x4a, 0x9c, 0x96, 0xe7, 0xfa, 0xc4, 0x0c, 0x00, 0x72, 0xf1, 0xe8, 0xb0, - 0x72, 0x61, 0x31, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0x5f, 0x80, 0x52, 0xcb, 0x8f, 0xc4, 0x18, 0x8c, - 0x18, 0x79, 0x7a, 0x4b, 0xd5, 0xf5, 0x86, 0xfa, 0xfe, 0xe4, 0x0f, 0x4e, 0x2a, 0xa0, 0x6d, 0x98, - 0xd0, 0x9d, 0xfc, 0x44, 0x8e, 0xe7, 0x17, 0x7b, 0xdc, 0xfa, 0x0d, 0xcf, 0x38, 0xae, 0xd7, 0x54, - 0xb6, 0xdb, 0x86, 0xd3, 0x9c, 0x41, 0x18, 0xbd, 0x05, 0x88, 0x0a, 0x33, 0x6e, 0x93, 0x2c, 0x36, - 0x59, 0x84, 0x79, 0xa6, 0x0d, 0x1b, 0x33, 0x1c, 0x91, 0x50, 0xa3, 0x0b, 0x03, 0x67, 0xd4, 0x42, - 0xd7, 0x29, 0x47, 0xd1, 0x4b, 0x85, 0xa9, 0xbd, 0x14, 0x80, 0xcb, 0x55, 0xd2, 0x0e, 0x49, 0xd3, - 0x89, 0x49, 0xcb, 0xa4, 0x88, 0x53, 0xf5, 0xe8, 0x79, 0xa3, 0x92, 0x4d, 0x81, 0x69, 0x20, 0xde, - 0x9d, 0x70, 0x8a, 0xde, 0x1d, 0x77, 0x82, 0x28, 0x5e, 0x27, 0xf1, 0xbd, 0x20, 0xdc, 0x15, 0x51, - 0xfb, 0x92, 0x00, 0xb2, 0x09, 0x08, 0xeb, 0x78, 0x54, 0x56, 0x64, 0x0f, 0x9b, 0xb5, 0x2a, 0x7b, - 0x67, 0x1a, 0x4b, 0xf6, 0xc9, 0x75, 0x5e, 0x8c, 0x25, 0x5c, 0xa2, 0xd6, 0xea, 0xcb, 0xec, 0xcd, - 0x28, 0x85, 0x5a, 0xab, 0x2f, 0x63, 0x09, 0x47, 0xa4, 0x3b, 0x07, 0xf2, 0x54, 0xfe, 0xbb, 0x5f, - 0x37, 0x5f, 0x1e, 0x30, 0x0d, 0xb2, 0x0f, 0x33, 0x2a, 0xfb, 0x32, 0x0f, 0x67, 0x18, 0x95, 0xa7, - 0xd9, 0x22, 0x19, 0x3c, 0x16, 0xa2, 0xd2, 0x76, 0xd6, 0x52, 0x94, 0x70, 0x17, 0x6d, 0x23, 0xb0, - 0xcc, 0x4c, 0xdf, 0x64, 0x61, 0x0b, 0x50, 0x8a, 0x3a, 0x9b, 0xad, 0x60, 0xcf, 0x71, 0x7d, 0xf6, - 0xc4, 0xa3, 0x09, 0x22, 0x0d, 0x09, 0xc0, 0x09, 0x0e, 0x5a, 0x85, 0x31, 0x47, 0x5c, 0x4b, 0xc5, - 0xa3, 0x4c, 0x66, 0xa4, 0x09, 0x79, 0x75, 0xe5, 0x62, 0xb6, 0xfc, 0x87, 0x55, 0x5d, 0xf4, 0x06, - 0x4c, 0x0a, 0x67, 0x48, 0x61, 0xc7, 0x7c, 0xce, 0xf4, 0x9b, 0x69, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, - 0xef, 0x82, 0x29, 0x4a, 0x25, 0x61, 0x6c, 0xe5, 0xf3, 0x83, 0x70, 0x44, 0x2d, 0x09, 0x8c, 0x5e, - 0x19, 0xa7, 0x88, 0xa1, 0x16, 0x3c, 0xee, 0x74, 0xe2, 0x80, 
0xa9, 0x83, 0xcd, 0xf5, 0xbf, 0x11, - 0xec, 0x12, 0x9f, 0xbd, 0xc4, 0x8c, 0x2d, 0x5d, 0x3e, 0x3a, 0xac, 0x3c, 0xbe, 0xd8, 0x03, 0x0f, - 0xf7, 0xa4, 0x82, 0x6e, 0xc3, 0x78, 0x1c, 0x78, 0xc2, 0x01, 0x21, 0x2a, 0x3f, 0x92, 0x1f, 0x18, - 0x6b, 0x43, 0xa1, 0xe9, 0x8a, 0x16, 0x55, 0x15, 0xeb, 0x74, 0xd0, 0x06, 0xdf, 0x63, 0x2c, 0x64, - 0x30, 0x89, 0xca, 0x8f, 0xe6, 0x0f, 0x8c, 0x8a, 0x2c, 0x6c, 0x6e, 0x41, 0x51, 0x13, 0xeb, 0x64, - 0xd0, 0x35, 0x98, 0x6d, 0x87, 0x6e, 0xc0, 0x16, 0xb6, 0x52, 0xc5, 0x97, 0xcd, 0xbc, 0x1f, 0xf5, - 0x34, 0x02, 0xee, 0xae, 0x43, 0x2f, 0x62, 0xb2, 0xb0, 0x7c, 0x91, 0x27, 0x91, 0xe3, 0xc2, 0x29, - 0x2f, 0xc3, 0x0a, 0x8a, 0xd6, 0x18, 0x5f, 0xe6, 0x57, 0xa6, 0xf2, 0x5c, 0x7e, 0x84, 0x0e, 0xfd, - 0x6a, 0xc5, 0x05, 0x17, 0xf5, 0x17, 0x27, 0x14, 0xe6, 0xbe, 0x1d, 0x66, 0xbb, 0x18, 0xef, 0x89, - 0x6c, 0xcb, 0xff, 0xe9, 0x30, 0x94, 0x94, 0xde, 0x15, 0x2d, 0x98, 0xea, 0xf4, 0x8b, 0x69, 0x75, - 0xfa, 0x18, 0x15, 0xff, 0x74, 0x0d, 0xfa, 0x86, 0x61, 0x19, 0x55, 0xc8, 0xcf, 0x05, 0xa7, 0x2b, - 0x1d, 0xfa, 0x3a, 0x82, 0x6a, 0xd7, 0xe8, 0xe2, 0xc0, 0x7a, 0xf9, 0xa1, 0x9e, 0x37, 0xf3, 0x01, - 0xd3, 0x5b, 0xd3, 0x9b, 0x66, 0x3b, 0x68, 0xd5, 0xea, 0xe9, 0x7c, 0xaf, 0x75, 0x5a, 0x88, 0x39, - 0x8c, 0xdd, 0x15, 0xa8, 0x94, 0xc0, 0xee, 0x0a, 0xa3, 0x0f, 0x78, 0x57, 0x90, 0x04, 0x70, 0x42, - 0x0b, 0x79, 0x30, 0xdb, 0x34, 0x53, 0xf5, 0x2a, 0xe7, 0xcf, 0xa7, 0xfa, 0x26, 0xcd, 0xed, 0x68, - 0x39, 0xfc, 0x96, 0xd3, 0x54, 0x70, 0x37, 0x61, 0xf4, 0x06, 0x8c, 0xbd, 0x1f, 0x44, 0x6c, 0x15, - 0x8b, 0xa3, 0x52, 0xba, 0xdb, 0x8d, 0xbd, 0x7d, 0xab, 0xc1, 0xca, 0x8f, 0x0f, 0x2b, 0xe3, 0xf5, - 0xa0, 0x25, 0xff, 0x62, 0x55, 0x01, 0xdd, 0x87, 0x0b, 0x06, 0x83, 0x51, 0xdd, 0x85, 0xc1, 0xbb, - 0xfb, 0x84, 0x68, 0xee, 0x42, 0x2d, 0x8b, 0x12, 0xce, 0x6e, 0xc0, 0xfe, 0x25, 0xae, 0x5d, 0x16, - 0x3a, 0x28, 0x12, 0x75, 0xbc, 0xb3, 0x48, 0xd4, 0xb5, 0x62, 0xa8, 0xc7, 0x1e, 0xf8, 0x05, 0xe3, - 0xd7, 0x2c, 0xf6, 0x82, 0xb1, 0x41, 0xf6, 0xda, 0x9e, 0x13, 0x9f, 0x85, 0x47, 0xc1, 0xdb, 0x30, - 0x16, 0x8b, 0xd6, 0x7a, 0xe5, 0x16, 0xd3, 0x3a, 0xc5, 0x5e, 0x71, 0xd4, 0xf9, 0x2a, 0x4b, 0xb1, - 0x22, 0x63, 0xff, 0x73, 0x3e, 0x03, 0x12, 0x72, 0x06, 0xaa, 0x8a, 0xaa, 0xa9, 0xaa, 0xa8, 0xf4, - 0xf9, 0x82, 0x1c, 0x95, 0xc5, 0x3f, 0x33, 0xfb, 0xcd, 0xae, 0x32, 0x9f, 0xf4, 0xa7, 0x33, 0xfb, - 0x47, 0x2c, 0x38, 0x9f, 0x65, 0x0d, 0x42, 0x65, 0x22, 0x7e, 0x91, 0x52, 0x4f, 0x89, 0x6a, 0x04, - 0xef, 0x88, 0x72, 0xac, 0x30, 0x06, 0x4e, 0xdb, 0x71, 0xb2, 0xd8, 0x72, 0xb7, 0xc0, 0xcc, 0xea, - 0x8c, 0xde, 0xe4, 0x2e, 0x42, 0x96, 0x4a, 0xbb, 0x7c, 0x32, 0xf7, 0x20, 0xfb, 0xa7, 0x0b, 0x70, - 0x9e, 0xbf, 0x05, 0x2c, 0xee, 0x07, 0x6e, 0xab, 0x1e, 0xb4, 0x84, 0xc3, 0xd4, 0x3b, 0x30, 0xd1, - 0xd6, 0x6e, 0xbf, 0xbd, 0xa2, 0x5b, 0xe9, 0xb7, 0xe4, 0xe4, 0x16, 0xa2, 0x97, 0x62, 0x83, 0x16, - 0x6a, 0xc1, 0x04, 0xd9, 0x77, 0x9b, 0x4a, 0xa1, 0x5c, 0x38, 0x31, 0x4b, 0x57, 0xad, 0xac, 0x68, - 0x74, 0xb0, 0x41, 0xf5, 0x21, 0x64, 0xe1, 0xb3, 0x7f, 0xd4, 0x82, 0x47, 0x73, 0x62, 0x61, 0xd1, - 0xe6, 0xee, 0xb1, 0x57, 0x17, 0x91, 0xd0, 0x4b, 0x35, 0xc7, 0xdf, 0x62, 0xb0, 0x80, 0xa2, 0x2f, - 0x01, 0xf0, 0xb7, 0x14, 0x2a, 0x94, 0x8b, 0x4f, 0x1f, 0x2c, 0x46, 0x8c, 0x16, 0x48, 0x44, 0xd6, - 0xc7, 0x1a, 0x2d, 0xfb, 0x27, 0x8b, 0x30, 0xcc, 0x74, 0xf7, 0x68, 0x15, 0x46, 0x77, 0x78, 0xe4, - 0xed, 0x41, 0x82, 0x7c, 0x27, 0xb7, 0x1b, 0x5e, 0x80, 0x65, 0x65, 0xb4, 0x06, 0xe7, 0x84, 0x53, - 0x5e, 0x95, 0x78, 0xce, 0x81, 0xbc, 0x24, 0xf3, 0x24, 0x58, 0x2a, 0xed, 0x5b, 0xad, 0x1b, 0x05, - 0x67, 0xd5, 0x43, 0x6f, 0x76, 0xc5, 0xdb, 0xe4, 0x31, 0xcb, 0x95, 0x48, 0xdd, 0x27, 
0xe6, 0xe6, - 0x1b, 0x30, 0xd9, 0xee, 0x52, 0x07, 0x0c, 0x27, 0xe2, 0xbe, 0xa9, 0x02, 0x30, 0x71, 0x99, 0x19, - 0x48, 0x87, 0x19, 0xbd, 0x6c, 0xec, 0x84, 0x24, 0xda, 0x09, 0xbc, 0x96, 0xc8, 0x4b, 0x9f, 0x98, - 0x81, 0xa4, 0xe0, 0xb8, 0xab, 0x06, 0xa5, 0xb2, 0xe5, 0xb8, 0x5e, 0x27, 0x24, 0x09, 0x95, 0x11, - 0x93, 0xca, 0x6a, 0x0a, 0x8e, 0xbb, 0x6a, 0xd0, 0x75, 0x74, 0x41, 0x24, 0x35, 0x97, 0xa1, 0x1a, - 0x94, 0x6d, 0xcf, 0xa8, 0x74, 0xd9, 0xe8, 0x11, 0x3e, 0x48, 0xd8, 0x56, 0xa8, 0xb4, 0xe8, 0x5a, - 0xca, 0x5c, 0xe1, 0xac, 0x21, 0xa9, 0x3c, 0x48, 0x6a, 0xed, 0xdf, 0xb3, 0xe0, 0x5c, 0x86, 0x0d, - 0x21, 0x67, 0x55, 0xdb, 0x6e, 0x14, 0xab, 0x44, 0x3f, 0x1a, 0xab, 0xe2, 0xe5, 0x58, 0x61, 0xd0, - 0xfd, 0xc0, 0x99, 0x61, 0x9a, 0x01, 0x0a, 0x1b, 0x1d, 0x01, 0x3d, 0x19, 0x03, 0x44, 0x97, 0x61, - 0xa8, 0x13, 0x91, 0x50, 0xe6, 0xa4, 0x96, 0xfc, 0x9b, 0x29, 0x18, 0x19, 0x84, 0x4a, 0x94, 0xdb, - 0x4a, 0xb7, 0xa7, 0x49, 0x94, 0x5c, 0xbb, 0xc7, 0x61, 0xf6, 0xd7, 0x8a, 0x70, 0x31, 0xd7, 0x46, - 0x98, 0x76, 0x69, 0x2f, 0xf0, 0xdd, 0x38, 0x50, 0xef, 0x42, 0x3c, 0xce, 0x0d, 0x69, 0xef, 0xac, - 0x89, 0x72, 0xac, 0x30, 0xd0, 0xd3, 0x30, 0xcc, 0xee, 0xcf, 0x5d, 0xa9, 0x8c, 0x96, 0xaa, 0x3c, - 0xfc, 0x02, 0x07, 0x0f, 0x9c, 0x26, 0xee, 0x29, 0x18, 0x6a, 0x07, 0x81, 0x97, 0x66, 0x46, 0xb4, - 0xbb, 0x41, 0xe0, 0x61, 0x06, 0x44, 0x9f, 0x11, 0xe3, 0x90, 0x7a, 0x08, 0xc1, 0x4e, 0x2b, 0x88, - 0xb4, 0xc1, 0x78, 0x16, 0x46, 0x77, 0xc9, 0x41, 0xe8, 0xfa, 0xdb, 0xe9, 0x07, 0xb2, 0x1b, 0xbc, - 0x18, 0x4b, 0xb8, 0x99, 0xc9, 0x63, 0xf4, 0xb4, 0xf3, 0xbb, 0x8d, 0xf5, 0x3d, 0xda, 0x7e, 0xa0, - 0x08, 0xd3, 0x78, 0xa9, 0xfa, 0xad, 0x89, 0xb8, 0xdd, 0x3d, 0x11, 0xa7, 0x9d, 0xdf, 0xad, 0xff, - 0x6c, 0xfc, 0xbc, 0x05, 0xd3, 0x2c, 0xda, 0xb5, 0x88, 0xd3, 0xe2, 0x06, 0xfe, 0x19, 0x88, 0x6e, - 0x4f, 0xc1, 0x70, 0x48, 0x1b, 0x4d, 0x27, 0x6d, 0x62, 0x3d, 0xc1, 0x1c, 0x86, 0x1e, 0x87, 0x21, - 0xd6, 0x05, 0x3a, 0x79, 0x13, 0x3c, 0xdf, 0x45, 0xd5, 0x89, 0x1d, 0xcc, 0x4a, 0x99, 0xc3, 0x2c, - 0x26, 0x6d, 0xcf, 0xe5, 0x9d, 0x4e, 0x14, 0xea, 0x9f, 0x0c, 0x87, 0xd9, 0xcc, 0xae, 0x7d, 0x34, - 0x87, 0xd9, 0x6c, 0x92, 0xbd, 0xaf, 0x45, 0xff, 0xa3, 0x00, 0x97, 0x32, 0xeb, 0x0d, 0xec, 0x30, - 0xdb, 0xbb, 0xf6, 0xe9, 0xd8, 0x39, 0x64, 0x9b, 0x1f, 0x14, 0xcf, 0xd0, 0xfc, 0x60, 0x68, 0x50, - 0xc9, 0x71, 0x78, 0x00, 0x3f, 0xd6, 0xcc, 0x21, 0xfb, 0x84, 0xf8, 0xb1, 0x66, 0xf6, 0x2d, 0xe7, - 0x5a, 0xf7, 0x67, 0x85, 0x9c, 0x6f, 0x61, 0x17, 0xbc, 0x2b, 0x94, 0xcf, 0x30, 0x60, 0x24, 0x24, - 0xe1, 0x09, 0xce, 0x63, 0x78, 0x19, 0x56, 0x50, 0xe4, 0x6a, 0x1e, 0xa1, 0x85, 0xfc, 0x94, 0x9e, - 0xb9, 0x4d, 0xcd, 0x9b, 0xef, 0x1f, 0x7a, 0x50, 0x99, 0xb4, 0x77, 0xe8, 0x9a, 0x76, 0x29, 0x2f, - 0x0e, 0x7e, 0x29, 0x9f, 0xc8, 0xbe, 0x90, 0xa3, 0x45, 0x98, 0xde, 0x73, 0x7d, 0xca, 0x36, 0x0f, - 0x4c, 0x51, 0x54, 0x05, 0x48, 0x58, 0x33, 0xc1, 0x38, 0x8d, 0x3f, 0xf7, 0x06, 0x4c, 0x3e, 0xb8, - 0x16, 0xf1, 0x1b, 0x45, 0x78, 0xac, 0xc7, 0xb6, 0xe7, 0xbc, 0xde, 0x98, 0x03, 0x8d, 0xd7, 0x77, - 0xcd, 0x43, 0x1d, 0xce, 0x6f, 0x75, 0x3c, 0xef, 0x80, 0x59, 0xf8, 0x91, 0x96, 0xc4, 0x10, 0xb2, - 0xa2, 0x0a, 0x65, 0xbf, 0x9a, 0x81, 0x83, 0x33, 0x6b, 0xa2, 0xb7, 0x00, 0x05, 0x22, 0x9f, 0x70, - 0x12, 0x2a, 0x87, 0x0d, 0x7c, 0x31, 0xd9, 0x8c, 0xb7, 0xba, 0x30, 0x70, 0x46, 0x2d, 0x2a, 0xf4, - 0xd3, 0x53, 0xe9, 0x40, 0x75, 0x2b, 0x25, 0xf4, 0x63, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0d, 0x66, - 0x9d, 0x7d, 0xc7, 0xe5, 0xa1, 0x13, 0x25, 0x01, 0x2e, 0xf5, 0x2b, 0xdd, 0xdd, 0x62, 0x1a, 0x01, - 0x77, 0xd7, 0x49, 0xb9, 0xa4, 0x8e, 0xe4, 0xbb, 0xa4, 0xf6, 0xe6, 0x8b, 0xfd, 0x54, 0xb1, 0xf6, - 0x7f, 0xb6, 
0xe8, 0xf1, 0xa5, 0xa5, 0xf9, 0xd7, 0x33, 0x2a, 0x28, 0x95, 0xa2, 0xe6, 0x1d, 0xaa, - 0xc6, 0x61, 0x59, 0x07, 0x62, 0x13, 0x97, 0x2f, 0x88, 0x28, 0x71, 0x54, 0x30, 0x44, 0x77, 0xe1, - 0xfe, 0xad, 0x30, 0xd0, 0x97, 0x61, 0xb4, 0xe5, 0xee, 0xbb, 0x51, 0x10, 0x8a, 0xcd, 0x72, 0x42, - 0x63, 0xf2, 0x84, 0x0f, 0x56, 0x39, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x50, 0x80, 0x49, 0xd9, 0xe2, - 0xdb, 0x9d, 0x20, 0x76, 0xce, 0xe0, 0x58, 0xbe, 0x66, 0x1c, 0xcb, 0x9f, 0xe9, 0xe5, 0x03, 0xcf, - 0xba, 0x94, 0x7b, 0x1c, 0xdf, 0x4a, 0x1d, 0xc7, 0xcf, 0xf4, 0x27, 0xd5, 0xfb, 0x18, 0xfe, 0x17, - 0x16, 0xcc, 0x1a, 0xf8, 0x67, 0x70, 0x1a, 0xac, 0x9a, 0xa7, 0xc1, 0x93, 0x7d, 0xbf, 0x21, 0xe7, - 0x14, 0xf8, 0x7a, 0x21, 0xd5, 0x77, 0xc6, 0xfd, 0xdf, 0x87, 0xa1, 0x1d, 0x27, 0x6c, 0xf5, 0x0a, - 0x00, 0xdc, 0x55, 0x69, 0xfe, 0xba, 0x13, 0xb6, 0x38, 0x0f, 0x7f, 0x41, 0x65, 0x21, 0x75, 0xc2, - 0x56, 0x5f, 0xbf, 0x1c, 0xd6, 0x14, 0x7a, 0x1d, 0x46, 0xa2, 0x66, 0xd0, 0x56, 0x36, 0x79, 0x97, - 0x79, 0x86, 0x52, 0x5a, 0x72, 0x7c, 0x58, 0x41, 0x66, 0x73, 0xb4, 0x18, 0x0b, 0xfc, 0xb9, 0x6d, - 0x28, 0xa9, 0xa6, 0x1f, 0xaa, 0x47, 0xc5, 0x6f, 0x17, 0xe1, 0x5c, 0xc6, 0xba, 0x40, 0x91, 0x31, - 0x5a, 0x2f, 0x0d, 0xb8, 0x9c, 0x3e, 0xe2, 0x78, 0x45, 0xec, 0xc6, 0xd2, 0x12, 0xf3, 0x3f, 0x70, - 0xa3, 0xb7, 0x23, 0x92, 0x6e, 0x94, 0x16, 0xf5, 0x6f, 0x94, 0x36, 0x76, 0x66, 0x43, 0x4d, 0x1b, - 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0xc7, 0x45, 0x38, 0x9f, 0x15, 0x3a, 0x03, 0x7d, 0x77, 0x2a, - 0x9d, 0xd0, 0x2b, 0x83, 0x06, 0xdd, 0xe0, 0x39, 0x86, 0x44, 0xac, 0xb1, 0x79, 0x33, 0xc1, 0x50, - 0xdf, 0x61, 0x16, 0x6d, 0x32, 0x47, 0xb9, 0x90, 0xa7, 0x81, 0x92, 0x5b, 0xfc, 0x73, 0x03, 0x77, - 0x40, 0xe4, 0x8f, 0x8a, 0x52, 0x8e, 0x72, 0xb2, 0xb8, 0xbf, 0xa3, 0x9c, 0x6c, 0x79, 0xce, 0x85, - 0x71, 0xed, 0x6b, 0x1e, 0xea, 0x8c, 0xef, 0xd2, 0x13, 0x45, 0xeb, 0xf7, 0x43, 0x9d, 0xf5, 0x1f, - 0xb5, 0x20, 0x65, 0x09, 0xa7, 0x54, 0x52, 0x56, 0xae, 0x4a, 0xea, 0x32, 0x0c, 0x85, 0x81, 0x47, - 0xd2, 0x19, 0x66, 0x70, 0xe0, 0x11, 0xcc, 0x20, 0x14, 0x23, 0x4e, 0x14, 0x12, 0x13, 0xfa, 0x65, - 0x4b, 0x5c, 0xa3, 0x9e, 0x82, 0x61, 0x8f, 0xec, 0x13, 0xa9, 0x8d, 0x50, 0x3c, 0xf9, 0x26, 0x2d, - 0xc4, 0x1c, 0x66, 0xff, 0xfc, 0x10, 0x3c, 0xd1, 0xd3, 0xd5, 0x94, 0x5e, 0x59, 0xb6, 0x9d, 0x98, - 0xdc, 0x73, 0x0e, 0xd2, 0xf1, 0xaf, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0xcc, 0x6e, 0x97, 0x87, 0xd0, - 0x4c, 0x29, 0xf0, 0x44, 0xe4, 0x4c, 0x01, 0x35, 0x15, 0x47, 0xc5, 0xd3, 0x50, 0x1c, 0x5d, 0x05, - 0x88, 0x22, 0x6f, 0xc5, 0xa7, 0x12, 0x58, 0x4b, 0x18, 0x04, 0x27, 0xa1, 0x56, 0x1b, 0x37, 0x05, - 0x04, 0x6b, 0x58, 0xa8, 0x0a, 0x33, 0xed, 0x30, 0x88, 0xb9, 0x3e, 0xb4, 0xca, 0x4d, 0x51, 0x86, - 0x4d, 0x2f, 0xbf, 0x7a, 0x0a, 0x8e, 0xbb, 0x6a, 0xa0, 0x57, 0x61, 0x5c, 0x78, 0xfe, 0xd5, 0x83, - 0xc0, 0x13, 0xaa, 0x1a, 0x65, 0xd8, 0xd0, 0x48, 0x40, 0x58, 0xc7, 0xd3, 0xaa, 0x31, 0x25, 0xeb, - 0x68, 0x66, 0x35, 0xae, 0x68, 0xd5, 0xf0, 0x52, 0x61, 0x74, 0xc6, 0x06, 0x0a, 0xa3, 0x93, 0x28, - 0xaf, 0x4a, 0x03, 0xbf, 0x2b, 0x41, 0x5f, 0x75, 0xcf, 0xcf, 0x0c, 0xc1, 0x39, 0xb1, 0x70, 0x1e, - 0xf6, 0x72, 0xb9, 0xdd, 0xbd, 0x5c, 0x4e, 0x43, 0xbd, 0xf5, 0xad, 0x35, 0x73, 0xd6, 0x6b, 0xe6, - 0x97, 0x8a, 0x30, 0xc2, 0xa7, 0xe2, 0x0c, 0x64, 0xf8, 0x55, 0xa1, 0xf4, 0xeb, 0x11, 0x40, 0x86, - 0xf7, 0x65, 0xbe, 0xea, 0xc4, 0x0e, 0x3f, 0xbf, 0x14, 0x1b, 0x4d, 0xd4, 0x83, 0x68, 0xde, 0x60, - 0xb4, 0x73, 0x29, 0xad, 0x16, 0x70, 0x1a, 0x1a, 0xdb, 0xfd, 0x0a, 0x40, 0xc4, 0x52, 0xe8, 0x53, - 0x1a, 0x22, 0x14, 0xd1, 0x73, 0x3d, 0x5a, 0x6f, 0x28, 0x64, 0xde, 0x87, 0x64, 0x09, 0x2a, 0x00, - 0xd6, 0x28, 0xce, 0xbd, 0x06, 0x25, 
0x85, 0xdc, 0x4f, 0x05, 0x30, 0xa1, 0x9f, 0x7a, 0x5f, 0x84, - 0xe9, 0x54, 0x5b, 0x27, 0xd2, 0x20, 0xfc, 0x82, 0x05, 0xd3, 0xbc, 0xcb, 0x2b, 0xfe, 0xbe, 0xd8, - 0xec, 0x1f, 0xc0, 0x79, 0x2f, 0x63, 0xd3, 0x89, 0x19, 0x1d, 0x7c, 0x93, 0x2a, 0x8d, 0x41, 0x16, - 0x14, 0x67, 0xb6, 0x81, 0xae, 0xc0, 0x18, 0x77, 0x74, 0x71, 0x3c, 0xe1, 0x9c, 0x30, 0xc1, 0x53, - 0x52, 0xf0, 0x32, 0xac, 0xa0, 0xf6, 0xef, 0x58, 0x30, 0xcb, 0x7b, 0x7e, 0x83, 0x1c, 0xa8, 0xdb, - 0xf1, 0xc7, 0xd9, 0x77, 0x91, 0x71, 0xa3, 0x90, 0x93, 0x71, 0x43, 0xff, 0xb4, 0x62, 0xcf, 0x4f, - 0xfb, 0x69, 0x0b, 0xc4, 0x0a, 0x3c, 0x83, 0x7b, 0xe0, 0xb7, 0x9b, 0xf7, 0xc0, 0xb9, 0xfc, 0x45, - 0x9d, 0x73, 0x01, 0xfc, 0x53, 0x0b, 0x66, 0x38, 0x42, 0xf2, 0x10, 0xf9, 0xb1, 0xce, 0xc3, 0x20, - 0x69, 0xe0, 0x54, 0xde, 0xed, 0xec, 0x8f, 0x32, 0x26, 0x6b, 0xa8, 0xe7, 0x64, 0xb5, 0xe4, 0x06, - 0x3a, 0x41, 0x7a, 0xc3, 0x13, 0x07, 0x89, 0xb5, 0xff, 0xc8, 0x02, 0xc4, 0x9b, 0x31, 0xce, 0x65, - 0x7a, 0xda, 0xb1, 0x52, 0x4d, 0x13, 0x94, 0xb0, 0x1a, 0x05, 0xc1, 0x1a, 0xd6, 0xa9, 0x0c, 0x4f, - 0xea, 0x35, 0xb9, 0xd8, 0xff, 0x35, 0xf9, 0x04, 0x23, 0xfa, 0xd7, 0x87, 0x20, 0x6d, 0x09, 0x8d, - 0xee, 0xc0, 0x44, 0xd3, 0x69, 0x3b, 0x9b, 0xae, 0xe7, 0xc6, 0x2e, 0x89, 0x7a, 0x99, 0xa1, 0x2c, - 0x6b, 0x78, 0xe2, 0x9d, 0x50, 0x2b, 0xc1, 0x06, 0x1d, 0x34, 0x0f, 0xd0, 0x0e, 0xdd, 0x7d, 0xd7, - 0x23, 0xdb, 0xec, 0x2a, 0xcc, 0xdc, 0xa1, 0xb8, 0x6d, 0x85, 0x2c, 0xc5, 0x1a, 0x46, 0x86, 0xfb, - 0x4c, 0xf1, 0xe1, 0xb9, 0xcf, 0x0c, 0x9d, 0xd0, 0x7d, 0x66, 0x78, 0x20, 0xf7, 0x19, 0x0c, 0x8f, - 0xc8, 0xb3, 0x9b, 0xfe, 0x5f, 0x75, 0x3d, 0x22, 0x04, 0x36, 0xee, 0x24, 0x35, 0x77, 0x74, 0x58, - 0x79, 0x04, 0x67, 0x62, 0xe0, 0x9c, 0x9a, 0xe8, 0x4b, 0x50, 0x76, 0x3c, 0x2f, 0xb8, 0xa7, 0x46, - 0x6d, 0x25, 0x6a, 0x3a, 0x5e, 0x12, 0x33, 0x7d, 0x6c, 0xe9, 0xf1, 0xa3, 0xc3, 0x4a, 0x79, 0x31, - 0x07, 0x07, 0xe7, 0xd6, 0xb6, 0x77, 0xe1, 0x5c, 0x83, 0x84, 0x32, 0x63, 0xaa, 0xda, 0x62, 0x1b, - 0x50, 0x0a, 0x53, 0x4c, 0x65, 0xa0, 0x58, 0x25, 0x5a, 0x3c, 0x4b, 0xc9, 0x44, 0x12, 0x42, 0xf6, - 0x9f, 0x58, 0x30, 0x2a, 0xac, 0xab, 0xcf, 0x40, 0x96, 0x59, 0x34, 0xf4, 0x91, 0x95, 0x6c, 0xc6, - 0xcb, 0x3a, 0x93, 0xab, 0x89, 0xac, 0xa5, 0x34, 0x91, 0x4f, 0xf6, 0x22, 0xd2, 0x5b, 0x07, 0xf9, - 0xc3, 0x45, 0x98, 0x32, 0x2d, 0xcb, 0xcf, 0x60, 0x08, 0xd6, 0x61, 0x34, 0x12, 0x6e, 0x0c, 0x85, - 0x7c, 0xfb, 0xd5, 0xf4, 0x24, 0x26, 0x56, 0x2e, 0xc2, 0x71, 0x41, 0x12, 0xc9, 0xf4, 0x8f, 0x28, - 0x3e, 0x44, 0xff, 0x88, 0x7e, 0xc6, 0xfd, 0x43, 0xa7, 0x61, 0xdc, 0x6f, 0xff, 0x32, 0x63, 0xfe, - 0x7a, 0xf9, 0x19, 0xc8, 0x05, 0xd7, 0xcc, 0x63, 0xc2, 0xee, 0xb1, 0xb2, 0x44, 0xa7, 0x72, 0xe4, - 0x83, 0x7f, 0x6c, 0xc1, 0xb8, 0x40, 0x3c, 0x83, 0x6e, 0x7f, 0x87, 0xd9, 0xed, 0xc7, 0x7a, 0x74, - 0x3b, 0xa7, 0xbf, 0x7f, 0xb7, 0xa0, 0xfa, 0x5b, 0x0f, 0xc2, 0x78, 0xa0, 0x1c, 0x1a, 0x63, 0xf4, - 0x36, 0x18, 0x34, 0x03, 0x4f, 0x1c, 0xe6, 0x8f, 0x27, 0x7e, 0xb2, 0xbc, 0xfc, 0x58, 0xfb, 0x8d, - 0x15, 0x36, 0x73, 0xe3, 0x0c, 0xc2, 0x58, 0x1c, 0xa0, 0x89, 0x1b, 0x67, 0x10, 0xc6, 0x98, 0x41, - 0x50, 0x0b, 0x20, 0x76, 0xc2, 0x6d, 0x12, 0xd3, 0x32, 0xe1, 0x72, 0x9f, 0xbf, 0x0b, 0x3b, 0xb1, - 0xeb, 0xcd, 0xbb, 0x7e, 0x1c, 0xc5, 0xe1, 0x7c, 0xcd, 0x8f, 0x6f, 0x85, 0xfc, 0x6e, 0xa0, 0x39, - 0xbe, 0x2a, 0x5a, 0x58, 0xa3, 0x2b, 0x3d, 0xaf, 0x58, 0x1b, 0xc3, 0xe6, 0x43, 0xe1, 0xba, 0x28, - 0xc7, 0x0a, 0xc3, 0x7e, 0x8d, 0xf1, 0x64, 0x36, 0x40, 0x27, 0xf3, 0x49, 0xfd, 0xcd, 0x31, 0x35, - 0xb4, 0xec, 0x95, 0xa0, 0xaa, 0x7b, 0xbe, 0xf6, 0x66, 0x81, 0xb4, 0x61, 0xdd, 0x2d, 0x20, 0x71, - 0x8f, 0x45, 0xdf, 0xd9, 0xf5, 0x7e, 0xfc, 0x62, 0x1f, 0x5e, 
0x7a, 0x82, 0x17, 0x63, 0x16, 0x88, - 0x95, 0x05, 0xac, 0xac, 0xd5, 0xd3, 0x59, 0x4e, 0x96, 0x25, 0x00, 0x27, 0x38, 0x68, 0x41, 0xdc, - 0x2c, 0xb9, 0x7e, 0xee, 0xb1, 0xd4, 0xcd, 0x52, 0x7e, 0xbe, 0x76, 0xb5, 0x7c, 0x09, 0xc6, 0x55, - 0xe6, 0xb8, 0x3a, 0x4f, 0xc0, 0x25, 0x02, 0x10, 0xac, 0x24, 0xc5, 0x58, 0xc7, 0x41, 0x1b, 0x30, - 0x1d, 0xf1, 0xb4, 0x76, 0xd2, 0x19, 0x4a, 0xe8, 0x0d, 0x9e, 0x93, 0xef, 0xce, 0x0d, 0x13, 0x7c, - 0xcc, 0x8a, 0xf8, 0x66, 0x95, 0xee, 0x53, 0x69, 0x12, 0xe8, 0x4d, 0x98, 0xf2, 0xf4, 0xf4, 0xde, - 0x75, 0xa1, 0x56, 0x50, 0x66, 0x99, 0x46, 0xf2, 0xef, 0x3a, 0x4e, 0x61, 0x53, 0x21, 0x40, 0x2f, - 0x11, 0xd1, 0xcb, 0x1c, 0x7f, 0x9b, 0x44, 0x22, 0xef, 0x15, 0x13, 0x02, 0x6e, 0xe6, 0xe0, 0xe0, - 0xdc, 0xda, 0xe8, 0x75, 0x98, 0x90, 0x9f, 0xaf, 0x39, 0x07, 0x26, 0xc6, 0xbf, 0x1a, 0x0c, 0x1b, - 0x98, 0xe8, 0x1e, 0x5c, 0x90, 0xff, 0x37, 0x42, 0x67, 0x6b, 0xcb, 0x6d, 0x0a, 0xdf, 0xcc, 0x71, - 0x46, 0x62, 0x51, 0x7a, 0x42, 0xac, 0x64, 0x21, 0x1d, 0x1f, 0x56, 0x2e, 0x8b, 0x51, 0xcb, 0x84, - 0xb3, 0x49, 0xcc, 0xa6, 0x8f, 0xd6, 0xe0, 0xdc, 0x0e, 0x71, 0xbc, 0x78, 0x67, 0x79, 0x87, 0x34, - 0x77, 0xe5, 0x26, 0x62, 0x2e, 0x87, 0x9a, 0xc9, 0xec, 0xf5, 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, - 0x0b, 0xe5, 0x76, 0x67, 0xd3, 0x73, 0xa3, 0x9d, 0xf5, 0x20, 0x66, 0x4f, 0xdd, 0x2a, 0xf1, 0x9a, - 0xf0, 0x4d, 0x54, 0xee, 0x96, 0xf5, 0x1c, 0x3c, 0x9c, 0x4b, 0x01, 0x7d, 0x00, 0x17, 0x52, 0x8b, - 0x41, 0x78, 0x4a, 0x4d, 0xe5, 0xc7, 0x82, 0x6c, 0x64, 0x55, 0xe0, 0x1e, 0xb3, 0x99, 0x20, 0x9c, - 0xdd, 0xc4, 0x47, 0x33, 0x80, 0x78, 0x9f, 0x56, 0xd6, 0xa4, 0x1b, 0xf4, 0x55, 0x98, 0xd0, 0x57, - 0x91, 0x38, 0x60, 0x9e, 0xee, 0x97, 0xca, 0x5e, 0xc8, 0x46, 0x6a, 0x45, 0xe9, 0x30, 0x6c, 0x50, - 0xb4, 0x09, 0x64, 0x7f, 0x1f, 0xba, 0x09, 0x63, 0x4d, 0xcf, 0x25, 0x7e, 0x5c, 0xab, 0xf7, 0xf2, - 0xa9, 0x5f, 0x16, 0x38, 0x62, 0xc0, 0x44, 0xf0, 0x3c, 0x5e, 0x86, 0x15, 0x05, 0xfb, 0x57, 0x0b, - 0x50, 0xe9, 0x13, 0x89, 0x31, 0xa5, 0x03, 0xb4, 0x06, 0xd2, 0x01, 0x2e, 0xca, 0x34, 0x72, 0xeb, - 0xa9, 0xfb, 0x67, 0x2a, 0x45, 0x5c, 0x72, 0x0b, 0x4d, 0xe3, 0x0f, 0x6c, 0x37, 0xa9, 0xab, 0x11, - 0x87, 0xfa, 0x5a, 0xf4, 0x1a, 0xcf, 0x07, 0xc3, 0x83, 0x4b, 0xf4, 0xb9, 0xaa, 0x60, 0xfb, 0x97, - 0x0b, 0x70, 0x41, 0x0d, 0xe1, 0x37, 0xef, 0xc0, 0xdd, 0xee, 0x1e, 0xb8, 0x53, 0x50, 0xa4, 0xdb, - 0xb7, 0x60, 0xa4, 0x71, 0x10, 0x35, 0x63, 0x6f, 0x00, 0x01, 0xe8, 0x29, 0x33, 0xb6, 0x8c, 0x3a, - 0xa6, 0x8d, 0xf8, 0x32, 0x7f, 0xc5, 0x82, 0xe9, 0x8d, 0xe5, 0x7a, 0x23, 0x68, 0xee, 0x92, 0x78, - 0x91, 0xab, 0x89, 0xb0, 0x90, 0x7f, 0xac, 0x07, 0x94, 0x6b, 0xb2, 0x24, 0xa6, 0xcb, 0x30, 0xb4, - 0x13, 0x44, 0x71, 0xfa, 0x95, 0xed, 0x7a, 0x10, 0xc5, 0x98, 0x41, 0xec, 0xdf, 0xb5, 0x60, 0x98, - 0x25, 0x3f, 0xed, 0x97, 0x24, 0x77, 0x90, 0xef, 0x42, 0xaf, 0xc2, 0x08, 0xd9, 0xda, 0x22, 0xcd, - 0x58, 0xcc, 0xaa, 0xf4, 0xae, 0x1b, 0x59, 0x61, 0xa5, 0xf4, 0xd0, 0x67, 0x8d, 0xf1, 0xbf, 0x58, - 0x20, 0xa3, 0xbb, 0x50, 0x8a, 0xdd, 0x3d, 0xb2, 0xd8, 0x6a, 0x89, 0x77, 0x8a, 0x07, 0x70, 0x66, - 0xdc, 0x90, 0x04, 0x70, 0x42, 0xcb, 0xfe, 0x5a, 0x01, 0x20, 0x71, 0xe8, 0xed, 0xf7, 0x89, 0x4b, - 0x5d, 0x79, 0x80, 0x9f, 0xce, 0xc8, 0x03, 0x8c, 0x12, 0x82, 0x19, 0x59, 0x80, 0xd5, 0x30, 0x15, - 0x07, 0x1a, 0xa6, 0xa1, 0x93, 0x0c, 0xd3, 0x32, 0xcc, 0x26, 0x0e, 0xc9, 0x66, 0x74, 0x06, 0x16, - 0x99, 0x7d, 0x23, 0x0d, 0xc4, 0xdd, 0xf8, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0x0d, 0x06, 0x58, 0xcc, - 0xef, 0xc8, 0x94, 0x9d, 0x46, 0x40, 0xd7, 0xcb, 0xf9, 0xfe, 0x17, 0x22, 0x8c, 0xab, 0x3a, 0x3c, - 0x8c, 0xe0, 0xad, 0x06, 0x2d, 0xbb, 0x05, 0x02, 0x5a, 0x25, 0x4c, 0xc9, 0xd0, 0xbf, 
0x37, 0x57, - 0x01, 0x5a, 0x0c, 0x57, 0x4b, 0x01, 0xa8, 0x58, 0x55, 0x55, 0x41, 0xb0, 0x86, 0x65, 0xff, 0xcd, - 0x02, 0x8c, 0xcb, 0x00, 0xa2, 0xf4, 0x1e, 0xdf, 0xbf, 0x95, 0x13, 0x65, 0x0f, 0x60, 0x39, 0x33, - 0x29, 0x61, 0x15, 0x64, 0x5e, 0xcf, 0x99, 0x29, 0x01, 0x38, 0xc1, 0x41, 0xcf, 0xc2, 0x68, 0xd4, - 0xd9, 0x64, 0xe8, 0x29, 0x23, 0xfa, 0x06, 0x2f, 0xc6, 0x12, 0x8e, 0xbe, 0x04, 0x33, 0xbc, 0x5e, - 0x18, 0xb4, 0x9d, 0x6d, 0xae, 0x41, 0x1a, 0x56, 0x5e, 0x6d, 0x33, 0x6b, 0x29, 0xd8, 0xf1, 0x61, - 0xe5, 0x7c, 0xba, 0x8c, 0xe9, 0x1e, 0xbb, 0xa8, 0xd0, 0x7d, 0x31, 0x93, 0x76, 0x98, 0x41, 0xd7, - 0x61, 0x84, 0xb3, 0x3c, 0xc1, 0x82, 0x7a, 0xbc, 0x28, 0x69, 0x6e, 0x36, 0x2c, 0x9c, 0xba, 0xe0, - 0x9a, 0xa2, 0x3e, 0x7a, 0x17, 0xc6, 0x5b, 0xc1, 0x3d, 0xff, 0x9e, 0x13, 0xb6, 0x16, 0xeb, 0x35, - 0xb1, 0x6a, 0x32, 0x25, 0xa7, 0x6a, 0x82, 0xa6, 0xbb, 0xee, 0x30, 0xed, 0x69, 0x02, 0xc2, 0x3a, - 0x39, 0xb4, 0xc1, 0x62, 0x3c, 0xf1, 0xa4, 0xf6, 0xbd, 0xac, 0xce, 0x54, 0x1e, 0x7c, 0x8d, 0xf2, - 0xa4, 0x08, 0x04, 0x25, 0x52, 0xe2, 0x27, 0x84, 0xec, 0x0f, 0xcf, 0x81, 0xb1, 0x5a, 0x8d, 0xec, - 0x01, 0xd6, 0x29, 0x65, 0x0f, 0xc0, 0x30, 0x46, 0xf6, 0xda, 0xf1, 0x41, 0xd5, 0x0d, 0x7b, 0xa5, - 0x9f, 0x59, 0x11, 0x38, 0xdd, 0x34, 0x25, 0x04, 0x2b, 0x3a, 0xd9, 0x29, 0x1e, 0x8a, 0x1f, 0x63, - 0x8a, 0x87, 0xa1, 0x33, 0x4c, 0xf1, 0xb0, 0x0e, 0xa3, 0xdb, 0x6e, 0x8c, 0x49, 0x3b, 0x10, 0xc7, - 0x7d, 0xe6, 0x4a, 0xb8, 0xc6, 0x51, 0xba, 0x03, 0x8c, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x5b, 0x6a, - 0x0f, 0x8c, 0xe4, 0x4b, 0xcb, 0xdd, 0x8f, 0x0f, 0x99, 0xbb, 0x40, 0xa4, 0x74, 0x18, 0x7d, 0xd0, - 0x94, 0x0e, 0xab, 0x32, 0x11, 0xc3, 0x58, 0xbe, 0x91, 0x26, 0xcb, 0xb3, 0xd0, 0x27, 0xfd, 0x82, - 0x91, 0xb2, 0xa2, 0x74, 0x7a, 0x29, 0x2b, 0xbe, 0xdf, 0x82, 0x0b, 0xed, 0xac, 0xec, 0x2d, 0x22, - 0x91, 0xc2, 0xab, 0x03, 0xa7, 0xa7, 0x31, 0x1a, 0x64, 0xd7, 0xa6, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, - 0x1d, 0xe8, 0x70, 0xb3, 0x25, 0xb2, 0x2f, 0x3c, 0x95, 0x93, 0xfb, 0xa2, 0x47, 0xc6, 0x8b, 0x8d, - 0x8c, 0x8c, 0x0b, 0x9f, 0xce, 0xcb, 0xb8, 0x30, 0x70, 0x9e, 0x85, 0x24, 0xeb, 0xc5, 0xe4, 0x47, - 0xce, 0x7a, 0xf1, 0x96, 0xca, 0x7a, 0xd1, 0x23, 0x92, 0x0e, 0xcf, 0x69, 0xd1, 0x37, 0xd7, 0x85, - 0x96, 0xaf, 0x62, 0xfa, 0x74, 0xf2, 0x55, 0x18, 0xcc, 0x9e, 0xa7, 0x4c, 0x78, 0xbe, 0x0f, 0xb3, - 0x37, 0xe8, 0xf6, 0x66, 0xf7, 0x3c, 0x37, 0xc7, 0xec, 0x03, 0xe5, 0xe6, 0xb8, 0xa3, 0xe7, 0xba, - 0x40, 0x7d, 0x92, 0x39, 0x50, 0xa4, 0x01, 0x33, 0x5c, 0xdc, 0xd1, 0x8f, 0xa0, 0x73, 0xf9, 0x74, - 0xd5, 0x49, 0xd3, 0x4d, 0x37, 0xeb, 0x10, 0xea, 0xce, 0x9c, 0x71, 0xfe, 0x6c, 0x32, 0x67, 0x5c, - 0x38, 0xf5, 0xcc, 0x19, 0x8f, 0x9c, 0x41, 0xe6, 0x8c, 0x47, 0x3f, 0xd6, 0xcc, 0x19, 0xe5, 0x87, - 0x90, 0x39, 0x63, 0x3d, 0xc9, 0x9c, 0x71, 0x31, 0x7f, 0x4a, 0x32, 0xac, 0xd2, 0x72, 0xf2, 0x65, - 0xdc, 0x81, 0x52, 0x5b, 0xfa, 0x54, 0x8b, 0x50, 0x3f, 0xd9, 0x29, 0xfb, 0xb2, 0x1c, 0xaf, 0xf9, - 0x94, 0x28, 0x10, 0x4e, 0x48, 0x51, 0xba, 0x49, 0xfe, 0x8c, 0xc7, 0x7a, 0x28, 0xc6, 0xb2, 0x54, - 0x0e, 0xf9, 0x59, 0x33, 0xec, 0xbf, 0x5a, 0x80, 0x4b, 0xbd, 0xd7, 0x75, 0xa2, 0xaf, 0xa8, 0x27, - 0xfa, 0xf5, 0x94, 0xbe, 0x82, 0x5f, 0x02, 0x12, 0xac, 0x81, 0x03, 0x4f, 0x5c, 0x83, 0x59, 0x65, - 0x8e, 0xe6, 0xb9, 0xcd, 0x03, 0x2d, 0x95, 0x9f, 0x72, 0x8d, 0x69, 0xa4, 0x11, 0x70, 0x77, 0x1d, - 0xb4, 0x08, 0xd3, 0x46, 0x61, 0xad, 0x2a, 0x84, 0x7d, 0xa5, 0x20, 0x69, 0x98, 0x60, 0x9c, 0xc6, - 0xb7, 0xbf, 0x6e, 0xc1, 0xa3, 0x39, 0x21, 0xab, 0x07, 0x8e, 0xab, 0xb0, 0x05, 0xd3, 0x6d, 0xb3, - 0x6a, 0x9f, 0xf0, 0x2b, 0x46, 0x60, 0x6c, 0xd5, 0xd7, 0x14, 0x00, 0xa7, 0x89, 0x2e, 0x5d, 0xf9, - 0xf5, 0xdf, 
0xbf, 0xf4, 0xa9, 0xdf, 0xfa, 0xfd, 0x4b, 0x9f, 0xfa, 0x9d, 0xdf, 0xbf, 0xf4, 0xa9, - 0xbf, 0x78, 0x74, 0xc9, 0xfa, 0xf5, 0xa3, 0x4b, 0xd6, 0x6f, 0x1d, 0x5d, 0xb2, 0x7e, 0xe7, 0xe8, - 0x92, 0xf5, 0x7b, 0x47, 0x97, 0xac, 0xaf, 0xfd, 0xc1, 0xa5, 0x4f, 0xbd, 0x53, 0xd8, 0x7f, 0xe9, - 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x18, 0x48, 0x7e, 0xc6, 0x9b, 0xdf, 0x00, 0x00, + // 12392 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x90, 0x24, 0x57, + 0x56, 0x18, 0xbc, 0x59, 0xd5, 0xaf, 0x3a, 0xfd, 0xbe, 0x33, 0x23, 0xd5, 0xb4, 0xa4, 0xa9, 0x51, + 0x6a, 0x77, 0x34, 0x7a, 0x75, 0xaf, 0x46, 0xd2, 0x6a, 0x58, 0xed, 0x0a, 0xba, 0xbb, 0xba, 0x67, + 0x4a, 0x33, 0xdd, 0x53, 0xba, 0xd5, 0x33, 0xb3, 0x2b, 0xc4, 0x7e, 0x9b, 0x5d, 0x75, 0xbb, 0x3b, + 0xd5, 0xd9, 0x99, 0xa5, 0xcc, 0xac, 0x9e, 0x69, 0x05, 0x44, 0x7c, 0x96, 0x01, 0x3f, 0xe0, 0xc7, + 0x86, 0x4d, 0xd8, 0x18, 0x08, 0x1c, 0x61, 0xe3, 0x80, 0x35, 0xb6, 0xc3, 0x18, 0x0c, 0x18, 0xb0, + 0x8d, 0xb1, 0xc3, 0x01, 0x7f, 0x30, 0x98, 0x1f, 0x4b, 0x04, 0xe1, 0x36, 0x0c, 0x84, 0x1d, 0xfc, + 0xb0, 0xc3, 0x36, 0xbf, 0x68, 0x63, 0xe3, 0xb8, 0xcf, 0xbc, 0x37, 0x2b, 0xb3, 0xaa, 0x7a, 0xd4, + 0xd3, 0x12, 0x1b, 0xfb, 0xaf, 0xea, 0x9e, 0x73, 0xcf, 0xbd, 0x79, 0x1f, 0xe7, 0x9e, 0x7b, 0xee, + 0x79, 0xc0, 0x1b, 0xbb, 0x57, 0xa3, 0x79, 0x37, 0x58, 0xd8, 0xed, 0x6c, 0x92, 0xd0, 0x27, 0x31, + 0x89, 0x16, 0xf6, 0x89, 0xdf, 0x0a, 0xc2, 0x05, 0x01, 0x70, 0xda, 0xee, 0x42, 0x33, 0x08, 0xc9, + 0xc2, 0xfe, 0xcb, 0x0b, 0xdb, 0xc4, 0x27, 0xa1, 0x13, 0x93, 0xd6, 0x7c, 0x3b, 0x0c, 0xe2, 0x00, + 0x21, 0x8e, 0x33, 0xef, 0xb4, 0xdd, 0x79, 0x8a, 0x33, 0xbf, 0xff, 0xf2, 0xdc, 0x4b, 0xdb, 0x6e, + 0xbc, 0xd3, 0xd9, 0x9c, 0x6f, 0x06, 0x7b, 0x0b, 0xdb, 0xc1, 0x76, 0xb0, 0xc0, 0x50, 0x37, 0x3b, + 0x5b, 0xec, 0x1f, 0xfb, 0xc3, 0x7e, 0x71, 0x12, 0x73, 0x6b, 0x49, 0x33, 0xe4, 0x7e, 0x4c, 0xfc, + 0xc8, 0x0d, 0xfc, 0xe8, 0x25, 0xa7, 0xed, 0x46, 0x24, 0xdc, 0x27, 0xe1, 0x42, 0x7b, 0x77, 0x9b, + 0xc2, 0x22, 0x13, 0x61, 0x61, 0xff, 0xe5, 0x4d, 0x12, 0x3b, 0x5d, 0x3d, 0x9a, 0x7b, 0x35, 0x21, + 0xb7, 0xe7, 0x34, 0x77, 0x5c, 0x9f, 0x84, 0x07, 0x92, 0xc6, 0x42, 0x48, 0xa2, 0xa0, 0x13, 0x36, + 0xc9, 0xb1, 0x6a, 0x45, 0x0b, 0x7b, 0x24, 0x76, 0x32, 0xbe, 0x7e, 0x6e, 0x21, 0xaf, 0x56, 0xd8, + 0xf1, 0x63, 0x77, 0xaf, 0xbb, 0x99, 0xcf, 0xf5, 0xab, 0x10, 0x35, 0x77, 0xc8, 0x9e, 0xd3, 0x55, + 0xef, 0x95, 0xbc, 0x7a, 0x9d, 0xd8, 0xf5, 0x16, 0x5c, 0x3f, 0x8e, 0xe2, 0x30, 0x5d, 0xc9, 0xfe, + 0x86, 0x05, 0x17, 0x17, 0xef, 0x36, 0x56, 0x3c, 0x27, 0x8a, 0xdd, 0xe6, 0x92, 0x17, 0x34, 0x77, + 0x1b, 0x71, 0x10, 0x92, 0x3b, 0x81, 0xd7, 0xd9, 0x23, 0x0d, 0x36, 0x10, 0xe8, 0x45, 0x18, 0xdb, + 0x67, 0xff, 0x6b, 0xd5, 0xb2, 0x75, 0xd1, 0xba, 0x5c, 0x5a, 0x9a, 0xf9, 0xf5, 0xc3, 0xca, 0xa7, + 0x1e, 0x1c, 0x56, 0xc6, 0xee, 0x88, 0x72, 0xac, 0x30, 0xd0, 0x25, 0x18, 0xd9, 0x8a, 0x36, 0x0e, + 0xda, 0xa4, 0x5c, 0x60, 0xb8, 0x53, 0x02, 0x77, 0x64, 0xb5, 0x41, 0x4b, 0xb1, 0x80, 0xa2, 0x05, + 0x28, 0xb5, 0x9d, 0x30, 0x76, 0x63, 0x37, 0xf0, 0xcb, 0xc5, 0x8b, 0xd6, 0xe5, 0xe1, 0xa5, 0x59, + 0x81, 0x5a, 0xaa, 0x4b, 0x00, 0x4e, 0x70, 0x68, 0x37, 0x42, 0xe2, 0xb4, 0x6e, 0xf9, 0xde, 0x41, + 0x79, 0xe8, 0xa2, 0x75, 0x79, 0x2c, 0xe9, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0x3f, 0x52, 0x80, + 0xb1, 0xc5, 0xad, 0x2d, 0xd7, 0x77, 0xe3, 0x03, 0x74, 0x07, 0x26, 0xfc, 0xa0, 0x45, 0xe4, 0x7f, + 0xf6, 0x15, 0xe3, 0x57, 0x2e, 0xce, 0x77, 0xaf, 0xcc, 0xf9, 0x75, 0x0d, 0x6f, 0x69, 0xe6, 0xc1, + 0x61, 0x65, 0x42, 0x2f, 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xbc, 0x1d, 0xb4, 0x14, 0xd9, 0x02, 0x23, + 
+ [machine-generated byte-array data elided: the remainder of this hunk consists solely of hexadecimal byte literals (0x.., 0x.., ...) added by the code generator; the original per-line diff structure was lost in extraction and the literal byte values carry no human-reviewable content]
0x61, 0x30, 0x7b, 0x9e, 0x32, 0xe1, 0x85, 0x3e, 0xcc, 0xde, 0xa0, 0xdb, 0x9b, 0xdd, 0xf3, 0xdc, + 0x1c, 0xb3, 0x0f, 0x95, 0x9b, 0xe3, 0x8e, 0x9e, 0xeb, 0x02, 0xf5, 0x49, 0xe6, 0x40, 0x91, 0x06, + 0xcc, 0x70, 0x71, 0x47, 0x3f, 0x82, 0xce, 0xe4, 0xd3, 0x55, 0x27, 0x4d, 0x37, 0xdd, 0xac, 0x43, + 0xa8, 0x3b, 0x73, 0xc6, 0xd9, 0xd3, 0xc9, 0x9c, 0x71, 0xee, 0xc4, 0x33, 0x67, 0x3c, 0x76, 0x0a, + 0x99, 0x33, 0x1e, 0xff, 0x58, 0x33, 0x67, 0x94, 0x1f, 0x41, 0xe6, 0x8c, 0xf5, 0x24, 0x73, 0xc6, + 0xf9, 0xfc, 0x29, 0xc9, 0xb0, 0x4a, 0xcb, 0xc9, 0x97, 0x71, 0x07, 0x4a, 0x6d, 0xe9, 0x53, 0x2d, + 0x42, 0xfd, 0x64, 0xa7, 0xec, 0xcb, 0x72, 0xbc, 0xe6, 0x53, 0xa2, 0x40, 0x38, 0x21, 0x45, 0xe9, + 0x26, 0xf9, 0x33, 0x9e, 0xe8, 0xa1, 0x18, 0xcb, 0x52, 0x39, 0xe4, 0x67, 0xcd, 0xb0, 0xff, 0x6a, + 0x01, 0x2e, 0xf4, 0x5e, 0xd7, 0x89, 0xbe, 0xa2, 0x9e, 0xe8, 0xd7, 0x53, 0xfa, 0x0a, 0x7e, 0x09, + 0x48, 0xb0, 0x06, 0x0e, 0x3c, 0x71, 0x0d, 0x66, 0x95, 0x39, 0x9a, 0xe7, 0x36, 0x0f, 0xb4, 0x54, + 0x7e, 0xca, 0x35, 0xa6, 0x91, 0x46, 0xc0, 0xdd, 0x75, 0xd0, 0x22, 0x4c, 0x1b, 0x85, 0xb5, 0xaa, + 0x10, 0xf6, 0x95, 0x82, 0xa4, 0x61, 0x82, 0x71, 0x1a, 0xdf, 0xfe, 0xba, 0x05, 0x8f, 0xe7, 0x84, + 0xac, 0x1e, 0x38, 0xae, 0xc2, 0x16, 0x4c, 0xb7, 0xcd, 0xaa, 0x7d, 0xc2, 0xaf, 0x18, 0x81, 0xb1, + 0x55, 0x5f, 0x53, 0x00, 0x9c, 0x26, 0xba, 0x74, 0xf9, 0xd7, 0xff, 0xe0, 0xc2, 0xa7, 0x7e, 0xeb, + 0x0f, 0x2e, 0x7c, 0xea, 0x77, 0xff, 0xe0, 0xc2, 0xa7, 0xfe, 0xff, 0x07, 0x17, 0xac, 0x5f, 0x7f, + 0x70, 0xc1, 0xfa, 0xad, 0x07, 0x17, 0xac, 0xdf, 0x7d, 0x70, 0xc1, 0xfa, 0xfd, 0x07, 0x17, 0xac, + 0xaf, 0xfd, 0xe1, 0x85, 0x4f, 0xbd, 0x53, 0xd8, 0x7f, 0xf9, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, + 0xc2, 0x38, 0x28, 0x01, 0xc3, 0xdf, 0x00, 0x00, } diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 6bef759dece..acd56881674 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -185,6 +185,12 @@ message CSIPersistentVolumeSource { // Defaults to false (read/write). // +optional optional bool readOnly = 3; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 4; } // Adds and removes POSIX capabilities from running containers. diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 80cacc974e5..6c565486411 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -121,6 +121,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "driver": "Driver is the name of the driver to use for this volume. Required.", "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { From 631119a7d65e01e48b5d8a46d7300b20c65262e1 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 16 Jan 2018 15:28:16 +0800 Subject: [PATCH 185/264] move prometheus init to k8s.io/apiserver/pkg/endpoints/metrics/metrics.go --- .../src/k8s.io/apiserver/pkg/endpoints/BUILD | 1 - .../apiserver/pkg/endpoints/apiserver.go | 25 ------------------- .../pkg/endpoints/metrics/metrics.go | 4 +-- 3 files changed, 2 insertions(+), 28 deletions(-) delete mode 100644 staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD index f2c1ce34509..39fe40b5d9a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD @@ -60,7 +60,6 @@ go_test( go_library( name = "go_default_library", srcs = [ - "apiserver.go", "doc.go", "groupversion.go", "installer.go", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go deleted file mode 100644 index 933363bffeb..00000000000 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "k8s.io/apiserver/pkg/endpoints/metrics" -) - -func init() { - metrics.Register() -} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 65e651a3317..8a3b20d0d86 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -81,8 +81,8 @@ var ( kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`) ) -// Register all metrics. -func Register() { +func init() { + // Register all metrics. prometheus.MustRegister(requestCounter) prometheus.MustRegister(longRunningRequestGauge) prometheus.MustRegister(requestLatencies) From 3660563e22f2db2d27c4b66196e4b1603dca0ebc Mon Sep 17 00:00:00 2001 From: Yanqiang Miao Date: Tue, 16 Jan 2018 15:38:24 +0800 Subject: [PATCH 186/264] Benchmark non docker specific Signed-off-by: Yanqiang Miao --- test/e2e_node/resource_collector.go | 31 ++++++++++------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index ec256e32f1f..c98a8c2ff2f 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -85,16 +85,16 @@ func NewResourceCollector(interval time.Duration) *ResourceCollector { // Start starts resource collector and connects to the standalone Cadvisor pod // then repeatedly runs collectStats. 
func (r *ResourceCollector) Start() { - // Get the cgroup container names for kubelet and docker + // Get the cgroup container names for kubelet and runtime kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "") - dockerContainer, err := getContainerNameForProcess(dockerProcessName, dockerPidFile) + runtimeContainer, err := getContainerNameForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) if err == nil { systemContainers = map[string]string{ stats.SystemContainerKubelet: kubeletContainer, - stats.SystemContainerRuntime: dockerContainer, + stats.SystemContainerRuntime: runtimeContainer, } } else { - framework.Failf("Failed to get docker container name in test-e2e-node resource collector.") + framework.Failf("Failed to get runtime container name in test-e2e-node resource collector.") } wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { @@ -240,12 +240,11 @@ func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]f func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer) string { // Example output: // - // Resource usage for node "e2e-test-foo-node-abcde": - // container cpu(cores) memory(MB) - // "/" 0.363 2942.09 - // "/docker-daemon" 0.088 521.80 - // "/kubelet" 0.086 424.37 - // "/system" 0.007 119.88 + // Resource usage: + //container cpu(cores) memory_working_set(MB) memory_rss(MB) + //"kubelet" 0.068 27.92 15.43 + //"runtime" 0.664 89.88 68.13 + buf := &bytes.Buffer{} w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0) fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n") @@ -258,7 +257,7 @@ func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer func formatCPUSummary(summary framework.ContainersCPUSummary) string { // Example output for a node (the percentiles may differ): - // CPU usage of containers on node "e2e-test-foo-node-0vj7": + // CPU usage of containers: // container 5th% 50th% 90th% 95th% // "/" 0.051 0.159 0.387 0.455 // "/runtime 0.000 0.000 0.146 0.166 @@ -461,15 +460,7 @@ func (r *ResourceCollector) GetResourceTimeSeries() map[string]*perftype.Resourc return resourceSeries } -// Code for getting container name of docker, copied from pkg/kubelet/cm/container_manager_linux.go -// since they are not exposed -const ( - kubeletProcessName = "kubelet" - dockerProcessName = "docker" - dockerPidFile = "/var/run/docker.pid" - containerdProcessName = "docker-containerd" - containerdPidFile = "/run/docker/libcontainerd/docker-containerd.pid" -) +const kubeletProcessName = "kubelet" func getPidsForProcess(name, pidFile string) ([]int, error) { if len(pidFile) > 0 { From d77d20dd54777f95cbe88815ea47ec0bdf31332e Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 16 Jan 2018 17:01:48 +0800 Subject: [PATCH 187/264] fix azure TestGetInstanceIDByNodeName data race --- .../providers/azure/azure_vmss_test.go | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_test.go b/pkg/cloudprovider/providers/azure/azure_vmss_test.go index 7830eab783d..08afdd56908 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_test.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_test.go @@ -24,12 +24,15 @@ import ( "github.com/stretchr/testify/assert" ) -func newTestScaleSet() *scaleSet { - ss := newScaleSet(getTestCloud()) +func newTestScaleSet(scaleSetName string, vmList []string) *scaleSet { + cloud := getTestCloud() + 
setTestVirtualMachineCloud(cloud, scaleSetName, vmList) + ss := newScaleSet(cloud) + return ss.(*scaleSet) } -func setTestVirtualMachineScaleSets(ss *scaleSet, scaleSetName string, vmList []string) { +func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) { virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient() scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet) scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{ @@ -63,13 +66,13 @@ func setTestVirtualMachineScaleSets(ss *scaleSet, scaleSetName string, vmList [] }, ID: &ID, InstanceID: &instanceID, - Location: &ss.Cloud.Location, + Location: &ss.Location, } } virtualMachineScaleSetVMsClient.setFakeStore(ssVMs) - ss.Cloud.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient - ss.Cloud.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient + ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient + ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient } func TestGetScaleSetVMInstanceID(t *testing.T) { @@ -102,8 +105,6 @@ func TestGetScaleSetVMInstanceID(t *testing.T) { } func TestGetInstanceIDByNodeName(t *testing.T) { - ss := newTestScaleSet() - testCases := []struct { description string scaleSet string @@ -136,7 +137,8 @@ func TestGetInstanceIDByNodeName(t *testing.T) { } for _, test := range testCases { - setTestVirtualMachineScaleSets(ss, test.scaleSet, test.vmList) + ss := newTestScaleSet(test.scaleSet, test.vmList) + real, err := ss.GetInstanceIDByNodeName(test.nodeName) if test.expectError { assert.Error(t, err, test.description) From f57cc0b22d282bc8fe68faf91529e7175bc3918a Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Thu, 21 Dec 2017 16:50:16 +0800 Subject: [PATCH 188/264] fix(fakeclient): write event to watch channel on add/update/delete fix races with watch call add test for non-namespace resource watch add matching for all-namespace-watch fix delete namespace watch & restrict test fix multiple invocation on same resource & namespace add descriptive doc for tracker.watchers --- staging/src/k8s.io/client-go/testing/BUILD | 19 ++ .../src/k8s.io/client-go/testing/fixture.go | 60 +++++- .../k8s.io/client-go/testing/fixture_test.go | 192 ++++++++++++++++++ .../fake/generator_fake_for_clientset.go | 10 +- 4 files changed, 277 insertions(+), 4 deletions(-) create mode 100644 staging/src/k8s.io/client-go/testing/fixture_test.go diff --git a/staging/src/k8s.io/client-go/testing/BUILD b/staging/src/k8s.io/client-go/testing/BUILD index b26e662876d..666a449bd44 100644 --- a/staging/src/k8s.io/client-go/testing/BUILD +++ b/staging/src/k8s.io/client-go/testing/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -28,6 +29,24 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "fixture_test.go", + ], + embed = [":go_default_library"], + importpath = "k8s.io/client-go/testing", + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + ], +) + 
 filegroup(
     name = "package-srcs",
     srcs = glob(["**"]),
diff --git a/staging/src/k8s.io/client-go/testing/fixture.go b/staging/src/k8s.io/client-go/testing/fixture.go
index 08379fb0897..ba8ee508f46 100644
--- a/staging/src/k8s.io/client-go/testing/fixture.go
+++ b/staging/src/k8s.io/client-go/testing/fixture.go
@@ -29,6 +29,11 @@ import (
 	restclient "k8s.io/client-go/rest"
 )
 
+// FakeWatchBufferSize is the maximum number of watch events that can be
+// buffered in the watch channel. Note that when watch events overflow or
+// exceed this buffer size, manipulations via the fake client may block.
+const FakeWatchBufferSize = 128
+
 // ObjectTracker keeps track of objects. It is intended to be used to
 // fake calls to a server by returning objects based on their kind,
 // namespace and name.
@@ -54,6 +59,10 @@ type ObjectTracker interface {
 	// didn't exist in the tracker prior to deletion, Delete returns
 	// no error.
 	Delete(gvr schema.GroupVersionResource, ns, name string) error
+
+	// Watch watches objects from the tracker. Watch returns a channel
+	// which will push added / modified / deleted objects.
+	Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error)
 }
 
 // ObjectScheme abstracts the implementation of common operations on objects.
@@ -132,6 +141,13 @@ type tracker struct {
 	decoder  runtime.Decoder
 	lock     sync.RWMutex
 	objects  map[schema.GroupVersionResource][]runtime.Object
+	// watchers is keyed by resource; its value maps a namespace (or "" for
+	// all/no namespace) to the list of fake watchers registered for it.
+	// Each fake watcher holds a buffered channel of size FakeWatchBufferSize,
+	// which defaults to 128. Manipulations on resources broadcast
+	// notification events into the watchers' channels; too many unhandled
+	// events may block the tracker.
+	watchers map[schema.GroupVersionResource]map[string][]*watch.FakeWatcher
 }
 
 var _ ObjectTracker = &tracker{}
@@ -140,9 +156,10 @@ var _ ObjectTracker = &tracker{}
 // of objects for the fake clientset. Mostly useful for unit tests.
func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { return &tracker{ - scheme: scheme, - decoder: decoder, - objects: make(map[schema.GroupVersionResource][]runtime.Object), + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionResource][]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.FakeWatcher), } } @@ -185,6 +202,19 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK return list.DeepCopyObject(), nil } +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() + + fakewatcher := watch.NewFakeWithChanSize(FakeWatchBufferSize, true) + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.FakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { errNotFound := errors.NewNotFound(gvr.GroupResource(), name) @@ -263,6 +293,19 @@ func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns return t.add(gvr, obj, ns, true) } +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.FakeWatcher { + watches := []*watch.FakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) + } + if w := t.watchers[gvr][""]; w != nil { + watches = append(watches, w...) + } + } + return watches +} + func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { t.lock.Lock() defer t.lock.Unlock() @@ -296,6 +339,9 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st } if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() { if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + w.Modify(obj) + } t.objects[gvr][i] = obj return nil } @@ -310,6 +356,10 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st t.objects[gvr] = append(t.objects[gvr], obj) + for _, w := range t.getWatches(gvr, ns) { + w.Add(obj) + } + return nil } @@ -342,7 +392,11 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error return err } if objMeta.GetNamespace() == ns && objMeta.GetName() == name { + obj := t.objects[gvr][i] t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...) + for _, w := range t.getWatches(gvr, ns) { + w.Delete(obj) + } found = true break } diff --git a/staging/src/k8s.io/client-go/testing/fixture_test.go b/staging/src/k8s.io/client-go/testing/fixture_test.go new file mode 100644 index 00000000000..967e0aefa93 --- /dev/null +++ b/staging/src/k8s.io/client-go/testing/fixture_test.go @@ -0,0 +1,192 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "math/rand" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" +) + +func getArbitraryResource(s schema.GroupVersionResource, name, namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": s.Resource, + "apiVersion": s.Version, + "metadata": map[string]interface{}{ + "name": name, + "namespace": namespace, + "generateName": "test_generateName", + "uid": "test_uid", + "resourceVersion": "test_resourceVersion", + "selfLink": "test_selfLink", + }, + "data": strconv.Itoa(rand.Int()), + }, + } +} + +func TestWatchCallNonNamespace(t *testing.T) { + testResource := schema.GroupVersionResource{Group: "", Version: "test_version", Resource: "test_kind"} + testObj := getArbitraryResource(testResource, "test_name", "test_namespace") + accessor, err := meta.Accessor(testObj) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + ns := accessor.GetNamespace() + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + o := NewObjectTracker(scheme, codecs.UniversalDecoder()) + watch, err := o.Watch(testResource, ns) + go func() { + err := o.Create(testResource, testObj, ns) + if err != nil { + t.Errorf("test resource creation failed: %v", err) + } + }() + out := <-watch.ResultChan() + assert.Equal(t, testObj, out.Object, "watched object mismatch") +} + +func TestWatchCallAllNamespace(t *testing.T) { + testResource := schema.GroupVersionResource{Group: "", Version: "test_version", Resource: "test_kind"} + testObj := getArbitraryResource(testResource, "test_name", "test_namespace") + accessor, err := meta.Accessor(testObj) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + ns := accessor.GetNamespace() + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + o := NewObjectTracker(scheme, codecs.UniversalDecoder()) + w, err := o.Watch(testResource, "test_namespace") + wAll, err := o.Watch(testResource, "") + go func() { + err := o.Create(testResource, testObj, ns) + assert.NoError(t, err, "test resource creation failed") + }() + out := <-w.ResultChan() + outAll := <-wAll.ResultChan() + assert.Equal(t, watch.Added, out.Type, "watch event mismatch") + assert.Equal(t, watch.Added, outAll.Type, "watch event mismatch") + assert.Equal(t, testObj, out.Object, "watched created object mismatch") + assert.Equal(t, testObj, outAll.Object, "watched created object mismatch") + go func() { + err := o.Update(testResource, testObj, ns) + assert.NoError(t, err, "test resource updating failed") + }() + out = <-w.ResultChan() + outAll = <-wAll.ResultChan() + assert.Equal(t, watch.Modified, out.Type, "watch event mismatch") + assert.Equal(t, watch.Modified, outAll.Type, "watch event mismatch") + assert.Equal(t, testObj, out.Object, "watched updated object mismatch") + assert.Equal(t, testObj, outAll.Object, "watched updated object mismatch") + go func() { + err := o.Delete(testResource, "test_namespace", "test_name") + assert.NoError(t, err, "test resource deletion failed") + }() + out = <-w.ResultChan() + outAll = <-wAll.ResultChan() + assert.Equal(t, watch.Deleted, out.Type, "watch event mismatch") + assert.Equal(t, watch.Deleted, outAll.Type, "watch event 
mismatch") + assert.Equal(t, testObj, out.Object, "watched deleted object mismatch") + assert.Equal(t, testObj, outAll.Object, "watched deleted object mismatch") +} + +func TestWatchCallMultipleInvocation(t *testing.T) { + cases := []struct { + name string + op watch.EventType + }{ + { + "foo", + watch.Added, + }, + { + "bar", + watch.Added, + }, + { + "bar", + watch.Modified, + }, + { + "foo", + watch.Deleted, + }, + { + "bar", + watch.Deleted, + }, + } + + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + testResource := schema.GroupVersionResource{Group: "", Version: "test_version", Resource: "test_kind"} + + o := NewObjectTracker(scheme, codecs.UniversalDecoder()) + watchNamespaces := []string{ + "", + "", + "test_namespace", + "test_namespace", + } + var wg sync.WaitGroup + wg.Add(len(watchNamespaces)) + for idx, watchNamespace := range watchNamespaces { + i := idx + w, err := o.Watch(testResource, watchNamespace) + go func() { + assert.NoError(t, err, "watch invocation failed") + for _, c := range cases { + fmt.Printf("%#v %#v\n", c, i) + event := <-w.ResultChan() + accessor, err := meta.Accessor(event.Object) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + assert.Equal(t, c.op, event.Type, "watch event mismatched") + assert.Equal(t, c.name, accessor.GetName(), "watched object mismatch") + } + wg.Done() + }() + } + for _, c := range cases { + switch c.op { + case watch.Added: + obj := getArbitraryResource(testResource, c.name, "test_namespace") + o.Create(testResource, obj, "test_namespace") + case watch.Modified: + obj := getArbitraryResource(testResource, c.name, "test_namespace") + o.Update(testResource, obj, "test_namespace") + case watch.Deleted: + o.Delete(testResource, "test_namespace", c.name) + } + } + wg.Wait() +} diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index ea9ed8deb45..f77ab057008 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -127,7 +127,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } From 6f381ab2cd351c96a28b7ccde704ea96c38612dd Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 25 Dec 2017 11:46:56 +0800 Subject: [PATCH 189/264] Regenerating code of fake clientset --- .../internalclientset/fake/clientset_generated.go | 10 +++++++++- .../clientset/clientset/fake/clientset_generated.go | 10 +++++++++- .../internalclientset/fake/clientset_generated.go | 10 +++++++++- .../client-go/kubernetes/fake/clientset_generated.go | 10 +++++++++- .../internalversion/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- 
.../clientset/fake/clientset_generated.go | 10 +++++++++- .../internalclientset/fake/clientset_generated.go | 10 +++++++++- .../clientset/fake/clientset_generated.go | 10 +++++++++- .../internalversion/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- 13 files changed, 117 insertions(+), 13 deletions(-) diff --git a/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go index 28efaac5a73..1db023eb600 100644 --- a/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ b/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go @@ -71,7 +71,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go index 473f88f47da..4f2448fce5d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go index fdc0beee577..7647ef17ce4 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, 
&fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 12cfac0a8f2..982d7420b89 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -95,7 +95,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go index 8b474ba6d7e..af74a734dd8 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go @@ -43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go index 5cadbe26140..df811fcb30f 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go @@ -43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go index 04135606b8c..0428810e8fb 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go +++ 
b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go @@ -43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go index bae14284200..5852846aca5 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go index d938683de71..48f7226cd82 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go index a3e65fa49f1..8dfdbfd4119 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ -43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", 
testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go index b4030b659e1..89b726e1178 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go index 6b627a3c7a3..503ab9b931b 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go index 864cfe59b70..6a89220a266 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, 
&fakediscovery.FakeDiscovery{Fake: &fakePtr}} } From 9c847fc4d6d5f86ddc17e7717e9357e4b0a54caa Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Tue, 16 Jan 2018 01:04:18 -0500 Subject: [PATCH 190/264] Call Dial in blocking mode --- pkg/kubelet/cm/deviceplugin/device_plugin_stub.go | 14 ++++++-------- pkg/kubelet/cm/deviceplugin/endpoint.go | 5 +++-- pkg/kubelet/cm/deviceplugin/manager_test.go | 3 --- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index 08dcd5a992f..a80f16b9d19 100644 --- a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go +++ b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -86,14 +86,11 @@ func (m *Stub) Start() error { pluginapi.RegisterDevicePluginServer(m.server, m) go m.server.Serve(sock) - // Wait till grpc server is ready. - for i := 0; i < 10; i++ { - services := m.server.GetServiceInfo() - if len(services) > 0 { - break - } - time.Sleep(1 * time.Second) + _, conn, err := dial(m.socket) + if err != nil { + return err } + conn.Close() log.Println("Starting to serve on", m.socket) return nil @@ -109,7 +106,8 @@ func (m *Stub) Stop() error { // Register registers the device plugin for the given resourceName with Kubelet. func (m *Stub) Register(kubeletEndpoint, resourceName string) error { - conn, err := grpc.Dial(kubeletEndpoint, grpc.WithInsecure(), + conn, err := grpc.Dial(kubeletEndpoint, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithTimeout(10*time.Second), grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) })) diff --git a/pkg/kubelet/cm/deviceplugin/endpoint.go b/pkg/kubelet/cm/deviceplugin/endpoint.go index 523922d9dc0..f2fd8be9896 100644 --- a/pkg/kubelet/cm/deviceplugin/endpoint.go +++ b/pkg/kubelet/cm/deviceplugin/endpoint.go @@ -186,9 +186,10 @@ func (e *endpointImpl) stop() { e.clientConn.Close() } -// dial establishes the gRPC communication with the registered device plugin. +// dial establishes the gRPC communication with the registered device plugin. 
https://godoc.org/google.golang.org/grpc#Dial func dial(unixSocketPath string) (pluginapi.DevicePluginClient, *grpc.ClientConn, error) { - c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), + c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithTimeout(10*time.Second), grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) }), diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index 124f690acf7..783c2d48757 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -497,9 +497,6 @@ type TestResource struct { func TestPodContainerDeviceAllocation(t *testing.T) { flag.Set("alsologtostderr", fmt.Sprintf("%t", true)) - var logLevel string - flag.StringVar(&logLevel, "logLevel", "4", "test") - flag.Lookup("v").Value.Set(logLevel) res1 := TestResource{ resourceName: "domain1.com/resource1", resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI), From 5b854e7b17d68e191b74501afb72fc5fb9582e1b Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 16 Jan 2018 11:00:43 -0500 Subject: [PATCH 191/264] say which lease is being acquired --- .../k8s.io/client-go/tools/leaderelection/leaderelection.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go index 2ce546bc426..e41b420c987 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -171,11 +171,11 @@ func (le *LeaderElector) IsLeader() bool { // acquire loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew succeeds. func (le *LeaderElector) acquire() { stop := make(chan struct{}) - glog.Infof("attempting to acquire leader lease...") + desc := le.config.Lock.Describe() + glog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { succeeded := le.tryAcquireOrRenew() le.maybeReportTransition() - desc := le.config.Lock.Describe() if !succeeded { glog.V(4).Infof("failed to acquire lease %v", desc) return From a8f7404a13f9db64606b6607fef3efd9655374d6 Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Tue, 16 Jan 2018 16:43:10 +0000 Subject: [PATCH 192/264] azure disk: if the disk is not found, immediately detach it. 
This prevents Azure from holding on to the bad request and blocking new requests.

Signed-off-by: Huamin Chen
---
 .../providers/azure/azure_controllerCommon.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go
index ad40f3c5b85..8cef635c3bb 100644
--- a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go
+++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go
@@ -43,6 +43,7 @@ const (
 	errLeaseFailed       = "AcquireDiskLeaseFailed"
 	errLeaseIDMissing    = "LeaseIdMissing"
 	errContainerNotFound = "ContainerNotFound"
+	errDiskBlobNotFound  = "DiskBlobNotFound"
 )
 
 var defaultBackOff = kwait.Backoff{
@@ -124,9 +125,9 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 	if err != nil {
 		glog.Errorf("azureDisk - azure attach failed, err: %v", err)
 		detail := err.Error()
-		if strings.Contains(detail, errLeaseFailed) {
-			// if lease cannot be acquired, immediately detach the disk and return the original error
-			glog.Infof("azureDisk - failed to acquire disk lease, try detach")
+		if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
+			// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
+			glog.Infof("azureDisk - err %s, try detach", detail)
 			c.cloud.DetachDiskByName(diskName, diskURI, nodeName)
 		}
 	} else {

From 96b5c332833d95667239d62ea9ca274007f3645d Mon Sep 17 00:00:00 2001
From: Mike Danese
Date: Sat, 13 Jan 2018 08:18:37 -0800
Subject: [PATCH 193/264] cluster: remove centos dependency on saltbase

---
 .../{saltbase/salt/generate-cert => centos}/make-ca-cert.sh | 5 -----
 cluster/centos/util.sh                                      | 2 +-
 2 files changed, 1 insertion(+), 6 deletions(-)
 rename cluster/{saltbase/salt/generate-cert => centos}/make-ca-cert.sh (92%)

diff --git a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh b/cluster/centos/make-ca-cert.sh
similarity index 92%
rename from cluster/saltbase/salt/generate-cert/make-ca-cert.sh
rename to cluster/centos/make-ca-cert.sh
index 41531209ed5..c86f27bba2b 100755
--- a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
+++ b/cluster/centos/make-ca-cert.sh
@@ -33,11 +33,6 @@ mkdir -p "$cert_dir"
 
 use_cn=false
 
-# TODO: Add support for discovery on other providers?
-if [ "$cert_ip" == "_use_gce_external_ip_" ]; then - cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip) -fi - sans="IP:${cert_ip}" if [[ -n "${extra_sans}" ]]; then sans="${sans},${extra_sans}" diff --git a/cluster/centos/util.sh b/cluster/centos/util.sh index 88302a31a93..10378428bca 100755 --- a/cluster/centos/util.sh +++ b/cluster/centos/util.sh @@ -234,7 +234,7 @@ echo "[INFO] tear-down-node on $1" # Generate the CA certificates for k8s components function make-ca-cert() { echo "[INFO] make-ca-cert" - bash "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local" + bash "${ROOT}/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local" } # Provision master From 13116457ba6ed28f1d72061aa52c99d53fe95040 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 08:24:17 -0800 Subject: [PATCH 194/264] cluster: remove gce dependencies out of salt basically just: * move all manifests into the new gce/manifests dir * move limit-range into gce/addons/limit-range * move abac jsonl into gce/manifests. this is gross but we will hopefully be able to delete this config soon. it only exists to support a deprecated feature. * fix build, release, deploy to look for everything in its new home --- build/lib/release.sh | 41 +++++++++---------- cluster/BUILD | 22 +++------- cluster/gce/BUILD | 18 ++++++++ .../addons}/limit-range/limit-range.yaml | 0 cluster/gce/gci/configure-helper.sh | 8 ++-- .../manifests}/abac-authz-policy.jsonl | 0 .../manifests}/cluster-autoscaler.manifest | 0 .../manifests}/e2e-image-puller.manifest | 0 .../salt/etcd => gce/manifests}/etcd.manifest | 0 .../l7-gcp => gce/manifests}/glbc.manifest | 0 .../manifests}/kube-addon-manager.yaml | 0 .../manifests}/kube-apiserver.manifest | 0 .../kube-controller-manager.manifest | 0 .../manifests}/kube-proxy.manifest | 0 .../manifests}/kube-registry-proxy.yaml | 0 .../manifests}/kube-scheduler.manifest | 0 .../manifests}/rescheduler.manifest | 0 cluster/saltbase/BUILD | 39 ------------------ 18 files changed, 46 insertions(+), 82 deletions(-) rename cluster/{saltbase/salt/kube-admission-controls => gce/addons}/limit-range/limit-range.yaml (100%) rename cluster/{saltbase/salt/kube-apiserver => gce/manifests}/abac-authz-policy.jsonl (100%) rename cluster/{saltbase/salt/cluster-autoscaler => gce/manifests}/cluster-autoscaler.manifest (100%) rename cluster/{saltbase/salt/e2e-image-puller => gce/manifests}/e2e-image-puller.manifest (100%) rename cluster/{saltbase/salt/etcd => gce/manifests}/etcd.manifest (100%) rename cluster/{saltbase/salt/l7-gcp => gce/manifests}/glbc.manifest (100%) rename cluster/{saltbase/salt/kube-addons => gce/manifests}/kube-addon-manager.yaml (100%) rename cluster/{saltbase/salt/kube-apiserver => gce/manifests}/kube-apiserver.manifest (100%) rename cluster/{saltbase/salt/kube-controller-manager => gce/manifests}/kube-controller-manager.manifest (100%) rename cluster/{saltbase/salt/kube-proxy => gce/manifests}/kube-proxy.manifest (100%) rename cluster/{saltbase/salt/kube-registry-proxy => gce/manifests}/kube-registry-proxy.yaml (100%) rename 
cluster/{saltbase/salt/kube-scheduler => gce/manifests}/kube-scheduler.manifest (100%) rename cluster/{saltbase/salt/rescheduler => gce/manifests}/rescheduler.manifest (100%) diff --git a/build/lib/release.sh b/build/lib/release.sh index 9b820b1bbd3..1fd9296fd01 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -391,38 +391,35 @@ function kube::release::package_salt_tarball() { function kube::release::package_kube_manifests_tarball() { kube::log::status "Building tarball: manifests" - local salt_dir="${KUBE_ROOT}/cluster/saltbase/salt" + local src_dir="${KUBE_ROOT}/cluster/gce/manifests" local release_stage="${RELEASE_STAGE}/manifests/kubernetes" rm -rf "${release_stage}" - mkdir -p "${release_stage}" - cp "${salt_dir}/kube-registry-proxy/kube-registry-proxy.yaml" "${release_stage}/" - cp "${salt_dir}/kube-proxy/kube-proxy.manifest" "${release_stage}/" - - local gci_dst_dir="${release_stage}/gci-trusty" - mkdir -p "${gci_dst_dir}" - cp "${salt_dir}/cluster-autoscaler/cluster-autoscaler.manifest" "${gci_dst_dir}/" - cp "${salt_dir}/etcd/etcd.manifest" "${gci_dst_dir}" - cp "${salt_dir}/kube-scheduler/kube-scheduler.manifest" "${gci_dst_dir}" - cp "${salt_dir}/kube-apiserver/kube-apiserver.manifest" "${gci_dst_dir}" - cp "${salt_dir}/kube-apiserver/abac-authz-policy.jsonl" "${gci_dst_dir}" - cp "${salt_dir}/kube-controller-manager/kube-controller-manager.manifest" "${gci_dst_dir}" - cp "${salt_dir}/kube-addons/kube-addon-manager.yaml" "${gci_dst_dir}" - cp "${salt_dir}/l7-gcp/glbc.manifest" "${gci_dst_dir}" - cp "${salt_dir}/rescheduler/rescheduler.manifest" "${gci_dst_dir}/" - cp "${salt_dir}/e2e-image-puller/e2e-image-puller.manifest" "${gci_dst_dir}/" - cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${gci_dst_dir}/gci-configure-helper.sh" - cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${gci_dst_dir}/health-monitor.sh" - cp -r "${salt_dir}/kube-admission-controls/limit-range" "${gci_dst_dir}" + local dst_dir="${release_stage}/gci-trusty" + mkdir -p "${dst_dir}" + cp "${src_dir}/kube-registry-proxy.yaml" "${dst_dir}/" + cp "${src_dir}/kube-proxy.manifest" "${dst_dir}/" + cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/" + cp "${src_dir}/etcd.manifest" "${dst_dir}" + cp "${src_dir}/kube-scheduler.manifest" "${dst_dir}" + cp "${src_dir}/kube-apiserver.manifest" "${dst_dir}" + cp "${src_dir}/abac-authz-policy.jsonl" "${dst_dir}" + cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}" + cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}" + cp "${src_dir}/glbc.manifest" "${dst_dir}" + cp "${src_dir}/rescheduler.manifest" "${dst_dir}/" + cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/" + cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh" + cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh" local objects objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo) - tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${gci_dst_dir}" + tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${dst_dir}" # Merge GCE-specific addons with general purpose addons. local gce_objects gce_objects=$(cd "${KUBE_ROOT}/cluster/gce/addons" && find . 
\( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) \( -not -name \*demo\* \)) if [[ -n "${gce_objects}" ]]; then - tar c -C "${KUBE_ROOT}/cluster/gce/addons" ${gce_objects} | tar x -C "${gci_dst_dir}" + tar c -C "${KUBE_ROOT}/cluster/gce/addons" ${gce_objects} | tar x -C "${dst_dir}" fi kube::release::clean_cruft diff --git a/cluster/BUILD b/cluster/BUILD index 9d3ad2c9744..8dea73c4cd2 100644 --- a/cluster/BUILD +++ b/cluster/BUILD @@ -25,27 +25,15 @@ filegroup( tags = ["automanaged"], ) -# All of the manifests that are expected to be in a "gci-trusty" -# subdir of the manifests tarball. -pkg_tar( - name = "_manifests-gci-trusty", - package_dir = "gci-trusty", - visibility = ["//visibility:private"], - deps = [ - "//cluster/addons", - "//cluster/gce:gci-trusty-manifests", - "//cluster/gce/addons", - "//cluster/saltbase:gci-trusty-salt-manifests", - ], -) - pkg_tar( name = "manifests", mode = "0644", - package_dir = "kubernetes", + package_dir = "kubernetes/gci-trusty", deps = [ - ":_manifests-gci-trusty", - "//cluster/saltbase:salt-manifests", + "//cluster/addons", + "//cluster/gce:gce-master-manifests", + "//cluster/gce:gci-trusty-manifests", + "//cluster/gce/addons", ], ) diff --git a/cluster/gce/BUILD b/cluster/gce/BUILD index e297c36ef6f..9f0717fe71d 100644 --- a/cluster/gce/BUILD +++ b/cluster/gce/BUILD @@ -49,3 +49,21 @@ release_filegroup( "gci/node.yaml", ], ) + +pkg_tar( + name = "gce-master-manifests", + files = [ + "manifests/abac-authz-policy.jsonl", + "manifests/cluster-autoscaler.manifest", + "manifests/e2e-image-puller.manifest", + "manifests/etcd.manifest", + "manifests/glbc.manifest", + "manifests/kube-addon-manager.yaml", + "manifests/kube-apiserver.manifest", + "manifests/kube-controller-manager.manifest", + "manifests/kube-proxy.manifest", + "manifests/kube-scheduler.manifest", + "manifests/rescheduler.manifest", + ], + mode = "0644", +) diff --git a/cluster/saltbase/salt/kube-admission-controls/limit-range/limit-range.yaml b/cluster/gce/addons/limit-range/limit-range.yaml similarity index 100% rename from cluster/saltbase/salt/kube-admission-controls/limit-range/limit-range.yaml rename to cluster/gce/addons/limit-range/limit-range.yaml diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 5d22636a5b5..53aaa48fd52 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1334,7 +1334,7 @@ function prepare-kube-proxy-manifest-variables { function start-kube-proxy { echo "Start kube-proxy static pod" prepare-log-file /var/log/kube-proxy.log - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest" + local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest" prepare-kube-proxy-manifest-variables "${src_file}" cp "${src_file}" /etc/kubernetes/manifests @@ -2077,7 +2077,7 @@ EOF sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}" if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then - setup-addon-manifests "addons" "dns-horizontal-autoscaler" + setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce" fi } @@ -2225,7 +2225,7 @@ EOF setup-addon-manifests "addons" "node-problem-detector/standalone" "node-problem-detector" fi if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then - setup-addon-manifests "admission-controls" "limit-range" + setup-addon-manifests "admission-controls" "limit-range" "gce" fi if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then setup-addon-manifests 
"addons" "calico-policy-controller" @@ -2260,7 +2260,7 @@ function start-image-puller { # Starts kube-registry proxy function start-kube-registry-proxy { echo "Start kube-registry-proxy" - cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests + cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-registry-proxy.yaml" /etc/kubernetes/manifests } # Starts a l7 loadbalancing controller for ingress. diff --git a/cluster/saltbase/salt/kube-apiserver/abac-authz-policy.jsonl b/cluster/gce/manifests/abac-authz-policy.jsonl similarity index 100% rename from cluster/saltbase/salt/kube-apiserver/abac-authz-policy.jsonl rename to cluster/gce/manifests/abac-authz-policy.jsonl diff --git a/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest b/cluster/gce/manifests/cluster-autoscaler.manifest similarity index 100% rename from cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest rename to cluster/gce/manifests/cluster-autoscaler.manifest diff --git a/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest b/cluster/gce/manifests/e2e-image-puller.manifest similarity index 100% rename from cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest rename to cluster/gce/manifests/e2e-image-puller.manifest diff --git a/cluster/saltbase/salt/etcd/etcd.manifest b/cluster/gce/manifests/etcd.manifest similarity index 100% rename from cluster/saltbase/salt/etcd/etcd.manifest rename to cluster/gce/manifests/etcd.manifest diff --git a/cluster/saltbase/salt/l7-gcp/glbc.manifest b/cluster/gce/manifests/glbc.manifest similarity index 100% rename from cluster/saltbase/salt/l7-gcp/glbc.manifest rename to cluster/gce/manifests/glbc.manifest diff --git a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml b/cluster/gce/manifests/kube-addon-manager.yaml similarity index 100% rename from cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml rename to cluster/gce/manifests/kube-addon-manager.yaml diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/gce/manifests/kube-apiserver.manifest similarity index 100% rename from cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest rename to cluster/gce/manifests/kube-apiserver.manifest diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/gce/manifests/kube-controller-manager.manifest similarity index 100% rename from cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest rename to cluster/gce/manifests/kube-controller-manager.manifest diff --git a/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest b/cluster/gce/manifests/kube-proxy.manifest similarity index 100% rename from cluster/saltbase/salt/kube-proxy/kube-proxy.manifest rename to cluster/gce/manifests/kube-proxy.manifest diff --git a/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml b/cluster/gce/manifests/kube-registry-proxy.yaml similarity index 100% rename from cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml rename to cluster/gce/manifests/kube-registry-proxy.yaml diff --git a/cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest b/cluster/gce/manifests/kube-scheduler.manifest similarity index 100% rename from cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest rename to cluster/gce/manifests/kube-scheduler.manifest diff --git a/cluster/saltbase/salt/rescheduler/rescheduler.manifest b/cluster/gce/manifests/rescheduler.manifest similarity index 100% rename from 
cluster/saltbase/salt/rescheduler/rescheduler.manifest rename to cluster/gce/manifests/rescheduler.manifest diff --git a/cluster/saltbase/BUILD b/cluster/saltbase/BUILD index 13723fb63e3..484ca06ca60 100644 --- a/cluster/saltbase/BUILD +++ b/cluster/saltbase/BUILD @@ -45,42 +45,3 @@ pkg_tar( ":_salt_kube-addons", ], ) - -# The following are used in the kubernetes salt tarball. -pkg_tar( - name = "salt-manifests", - files = [ - "salt/kube-proxy/kube-proxy.manifest", - "salt/kube-registry-proxy/kube-registry-proxy.yaml", - ], - mode = "0644", -) - -pkg_tar( - name = "_kube-admission-controls", - files = glob(["salt/kube-admission-controls/limit-range/**"]), - mode = "0644", - # Maintain limit-range/ subdirectory in tarball - strip_prefix = "./salt/kube-admission-controls/", - visibility = ["//visibility:private"], -) - -pkg_tar( - name = "gci-trusty-salt-manifests", - files = [ - "salt/cluster-autoscaler/cluster-autoscaler.manifest", - "salt/e2e-image-puller/e2e-image-puller.manifest", - "salt/etcd/etcd.manifest", - "salt/kube-addons/kube-addon-manager.yaml", - "salt/kube-apiserver/abac-authz-policy.jsonl", - "salt/kube-apiserver/kube-apiserver.manifest", - "salt/kube-controller-manager/kube-controller-manager.manifest", - "salt/kube-scheduler/kube-scheduler.manifest", - "salt/l7-gcp/glbc.manifest", - "salt/rescheduler/rescheduler.manifest", - ], - mode = "0644", - deps = [ - "_kube-admission-controls", - ], -) From 5f9735de53c0a7f1334d91b59bcd87c6fbe2a06f Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 09:14:35 -0800 Subject: [PATCH 195/264] cluster: delete saltbase and don't build kubernetes-salt.tar.gz --- build/README.md | 2 - build/lib/release.sh | 39 +- build/release-tars/BUILD | 11 - cluster/BUILD | 1 - .../fluentd-es-configmap.yaml | 11 - cluster/common.sh | 17 - cluster/gce/util.sh | 11 - cluster/saltbase/BUILD | 47 -- cluster/saltbase/README.md | 19 - cluster/saltbase/install.sh | 109 ---- cluster/saltbase/pillar/README.md | 22 - cluster/saltbase/pillar/cluster-params.sls | 4 - cluster/saltbase/pillar/docker-images.sls | 5 - cluster/saltbase/pillar/logging.sls | 1 - cluster/saltbase/pillar/mine.sls | 12 - cluster/saltbase/pillar/privilege.sls | 2 - cluster/saltbase/pillar/systemd.sls | 9 - cluster/saltbase/pillar/top.sls | 8 - cluster/saltbase/reactor/README.md | 6 - .../saltbase/reactor/highstate-masters.sls | 10 - .../saltbase/reactor/highstate-minions.sls | 10 - cluster/saltbase/reactor/highstate-new.sls | 4 - cluster/saltbase/salt/README.md | 29 -- cluster/saltbase/salt/base.sls | 46 -- cluster/saltbase/salt/calico/OWNERS | 6 - cluster/saltbase/salt/calico/node.sls | 9 - .../saltbase/salt/cluster-autoscaler/OWNERS | 12 - .../saltbase/salt/cluster-autoscaler/init.sls | 25 - cluster/saltbase/salt/cni/OWNERS | 8 - cluster/saltbase/salt/cni/init.sls | 26 - .../salt/debian-auto-upgrades/20auto-upgrades | 4 - .../salt/debian-auto-upgrades/init.sls | 13 - cluster/saltbase/salt/docker/default | 8 - cluster/saltbase/salt/docker/docker-defaults | 18 - .../saltbase/salt/docker/docker-healthcheck | 44 -- .../salt/docker/docker-healthcheck.service | 9 - .../salt/docker/docker-healthcheck.timer | 9 - cluster/saltbase/salt/docker/docker-prestart | 22 - cluster/saltbase/salt/docker/docker.list | 1 - cluster/saltbase/salt/docker/docker.service | 21 - cluster/saltbase/salt/docker/init.sls | 493 ------------------ .../saltbase/salt/e2e-image-puller/init.sls | 12 - cluster/saltbase/salt/e2e/init.sls | 28 - cluster/saltbase/salt/etcd/init.sls | 83 --- 
cluster/saltbase/salt/generate-cert/init.sls | 37 --
.../saltbase/salt/generate-cert/make-cert.sh | 26 -
cluster/saltbase/salt/kube-addons/init.sls | 215 --------
.../salt/kube-admission-controls/init.sls | 10 -
cluster/saltbase/salt/kube-apiserver/init.sls | 71 ---
cluster/saltbase/salt/kube-client-tools.sls | 6 -
.../salt/kube-controller-manager/init.sls | 30 --
.../saltbase/salt/kube-master-addons/init.sls | 52 --
.../saltbase/salt/kube-master-addons/initd | 95 ----
.../kube-master-addons.service | 9 -
.../kube-master-addons/kube-master-addons.sh | 90 ----
.../saltbase/salt/kube-node-unpacker/init.sls | 67 ---
.../saltbase/salt/kube-node-unpacker/initd | 95 ----
.../kube-node-unpacker.service | 9 -
.../kube-node-unpacker/kube-node-unpacker.sh | 46 --
cluster/saltbase/salt/kube-proxy/OWNERS | 12 -
cluster/saltbase/salt/kube-proxy/init.sls | 40 --
cluster/saltbase/salt/kube-proxy/kubeconfig | 0
.../salt/kube-registry-proxy/init.sls | 8 -
cluster/saltbase/salt/kube-scheduler/init.sls | 30 --
cluster/saltbase/salt/kubelet/default | 192 -------
cluster/saltbase/salt/kubelet/init.sls | 106 ----
cluster/saltbase/salt/kubelet/initd | 126 -----
cluster/saltbase/salt/kubelet/kubeconfig | 0
cluster/saltbase/salt/kubelet/kubelet.service | 14 -
cluster/saltbase/salt/l7-gcp/OWNERS | 6 -
cluster/saltbase/salt/l7-gcp/init.sls | 17 -
cluster/saltbase/salt/logrotate/conf | 13 -
cluster/saltbase/salt/logrotate/cron | 2 -
.../saltbase/salt/logrotate/docker-containers | 10 -
cluster/saltbase/salt/logrotate/init.sls | 35 --
cluster/saltbase/salt/ntp/init.sls | 11 -
.../opencontrail-networking-master/init.sls | 15 -
.../opencontrail-networking-minion/init.sls | 15 -
cluster/saltbase/salt/rescheduler/init.sls | 15 -
cluster/saltbase/salt/salt-helpers/init.sls | 24 -
cluster/saltbase/salt/salt-helpers/pkg-apt | 70 ---
cluster/saltbase/salt/salt-helpers/services | 72 ---
.../salt/supervisor/docker-checker.sh | 87 ----
cluster/saltbase/salt/supervisor/docker.conf | 6 -
cluster/saltbase/salt/supervisor/init.sls | 102 ----
.../salt/supervisor/kube-addons-checker.sh | 34 --
.../saltbase/salt/supervisor/kube-addons.conf | 6 -
.../salt/supervisor/kubelet-checker.sh | 37 --
cluster/saltbase/salt/supervisor/kubelet.conf | 6 -
.../salt/supervisor/supervisor_watcher.sh | 34 --
cluster/saltbase/salt/top.sls | 73 ---
91 files changed, 3 insertions(+), 3359 deletions(-)
delete mode 100644 cluster/saltbase/BUILD
delete mode 100644 cluster/saltbase/README.md
delete mode 100755 cluster/saltbase/install.sh
delete mode 100644 cluster/saltbase/pillar/README.md
delete mode 100644 cluster/saltbase/pillar/cluster-params.sls
delete mode 100644 cluster/saltbase/pillar/docker-images.sls
delete mode 100644 cluster/saltbase/pillar/logging.sls
delete mode 100644 cluster/saltbase/pillar/mine.sls
delete mode 100644 cluster/saltbase/pillar/privilege.sls
delete mode 100644 cluster/saltbase/pillar/systemd.sls
delete mode 100644 cluster/saltbase/pillar/top.sls
delete mode 100644 cluster/saltbase/reactor/README.md
delete mode 100644 cluster/saltbase/reactor/highstate-masters.sls
delete mode 100644 cluster/saltbase/reactor/highstate-minions.sls
delete mode 100644 cluster/saltbase/reactor/highstate-new.sls
delete mode 100644 cluster/saltbase/salt/README.md
delete mode 100644 cluster/saltbase/salt/base.sls
delete mode 100644 cluster/saltbase/salt/calico/OWNERS
delete mode 100644 cluster/saltbase/salt/calico/node.sls
delete mode 100644 cluster/saltbase/salt/cluster-autoscaler/OWNERS
delete mode 100644 cluster/saltbase/salt/cluster-autoscaler/init.sls
delete mode 100644 cluster/saltbase/salt/cni/OWNERS
delete mode 100644 cluster/saltbase/salt/cni/init.sls
delete mode 100644 cluster/saltbase/salt/debian-auto-upgrades/20auto-upgrades
delete mode 100644 cluster/saltbase/salt/debian-auto-upgrades/init.sls
delete mode 100644 cluster/saltbase/salt/docker/default
delete mode 100644 cluster/saltbase/salt/docker/docker-defaults
delete mode 100755 cluster/saltbase/salt/docker/docker-healthcheck
delete mode 100644 cluster/saltbase/salt/docker/docker-healthcheck.service
delete mode 100644 cluster/saltbase/salt/docker/docker-healthcheck.timer
delete mode 100755 cluster/saltbase/salt/docker/docker-prestart
delete mode 100644 cluster/saltbase/salt/docker/docker.list
delete mode 100644 cluster/saltbase/salt/docker/docker.service
delete mode 100644 cluster/saltbase/salt/docker/init.sls
delete mode 100644 cluster/saltbase/salt/e2e-image-puller/init.sls
delete mode 100644 cluster/saltbase/salt/e2e/init.sls
delete mode 100644 cluster/saltbase/salt/etcd/init.sls
delete mode 100644 cluster/saltbase/salt/generate-cert/init.sls
delete mode 100755 cluster/saltbase/salt/generate-cert/make-cert.sh
delete mode 100644 cluster/saltbase/salt/kube-addons/init.sls
delete mode 100644 cluster/saltbase/salt/kube-admission-controls/init.sls
delete mode 100644 cluster/saltbase/salt/kube-apiserver/init.sls
delete mode 100644 cluster/saltbase/salt/kube-client-tools.sls
delete mode 100644 cluster/saltbase/salt/kube-controller-manager/init.sls
delete mode 100644 cluster/saltbase/salt/kube-master-addons/init.sls
delete mode 100644 cluster/saltbase/salt/kube-master-addons/initd
delete mode 100644 cluster/saltbase/salt/kube-master-addons/kube-master-addons.service
delete mode 100755 cluster/saltbase/salt/kube-master-addons/kube-master-addons.sh
delete mode 100644 cluster/saltbase/salt/kube-node-unpacker/init.sls
delete mode 100755 cluster/saltbase/salt/kube-node-unpacker/initd
delete mode 100644 cluster/saltbase/salt/kube-node-unpacker/kube-node-unpacker.service
delete mode 100755 cluster/saltbase/salt/kube-node-unpacker/kube-node-unpacker.sh
delete mode 100644 cluster/saltbase/salt/kube-proxy/OWNERS
delete mode 100644 cluster/saltbase/salt/kube-proxy/init.sls
delete mode 100644 cluster/saltbase/salt/kube-proxy/kubeconfig
delete mode 100644 cluster/saltbase/salt/kube-registry-proxy/init.sls
delete mode 100644 cluster/saltbase/salt/kube-scheduler/init.sls
delete mode 100644 cluster/saltbase/salt/kubelet/default
delete mode 100644 cluster/saltbase/salt/kubelet/init.sls
delete mode 100644 cluster/saltbase/salt/kubelet/initd
delete mode 100644 cluster/saltbase/salt/kubelet/kubeconfig
delete mode 100644 cluster/saltbase/salt/kubelet/kubelet.service
delete mode 100644 cluster/saltbase/salt/l7-gcp/OWNERS
delete mode 100644 cluster/saltbase/salt/l7-gcp/init.sls
delete mode 100644 cluster/saltbase/salt/logrotate/conf
delete mode 100755 cluster/saltbase/salt/logrotate/cron
delete mode 100644 cluster/saltbase/salt/logrotate/docker-containers
delete mode 100644 cluster/saltbase/salt/logrotate/init.sls
delete mode 100644 cluster/saltbase/salt/ntp/init.sls
delete mode 100644 cluster/saltbase/salt/opencontrail-networking-master/init.sls
delete mode 100644 cluster/saltbase/salt/opencontrail-networking-minion/init.sls
delete mode 100644 cluster/saltbase/salt/rescheduler/init.sls
delete mode 100644 cluster/saltbase/salt/salt-helpers/init.sls
delete mode 100644 cluster/saltbase/salt/salt-helpers/pkg-apt
delete mode 100644 cluster/saltbase/salt/salt-helpers/services
delete mode 100755 
cluster/saltbase/salt/supervisor/docker-checker.sh delete mode 100644 cluster/saltbase/salt/supervisor/docker.conf delete mode 100644 cluster/saltbase/salt/supervisor/init.sls delete mode 100644 cluster/saltbase/salt/supervisor/kube-addons-checker.sh delete mode 100644 cluster/saltbase/salt/supervisor/kube-addons.conf delete mode 100755 cluster/saltbase/salt/supervisor/kubelet-checker.sh delete mode 100644 cluster/saltbase/salt/supervisor/kubelet.conf delete mode 100644 cluster/saltbase/salt/supervisor/supervisor_watcher.sh delete mode 100644 cluster/saltbase/salt/top.sls diff --git a/build/README.md b/build/README.md index 60f37fb7019..cd453e3cc42 100644 --- a/build/README.md +++ b/build/README.md @@ -100,12 +100,10 @@ The main output is a tar file: `kubernetes.tar.gz`. This includes: * Examples * Cluster deployment scripts for various clouds * Tar file containing all server binaries -* Tar file containing salt deployment tree shared across multiple cloud deployments. In addition, there are some other tar files that are created: * `kubernetes-client-*.tar.gz` Client binaries for a specific platform. * `kubernetes-server-*.tar.gz` Server binaries for a specific platform. -* `kubernetes-salt.tar.gz` The salt script/tree shared across multiple deployment scripts. When building final release tars, they are first staged into `_output/release-stage` before being tar'd up and put into `_output/release-tars`. diff --git a/build/lib/release.sh b/build/lib/release.sh index 1fd9296fd01..f02444d537c 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -78,7 +78,6 @@ function kube::release::package_tarballs() { mkdir -p "${RELEASE_TARS}" kube::release::package_src_tarball & kube::release::package_client_tarballs & - kube::release::package_salt_tarball & kube::release::package_kube_manifests_tarball & kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; } @@ -359,35 +358,7 @@ function kube::release::create_docker_images_for_server() { } -# Package up the salt configuration tree. This is an optional helper to getting -# a cluster up and running. -function kube::release::package_salt_tarball() { - kube::log::status "Building tarball: salt" - - local release_stage="${RELEASE_STAGE}/salt/kubernetes" - rm -rf "${release_stage}" - mkdir -p "${release_stage}" - - cp -R "${KUBE_ROOT}/cluster/saltbase" "${release_stage}/" - - # TODO(#3579): This is a temporary hack. It gathers up the yaml, - # yaml.in, json files in cluster/addons (minus any demos) and overlays - # them into kube-addons, where we expect them. (This pipeline is a - # fancy copy, stripping anything but the files we don't want.) - local objects - objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo) - tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${release_stage}/saltbase/salt/kube-addons" - - kube::release::clean_cruft - - local package_name="${RELEASE_TARS}/kubernetes-salt.tar.gz" - kube::release::create_tarball "${package_name}" "${release_stage}/.." -} - -# This will pack kube-system manifests files for distros without using salt -# such as GCI and Ubuntu Trusty. We directly copy manifests from -# cluster/addons and cluster/saltbase/salt. The script of cluster initialization -# will remove the salt configuration and evaluate the variables in the manifests. +# This will pack kube-system manifests files for distros such as COS. 
function kube::release::package_kube_manifests_tarball() { kube::log::status "Building tarball: manifests" @@ -468,8 +439,7 @@ function kube::release::package_test_tarball() { # using the bundled cluster/get-kube-binaries.sh script). # Included in this tarball: # - Cluster spin up/down scripts and configs for various cloud providers -# - Tarballs for salt configs that are ready to be uploaded -# to master by whatever means appropriate. +# - Tarballs for manifest configs that are ready to be uploaded # - Examples (which may or may not still work) # - The remnants of the docs/ directory function kube::release::package_final_tarball() { @@ -488,13 +458,10 @@ Client binaries are no longer included in the Kubernetes final tarball. Run cluster/get-kube-binaries.sh to download client and server binaries. EOF - # We want everything in /cluster except saltbase. That is only needed on the - # server. + # We want everything in /cluster. cp -R "${KUBE_ROOT}/cluster" "${release_stage}/" - rm -rf "${release_stage}/cluster/saltbase" mkdir -p "${release_stage}/server" - cp "${RELEASE_TARS}/kubernetes-salt.tar.gz" "${release_stage}/server/" cp "${RELEASE_TARS}/kubernetes-manifests.tar.gz" "${release_stage}/server/" cat < "${release_stage}/server/README" Server binary tarballs are no longer included in the Kubernetes final tarball. diff --git a/build/release-tars/BUILD b/build/release-tars/BUILD index 27773468028..9b5beefdc54 100644 --- a/build/release-tars/BUILD +++ b/build/release-tars/BUILD @@ -180,7 +180,6 @@ pkg_tar( build_tar = "@io_kubernetes_build//tools/build_tar", files = [ ":kubernetes-manifests.tar.gz", - ":kubernetes-salt.tar.gz", ], package_dir = "server", visibility = ["//visibility:private"], @@ -216,15 +215,6 @@ pkg_tar( ], ) -pkg_tar( - name = "kubernetes-salt", - build_tar = "@io_kubernetes_build//tools/build_tar", - extension = "tar.gz", - deps = [ - "//cluster/saltbase:salt", - ], -) - release_filegroup( name = "release-tars", srcs = [ @@ -233,7 +223,6 @@ release_filegroup( ":kubernetes-node-%s.tar.gz" % PLATFORM_ARCH_STRING, ":kubernetes-server-%s.tar.gz" % PLATFORM_ARCH_STRING, ":kubernetes-manifests.tar.gz", - ":kubernetes-salt.tar.gz", ":kubernetes-src.tar.gz", ":kubernetes-test.tar.gz", ], diff --git a/cluster/BUILD b/cluster/BUILD index 8dea73c4cd2..ed62e839b49 100644 --- a/cluster/BUILD +++ b/cluster/BUILD @@ -20,7 +20,6 @@ filegroup( "//cluster/images/etcd/rollback:all-srcs", "//cluster/images/hyperkube:all-srcs", "//cluster/images/kubemark:all-srcs", - "//cluster/saltbase:all-srcs", ], tags = ["automanaged"], ) diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml index 28ffb1c03b1..fc2079c5864 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml @@ -129,17 +129,6 @@ data: max_lines 1000 system.input.conf: |- - # Example: - # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 - - type tail - format /^(?