Merge branch 'master' into fix-guestbook-go-build

This commit is contained in:
Matthias Luebken
2016-03-24 13:37:16 +01:00
2366 changed files with 174197 additions and 66105 deletions

View File

@@ -22,10 +22,12 @@ docs/man/man1/kubectl-config-view.1
docs/man/man1/kubectl-config.1
docs/man/man1/kubectl-convert.1
docs/man/man1/kubectl-cordon.1
docs/man/man1/kubectl-create-configmap.1
docs/man/man1/kubectl-create-namespace.1
docs/man/man1/kubectl-create-secret-docker-registry.1
docs/man/man1/kubectl-create-secret-generic.1
docs/man/man1/kubectl-create-secret.1
docs/man/man1/kubectl-create-serviceaccount.1
docs/man/man1/kubectl-create.1
docs/man/man1/kubectl-delete.1
docs/man/man1/kubectl-describe.1
@@ -73,10 +75,12 @@ docs/user-guide/kubectl/kubectl_config_view.md
docs/user-guide/kubectl/kubectl_convert.md
docs/user-guide/kubectl/kubectl_cordon.md
docs/user-guide/kubectl/kubectl_create.md
docs/user-guide/kubectl/kubectl_create_configmap.md
docs/user-guide/kubectl/kubectl_create_namespace.md
docs/user-guide/kubectl/kubectl_create_secret.md
docs/user-guide/kubectl/kubectl_create_secret_docker-registry.md
docs/user-guide/kubectl/kubectl_create_secret_generic.md
docs/user-guide/kubectl/kubectl_create_serviceaccount.md
docs/user-guide/kubectl/kubectl_delete.md
docs/user-guide/kubectl/kubectl_describe.md
docs/user-guide/kubectl/kubectl_drain.md

5
.gitignore vendored
View File

@@ -14,7 +14,7 @@
*.iml
# Vscode files
.vscode/**
.vscode
# This is where the result of the go build goes
/output/**
@@ -88,6 +88,9 @@ doc_tmp/
# CoreOS stuff
cluster/libvirt-coreos/coreos_*.img
# Juju Stuff
cluster/juju/charms/*
# Downloaded Kubernetes binary release
kubernetes/

View File

@@ -1,4 +1,322 @@
Please see the [Releases Page](https://github.com/kubernetes/kubernetes/releases)
# Release Notes for Kubernetes 1.2.0
## [Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.2/examples)
## Downloads
binary | hash alg | hash
------ | -------- | ----
[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.0/kubernetes.tar.gz) | md5 | `c0ce9e6150e9d7a19455db82f3318b4c`
[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.0/kubernetes.tar.gz) | sha1 | `52dd998e1191f464f581a9b87017d70ce0b058d9`
## Changes since v1.1.1
## Major Themes
* <strong>Significant scale improvements</strong>. Increased cluster scale by 400% to 1000 nodes with 30,000 pods per cluster.
Kubelet supports 100 pods per node with 4x reduced system overhead.
* <strong>Simplified application deployment and management. </strong>
* Dynamic Configuration (ConfigMap API in the core API group) enables application
configuration to be stored as a Kubernetes API object and pulled dynamically on
container startup, as an alternative to baking in command-line flags when a
container is built.
* Turnkey Deployments (Deployment API (Beta) in the Extensions API group)
automate deployment and rolling updates of applications, specified
declaratively. It handles versioning, multiple simultaneous rollouts,
aggregating status across all pods, maintaining application availability, and
rollback.
* <strong>Automated cluster management: </strong>
* Kubernetes clusters can now span zones within a cloud provider. Pods from a
service will be automatically spread across zones, enabling applications to
tolerate zone failure.
* Simplified way to run a container on every node (DaemonSet API (Beta) in the
Extensions API group): Kubernetes can schedule a service (such as a logging
agent) that runs one, and only one, pod per node.
* TLS and L7 support (Ingress API (Beta) in the Extensions API group): Kubernetes
is now easier to integrate into custom networking environments by supporting
TLS for secure communication and L7 http-based traffic routing.
* Graceful Node Shutdown (aka drain) - The new “kubectl drain” command gracefully
evicts pods from nodes in preparation for disruptive operations like kernel
upgrades or maintenance.
* Custom Metrics for Autoscaling (HorizontalPodAutoscaler API in the Autoscaling
API group): The Horizontal Pod Autoscaling feature now supports custom metrics
(Alpha), allowing you to specify application-level metrics and thresholds to
trigger scaling up and down the number of pods in your application.
* <strong>New GUI</strong> (dashboard) allows you to get started quickly and enables the same
functionality found in the CLI as a more approachable and discoverable way of
interacting with the system. Note: the GUI is enabled by default in 1.2 clusters.
<img src="docs/images/newgui.png" width="" alt="Dashboard UI screenshot showing cards that represent applications that run inside a cluster" title="Dashboard UI apps screen">
## Other notable improvements
* Job was Beta in 1.1 and is GA in 1.2 .
* <code>apiVersion: batch/v1 </code>is now available. You now do not need to specify the <code>.spec.selector</code> field — a [unique selector is automatically generated ](http://kubernetes.io/docs/user-guide/jobs/#pod-selector)for you.
* The previous version, <code>apiVersion: extensions/v1beta1</code>, is still supported. Even if you roll back to 1.1, the objects created using
the new apiVersion will still be accessible, using the old version. You can
continue to use your existing JSON and YAML files until you are ready to switch
to <code>batch/v1</code>. We may remove support for Jobs with <code>apiVersion: extensions/v1beta1 </code>in 1.3 or 1.4.
* HorizontalPodAutoscaler was Beta in 1.1 and is GA in 1.2 .
* <code>apiVersion: autoscaling/v1 </code>is now available. Changes in this version are:
* Field CPUUtilization which was a nested structure CPUTargetUtilization in
HorizontalPodAutoscalerSpec was replaced by TargetCPUUtilizationPercentage
which is an integer.
* ScaleRef of type SubresourceReference in HorizontalPodAutoscalerSpec which
referred to scale subresource of the resource being scaled was replaced by
ScaleTargetRef which points just to the resource being scaled.
* In extensions/v1beta1 if CPUUtilization in HorizontalPodAutoscalerSpec was not
specified it was set to 80 by default while in autoscaling/v1 HPA object
without TargetCPUUtilizationPercentage specified is a valid object. Pod
autoscaler controller will apply a default scaling policy in this case which is
equivalent to the previous one but may change in the future.
* The previous version, <code>apiVersion: extensions/v1beta1</code>, is still supported. Even if you roll back to 1.1, the objects created using
the new apiVersions will still be accessible, using the old version. You can
continue to use your existing JSON and YAML files until you are ready to switch
to <code>autoscaling/v1</code>. We may remove support for HorizontalPodAutoscalers with <code>apiVersion: extensions/v1beta1 </code>in 1.3 or 1.4.
* Kube-Proxy now defaults to an iptables-based proxy. If the --proxy-mode flag is
specified while starting kube-proxy (userspace or iptables), the flag value
will be respected. If the flag value is not specified, the kube-proxy respects
the Node object annotation: net.beta.kubernetes.io/proxy-mode. If the
annotation is not specified, then iptables mode is the default. If kube-proxy
is unable to start in iptables mode because system requirements are not met
(kernel or iptables versions are insufficient), the kube-proxy will fall-back
to userspace mode. Kube-proxy is much more performant and less
resource-intensive in iptables mode.
* Node stability can be improved by reserving [resources](https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/proposals/node-allocatable.md) for the base operating system using --system-reserved and --kube-reserved Kubelet flags
* Liveness and readiness probes now support more configuration parameters:
periodSeconds, successThreshold, failureThreshold
* The new ReplicaSet API (Beta) in the Extensions API group is similar to
ReplicationController, but its [selector](http://kubernetes.io/docs/user-guide/labels/#label-selectors) is more general (supports set-based selector; whereas ReplicationController
only supports equality-based selector).
* Scale subresource support is now expanded to ReplicaSets along with
ReplicationControllers and Deployments. Scale now supports two different types
of selectors to accommodate both [equality-based selectors](http://kubernetes.io/docs/user-guide/labels/#equality-based-requirement) supported by ReplicationControllers and [set-based selectors](http://kubernetes.io/docs/user-guide/labels/#set-based-requirement) supported by Deployments and ReplicaSets.
* “kubectl run” now produces Deployments (instead of ReplicationControllers) and
Jobs (instead of Pods) by default.
* Pods can now consume Secret data in environment variables and inject those
environment variables into a container's command-line args.
* Stable version of Heapster which scales up to 1000 nodes: more metrics, reduced
latency, reduced cpu/memory consumption (~4mb per monitored node).
* Pods now have a security context which allows users to specify:
* attributes which apply to the whole pod:
* User ID
* Whether all containers should be non-root
* Supplemental Groups
* FSGroup - a special supplemental group
* SELinux options
* If a pod defines an FSGroup, that Pod's system (emptyDir, secret, configMap,
etc) volumes and block-device volumes will be owned by the FSGroup, and each
container in the pod will run with the FSGroup as a supplemental group
* Volumes that support SELinux labelling are now automatically relabeled with the
Pod's SELinux context, if specified
* A stable client library release\_1\_2 is added. The library is [here](https://github.com/kubernetes/kubernetes/tree/master/pkg/client/clientset_generated/release_1_2), and detailed doc is [here](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/generating-clientset.md#released-clientsets). We will keep the interface of this go client stable.
* New Azure File Service Volume Plugin enables mounting Microsoft Azure File
Volumes (SMB 2.1 and 3.0) into a Pod. See [example](https://github.com/kubernetes/kubernetes/blob/release-1.2/examples/azure_file/README.md) for details.
* Logs usage and root filesystem usage of a container, volumes usage of a pod and node disk usage are exposed through Kubelet new metrics API.
## Experimental Features
* Dynamic Provisioning of PersistentVolumes: Kubernetes previously required all
volumes to be manually provisioned by a cluster administrator before use. With
this feature, volume plugins that support it (GCE PD, AWS EBS, and Cinder) can
automatically provision a PersistentVolume to bind to an unfulfilled
PersistentVolumeClaim.
* Run multiple schedulers in parallel, e.g. one or more custom schedulers
alongside the default Kubernetes scheduler, using pod annotations to select
among the schedulers for each pod. Documentation is [here](http://kubernetes.io/docs/admin/multiple-schedulers.md), design doc is [here](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/multiple-schedulers.md).
* More expressive node affinity syntax, and support for “soft” node affinity.
Node selectors (to constrain pods to schedule on a subset of nodes) now support
the operators {<code>In, NotIn, Exists, DoesNotExist, Gt, Lt</code>} instead of just conjunction of exact match on node label values. In
addition, we've introduced a new “soft” kind of node selector that is just a
hint to the scheduler; the scheduler will try to satisfy these requests but it
does not guarantee they will be satisfied. Both the “hard” and “soft” variants
of node affinity use the new syntax. Documentation is [here](http://kubernetes.io/docs/user-guide/node-selection/) (see section “Alpha feature in Kubernetes v1.2: Node Affinity“). Design doc is [here](https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/design/nodeaffinity.md).
* A pod can specify its own Hostname and Subdomain via annotations (<code>pod.beta.kubernetes.io/hostname, pod.beta.kubernetes.io/subdomain)</code>. If the Subdomain matches the name of a [headless service](http://kubernetes.io/docs/user-guide/services/#headless-services) in the same namespace, a DNS A record is also created for the pod's FQDN. More
details can be found in the [DNS README](https://github.com/kubernetes/kubernetes/blob/release-1.2/cluster/addons/dns/README.md#a-records-and-hostname-based-on-pod-annotations---a-beta-feature-in-kubernetes-v12). Changes were introduced in PR [#20688](https://github.com/kubernetes/kubernetes/pull/20688).
* New SchedulerExtender enables users to implement custom
out-of-(the-scheduler)-process scheduling predicates and priority functions,
for example to schedule pods based on resources that are not directly managed
by Kubernetes. Changes were introduced in PR [#13580](https://github.com/kubernetes/kubernetes/pull/13580). Example configuration and documentation is available [here](https://github.com/kubernetes/kubernetes/blob/master/docs/design/scheduler_extender.md). This is an alpha feature and may not be supported in its current form at beta
or GA.
* New Flex Volume Plugin enables users to use out-of-process volume plugins that
are installed to “/usr/libexec/kubernetes/kubelet-plugins/volume/exec/” on
every node, instead of being compiled into the Kubernetes binary. See [example](https://github.com/kubernetes/kubernetes/blob/master/examples/flexvolume/README.md) for details.
* vendor volumes into a pod. It expects vendor drivers are installed in the
volume plugin path on each kubelet node. This is an alpha feature and may
change in future.
* Kubelet exposes a new Alpha metrics API - /stats/summary in a user friendly format with reduced system overhead. The measurement is done in PR [#22542](https://github.com/kubernetes/kubernetes/pull/22542).
## Action required
* Docker v1.9.1 is officially recommended. Docker v1.8.3 and Docker v1.10 are
supported. If you are using an older release of Docker, please upgrade. Known
issues with Docker 1.9.1 can be found below.
* CPU hardcapping will be enabled by default for containers with CPU limit set,
if supported by the kernel. You should either adjust your CPU limit, or set CPU
request only, if you want to avoid hardcapping. If the kernel does not support
CPU Quota, NodeStatus will contain a warning indicating that CPU Limits cannot
be enforced.
* The following applies only if you use the Go language client (<code>/pkg/client/unversioned</code>) to create Job by defining Go variables of type "<code>k8s.io/kubernetes/pkg/apis/extensions".Job</code>). We think <strong>this is not common</strong>, so if you are not sure what this means, you probably aren't doing this. If
you do this, then, at the time you re-vendor the "<code>k8s.io/kubernetes/"</code> code, you will need to set <code>job.Spec.ManualSelector = true</code>, or else set <code>job.Spec.Selector = nil. </code>Otherwise, the jobs you create may be rejected. See [Specifying your own pod selector](http://kubernetes.io/docs/user-guide/jobs/#specifying-your-own-pod-selector).
* Deployment was Alpha in 1.1 (though it had apiVersion extensions/v1beta1) and
was disabled by default. Due to some non-backward-compatible API changes, any
Deployment objects you created in 1.1 won't work in the 1.2 release.
* Before upgrading to 1.2, <strong>delete all Deployment alpha-version resources</strong>, including the Replication Controllers and Pods the Deployment manages. Then
create Deployment Beta resources after upgrading to 1.2. Not deleting the
Deployment objects may cause the deployment controller to mistakenly match
other pods and delete them, due to the selector API change.
* Client (kubectl) and server versions must match (both 1.1 or both 1.2) for any
Deployment-related operations.
* Behavior change:
* Deployment creates ReplicaSets instead of ReplicationControllers.
* Scale subresource now has a new <code>targetSelector</code> field in its status. This field supports the new set-based selectors supported
by Deployments, but in a serialized format.
* Spec change:
* Deployment's [selector](http://kubernetes.io/docs/user-guide/labels/#label-selectors) is now more general (supports set-based selector; it only supported
equality-based selector in 1.1).
* .spec.uniqueLabelKey is removed -- users can't customize unique label key --
and its default value is changed from
“deployment.kubernetes.io/podTemplateHash” to “pod-template-hash”.
* .spec.strategy.rollingUpdate.minReadySeconds is moved to .spec.minReadySeconds
* DaemonSet was Alpha in 1.1 (though it had apiVersion extensions/v1beta1) and
was disabled by default. Due to some non-backward-compatible API changes, any
DaemonSet objects you created in 1.1 won't work in the 1.2 release.
* Before upgrading to 1.2, <strong>delete all DaemonSet alpha-version resources</strong>. If you do not want to disrupt the pods, use kubectl delete daemonset <name>
--cascade=false. Then create DaemonSet Beta resources after upgrading to 1.2.
* Client (kubectl) and server versions must match (both 1.1 or both 1.2) for any
DaemonSet-related operations.
* Behavior change:
* DaemonSet pods will be created on nodes with .spec.unschedulable=true and will
not be evicted from nodes whose Ready condition is false.
* Updates to the pod template are now permitted. To perform a rolling update of a
DaemonSet, update the pod template and then delete its pods one by one; they
will be replaced using the updated template.
* Spec change:
* DaemonSet's [selector](http://kubernetes.io/docs/user-guide/labels/#label-selectors) is now more general (supports set-based selector; it only supported
equality-based selector in 1.1).
* Running against a secured etcd requires these flags to be passed to
kube-apiserver (instead of --etcd-config):
* --etcd-certfile, --etcd-keyfile (if using client cert auth)
* --etcd-cafile (if not using system roots)
* As part of preparation in 1.2 for adding support for protocol buffers (and the
direct YAML support in the API available today), the Content-Type and Accept
headers are now properly handled as per the HTTP spec. As a consequence, if
you had a client that was sending an invalid Content-Type or Accept header to
the API, in 1.2 you will either receive a 415 or 406 error.
The only client
this is known to affect is curl: when you use -d with JSON but don't set a
content type, curl helpfully sends "application/x-www-urlencoded", which is not
correct.
Other client authors should double check that you are sending proper
accept and content type headers, or set no value (in which case JSON is the
default).
An example using curl:
<code>curl -H "Content-Type: application/json" -XPOST -d
'{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}' "[http://127.0.0.1:8080/api/v1/namespaces](http://127.0.0.1:8080/api/v1/namespaces)"</code>
* The version of InfluxDB is bumped from 0.8 to 0.9 which means storage schema
change. More details [here](https://docs.influxdata.com/influxdb/v0.9/administration/upgrading/).
* We have renamed “minions” to “nodes”. If you were specifying NUM\_MINIONS or
MINION\_SIZE to kube-up, you should now specify NUM\_NODES or NODE\_SIZE.
## Known Issues
* Paused deployments can't be resized and don't clean up old ReplicaSets.
* Minimum memory limit is 4MB. This is a docker limitation
* Minimum CPU limits is 10m. This is a Linux Kernel limitation
* “kubectl rollout undo” (i.e. rollback) will hang on paused deployments, because
paused deployments can't be rolled back (this is expected), and the command
waits for rollback events to return the result. Users should use “kubectl
rollout resume” to resume a deployment before rolling back.
* “kubectl edit <list>” will open the editor multiple times, once for each
resource in the list.
* If you create HPA object using autoscaling/v1 API without specifying
targetCPUUtilizationPercentage and read it using kubectl it will print default
value as specified in extensions/v1beta1 (see details in [#23196](https://github.com/kubernetes/kubernetes/issues/23196)).
* If a node or kubelet crashes with a volume attached, the volume will remain
attached to that node. If that volume can only be attached to one node at a
time (GCE PDs attached in RW mode, for example), then the volume must be
manually detached before Kubernetes can attach it to other nodes.
* If a volume is already attached to a node any subsequent attempts to attach it
again (due to kubelet restart, for example) will fail. The volume must either
be manually detached first or the pods referencing it deleted (which would
trigger automatic volume detach).
* In very large clusters it may happen that a few nodes won't register in API
server in a given timeframe for whatever reasons (networking issue, machine
failure, etc.). Normally when kube-up script will encounter even one NotReady
node it will fail, even though the cluster most likely will be working. We
added an environmental variable to kube-up ALLOWED\_NOTREADY\_NODES that
defines the number of nodes that if not Ready in time won't cause kube-up
failure.
* “kubectl rolling-update” only supports Replication Controllers (it doesn't
support Replica Sets). It's recommended to use Deployment 1.2 with “kubectl
rollout” commands instead, if you want to rolling update Replica Sets.
* When live upgrading Kubelet to 1.2 without draining the pods running on the node,
the containers will be restarted by Kubelet (see details in [#23104](https://github.com/kubernetes/kubernetes/issues/23104)).
### Docker Known Issues
#### 1.9.1
* Listing containers can be slow at times which will affect kubelet performance.
More information [here](https://github.com/docker/docker/issues/17720)
* Docker daemon restarts can fail. Docker checkpoints have to be deleted between
restarts. More information [here](https://github.com/kubernetes/kubernetes/issues/20995)
* Pod IP allocation-related issues. Deleting the docker checkpoint prior to
restarting the daemon alleviates this issue, but hasn't been verified to
completely eliminate the IP allocation issue. More information [here](https://github.com/kubernetes/kubernetes/issues/21523#issuecomment-191498969)
* Daemon becomes unresponsive (rarely) due to kernel deadlocks. More information [here](https://github.com/kubernetes/kubernetes/issues/21866#issuecomment-189492391)
## Provider-specific Notes
### Various
Core changes:
* Support for load balancers with source ranges
### AWS
Core changes:
* Support for ELBs with complex configurations: better subnet selection with
multiple subnets, and internal ELBs
* Support for VPCs with private dns names
* Multiple fixes to EBS volume mounting code for robustness, and to support
mounting the full number of AWS recommended volumes.
* Multiple fixes to avoid hitting AWS rate limits, and to throttle if we do
* Support for the EC2 Container Registry (currently in us-east-1 only)
With kube-up:
* Automatically install updates on boot & reboot
* Use optimized image based on Jessie by default
* Add support for Ubuntu Wily
* Master is configured with automatic restart-on-failure, via CloudWatch
* Bootstrap reworked to be more similar to GCE; better supports reboots/restarts
* Use an elastic IP for the master by default
* Experimental support for node spot instances (set NODE\_SPOT\_PRICE=0.05)
### GCE
* Ubuntu Trusty support added
## Changelog
(Linked [github releases](https://github.com/kubernetes/kubernetes/releases) 1.1.2 to 1.2.0-beta-1 that are part of 1.2.0)
* [v1.1.2](https://github.com/kubernetes/kubernetes/releases/tag/v1.1.2)
* [v1.1.3](https://github.com/kubernetes/kubernetes/releases/tag/v1.1.3)
* [v1.1.4](https://github.com/kubernetes/kubernetes/releases/tag/v1.1.4)
* [v1.1.7](https://github.com/kubernetes/kubernetes/releases/tag/v1.1.7)
* [v1.1.8](https://github.com/kubernetes/kubernetes/releases/tag/v1.1.8)
* [v1.2.0-alpha.4](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0-alpha.4)
* [v1.2.0-alpha.5](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0-alpha.5)
* [v1.2.0-alpha.6](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0-alpha.6)
* [v1.2.0-alpha.7](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0-alpha.7)
* [v1.2.0-alpha.8](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0-alpha.8)
* [v1.2.0-beta.0](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0-beta.0)
* [v1.2.0-beta.1](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0-beta.1)
Please see the [Releases Page](https://github.com/kubernetes/kubernetes/releases) for older releases.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CHANGELOG.md?pixel)]()

362
Godeps/.license_file_state generated Normal file
View File

@@ -0,0 +1,362 @@
# These files have been checked upstream and are either missing or have no
# valuable license/copyright info in them
# To regenerate this list, remove this file and rerun update-godep-licenses.sh
#
bitbucket.org/ww/goautoneg/master/LICENSE
bitbucket.org/ww/goautoneg/master/LICENSE.code
bitbucket.org/ww/goautoneg/master/LICENSE.txt
bitbucket.org/ww/goautoneg/master/LICENSE.md
raw.githubusercontent.com/appc/cni/master/NOTICE
raw.githubusercontent.com/appc/cni/master/NOTICE.txt
raw.githubusercontent.com/appc/cni/master/README
raw.githubusercontent.com/appc/cni/master/README.md
raw.githubusercontent.com/appc/spec/master/NOTICE
raw.githubusercontent.com/appc/spec/master/NOTICE.txt
raw.githubusercontent.com/appc/spec/master/README
raw.githubusercontent.com/appc/spec/master/README.md
raw.githubusercontent.com/armon/go-metrics/master/NOTICE
raw.githubusercontent.com/armon/go-metrics/master/NOTICE.txt
raw.githubusercontent.com/armon/go-metrics/master/README
raw.githubusercontent.com/armon/go-metrics/master/README.md
raw.githubusercontent.com/beorn7/perks/master/LICENSE
raw.githubusercontent.com/beorn7/perks/master/LICENSE.code
raw.githubusercontent.com/beorn7/perks/master/LICENSE.txt
raw.githubusercontent.com/beorn7/perks/master/LICENSE.md
raw.githubusercontent.com/beorn7/perks/master/NOTICE
raw.githubusercontent.com/beorn7/perks/master/NOTICE.txt
raw.githubusercontent.com/beorn7/perks/master/README
raw.githubusercontent.com/blang/semver/master/NOTICE
raw.githubusercontent.com/blang/semver/master/NOTICE.txt
raw.githubusercontent.com/blang/semver/master/README
raw.githubusercontent.com/blang/semver/master/README.md
raw.githubusercontent.com/boltdb/bolt/master/NOTICE
raw.githubusercontent.com/boltdb/bolt/master/NOTICE.txt
raw.githubusercontent.com/boltdb/bolt/master/README
raw.githubusercontent.com/boltdb/bolt/master/README.md
raw.githubusercontent.com/camlistore/go4/master/NOTICE
raw.githubusercontent.com/camlistore/go4/master/NOTICE.txt
raw.githubusercontent.com/camlistore/go4/master/README
raw.githubusercontent.com/camlistore/go4/master/README.md
raw.githubusercontent.com/ClusterHQ/flocker-go/master/NOTICE
raw.githubusercontent.com/ClusterHQ/flocker-go/master/NOTICE.txt
raw.githubusercontent.com/ClusterHQ/flocker-go/master/README
raw.githubusercontent.com/ClusterHQ/flocker-go/master/README.md
raw.githubusercontent.com/codegangsta/negroni/master/NOTICE
raw.githubusercontent.com/codegangsta/negroni/master/NOTICE.txt
raw.githubusercontent.com/codegangsta/negroni/master/README
raw.githubusercontent.com/codegangsta/negroni/master/README.md
raw.githubusercontent.com/coreos/go-etcd/master/NOTICE
raw.githubusercontent.com/coreos/go-etcd/master/NOTICE.txt
raw.githubusercontent.com/coreos/go-etcd/master/README
raw.githubusercontent.com/coreos/go-etcd/master/README.md
raw.githubusercontent.com/coreos/go-semver/master/NOTICE
raw.githubusercontent.com/coreos/go-semver/master/NOTICE.txt
raw.githubusercontent.com/coreos/go-semver/master/README
raw.githubusercontent.com/coreos/go-semver/master/README.md
raw.githubusercontent.com/coreos/go-systemd/master/NOTICE
raw.githubusercontent.com/coreos/go-systemd/master/NOTICE.txt
raw.githubusercontent.com/coreos/go-systemd/master/README
raw.githubusercontent.com/coreos/go-systemd/master/README.md
raw.githubusercontent.com/coreos/rkt/master/NOTICE
raw.githubusercontent.com/coreos/rkt/master/NOTICE.txt
raw.githubusercontent.com/coreos/rkt/master/README
raw.githubusercontent.com/coreos/rkt/master/README.md
raw.githubusercontent.com/cpuguy83/go-md2man/master/NOTICE
raw.githubusercontent.com/cpuguy83/go-md2man/master/NOTICE.txt
raw.githubusercontent.com/cpuguy83/go-md2man/master/README
raw.githubusercontent.com/cpuguy83/go-md2man/master/README.md
raw.githubusercontent.com/davecgh/go-spew/master/NOTICE
raw.githubusercontent.com/davecgh/go-spew/master/NOTICE.txt
raw.githubusercontent.com/davecgh/go-spew/master/README
raw.githubusercontent.com/davecgh/go-spew/master/README.md
raw.githubusercontent.com/daviddengcn/go-colortext/master/NOTICE
raw.githubusercontent.com/daviddengcn/go-colortext/master/NOTICE.txt
raw.githubusercontent.com/daviddengcn/go-colortext/master/README
raw.githubusercontent.com/daviddengcn/go-colortext/master/README.md
raw.githubusercontent.com/dgrijalva/jwt-go/master/NOTICE
raw.githubusercontent.com/dgrijalva/jwt-go/master/NOTICE.txt
raw.githubusercontent.com/dgrijalva/jwt-go/master/README
raw.githubusercontent.com/dgrijalva/jwt-go/master/README.md
raw.githubusercontent.com/elazarl/go-bindata-assetfs/master/NOTICE
raw.githubusercontent.com/elazarl/go-bindata-assetfs/master/NOTICE.txt
raw.githubusercontent.com/elazarl/go-bindata-assetfs/master/README
raw.githubusercontent.com/elazarl/go-bindata-assetfs/master/README.md
raw.githubusercontent.com/elazarl/goproxy/master/NOTICE
raw.githubusercontent.com/elazarl/goproxy/master/NOTICE.txt
raw.githubusercontent.com/elazarl/goproxy/master/README
raw.githubusercontent.com/elazarl/goproxy/master/README.md
raw.githubusercontent.com/emicklei/go-restful/master/NOTICE
raw.githubusercontent.com/emicklei/go-restful/master/NOTICE.txt
raw.githubusercontent.com/emicklei/go-restful/master/README
raw.githubusercontent.com/emicklei/go-restful/master/README.md
raw.githubusercontent.com/evanphx/json-patch/master/NOTICE
raw.githubusercontent.com/evanphx/json-patch/master/NOTICE.txt
raw.githubusercontent.com/evanphx/json-patch/master/README
raw.githubusercontent.com/evanphx/json-patch/master/README.md
raw.githubusercontent.com/fsouza/go-dockerclient/master/NOTICE
raw.githubusercontent.com/fsouza/go-dockerclient/master/NOTICE.txt
raw.githubusercontent.com/fsouza/go-dockerclient/master/README
raw.githubusercontent.com/fsouza/go-dockerclient/master/README.md
raw.githubusercontent.com/garyburd/redigo/master/NOTICE
raw.githubusercontent.com/garyburd/redigo/master/NOTICE.txt
raw.githubusercontent.com/garyburd/redigo/master/README
raw.githubusercontent.com/garyburd/redigo/master/README.md
raw.githubusercontent.com/ghodss/yaml/master/NOTICE
raw.githubusercontent.com/ghodss/yaml/master/NOTICE.txt
raw.githubusercontent.com/ghodss/yaml/master/README
raw.githubusercontent.com/ghodss/yaml/master/README.md
raw.githubusercontent.com/godbus/dbus/master/NOTICE
raw.githubusercontent.com/godbus/dbus/master/NOTICE.txt
raw.githubusercontent.com/godbus/dbus/master/README
raw.githubusercontent.com/godbus/dbus/master/README.md
raw.githubusercontent.com/gogo/protobuf/master/NOTICE
raw.githubusercontent.com/gogo/protobuf/master/NOTICE.txt
raw.githubusercontent.com/go-ini/ini/master/NOTICE
raw.githubusercontent.com/go-ini/ini/master/NOTICE.txt
raw.githubusercontent.com/go-ini/ini/master/README
raw.githubusercontent.com/go-ini/ini/master/README.md
raw.githubusercontent.com/golang/glog/master/NOTICE
raw.githubusercontent.com/golang/glog/master/NOTICE.txt
raw.githubusercontent.com/golang/glog/master/README
raw.githubusercontent.com/golang/glog/master/README.md
raw.githubusercontent.com/golang/groupcache/master/NOTICE
raw.githubusercontent.com/golang/groupcache/master/NOTICE.txt
raw.githubusercontent.com/golang/groupcache/master/README
raw.githubusercontent.com/golang/groupcache/master/README.md
raw.githubusercontent.com/golang/protobuf/master/NOTICE
raw.githubusercontent.com/golang/protobuf/master/NOTICE.txt
raw.githubusercontent.com/golang/protobuf/master/README
raw.githubusercontent.com/google/btree/master/NOTICE
raw.githubusercontent.com/google/btree/master/NOTICE.txt
raw.githubusercontent.com/google/btree/master/README
raw.githubusercontent.com/google/btree/master/README.md
raw.githubusercontent.com/google/cadvisor/master/NOTICE
raw.githubusercontent.com/google/cadvisor/master/NOTICE.txt
raw.githubusercontent.com/google/cadvisor/master/README
raw.githubusercontent.com/google/cadvisor/master/README.md
raw.githubusercontent.com/google/gofuzz/master/NOTICE
raw.githubusercontent.com/google/gofuzz/master/NOTICE.txt
raw.githubusercontent.com/google/gofuzz/master/README
raw.githubusercontent.com/google/gofuzz/master/README.md
raw.githubusercontent.com/gorilla/context/master/NOTICE
raw.githubusercontent.com/gorilla/context/master/NOTICE.txt
raw.githubusercontent.com/gorilla/context/master/README
raw.githubusercontent.com/gorilla/context/master/README.md
raw.githubusercontent.com/gorilla/mux/master/NOTICE
raw.githubusercontent.com/gorilla/mux/master/NOTICE.txt
raw.githubusercontent.com/gorilla/mux/master/README
raw.githubusercontent.com/gorilla/mux/master/README.md
raw.githubusercontent.com/hashicorp/golang-lru/master/NOTICE
raw.githubusercontent.com/hashicorp/golang-lru/master/NOTICE.txt
raw.githubusercontent.com/hashicorp/golang-lru/master/README
raw.githubusercontent.com/hashicorp/golang-lru/master/README.md
raw.githubusercontent.com/hashicorp/go-msgpack/master/NOTICE
raw.githubusercontent.com/hashicorp/go-msgpack/master/NOTICE.txt
raw.githubusercontent.com/hashicorp/go-msgpack/master/README
raw.githubusercontent.com/hashicorp/go-msgpack/master/README.md
raw.githubusercontent.com/hashicorp/raft/master/NOTICE
raw.githubusercontent.com/hashicorp/raft/master/NOTICE.txt
raw.githubusercontent.com/hashicorp/raft/master/README
raw.githubusercontent.com/hashicorp/raft/master/README.md
raw.githubusercontent.com/hashicorp/raft-boltdb/master/NOTICE
raw.githubusercontent.com/hashicorp/raft-boltdb/master/NOTICE.txt
raw.githubusercontent.com/hashicorp/raft-boltdb/master/README
raw.githubusercontent.com/hashicorp/raft-boltdb/master/README.md
raw.githubusercontent.com/hawkular/hawkular-client-go/master/NOTICE
raw.githubusercontent.com/hawkular/hawkular-client-go/master/NOTICE.txt
raw.githubusercontent.com/hawkular/hawkular-client-go/master/README
raw.githubusercontent.com/hawkular/hawkular-client-go/master/README.md
raw.githubusercontent.com/imdario/mergo/master/NOTICE
raw.githubusercontent.com/imdario/mergo/master/NOTICE.txt
raw.githubusercontent.com/imdario/mergo/master/README
raw.githubusercontent.com/imdario/mergo/master/README.md
raw.githubusercontent.com/inconshreveable/mousetrap/master/NOTICE
raw.githubusercontent.com/inconshreveable/mousetrap/master/NOTICE.txt
raw.githubusercontent.com/inconshreveable/mousetrap/master/README
raw.githubusercontent.com/inconshreveable/mousetrap/master/README.md
raw.githubusercontent.com/influxdb/influxdb/master/NOTICE
raw.githubusercontent.com/influxdb/influxdb/master/NOTICE.txt
raw.githubusercontent.com/influxdb/influxdb/master/README
raw.githubusercontent.com/influxdb/influxdb/master/README.md
raw.githubusercontent.com/jmespath/go-jmespath/master/NOTICE
raw.githubusercontent.com/jmespath/go-jmespath/master/NOTICE.txt
raw.githubusercontent.com/jmespath/go-jmespath/master/README
raw.githubusercontent.com/jmespath/go-jmespath/master/README.md
raw.githubusercontent.com/jonboulle/clockwork/master/NOTICE
raw.githubusercontent.com/jonboulle/clockwork/master/NOTICE.txt
raw.githubusercontent.com/jonboulle/clockwork/master/README
raw.githubusercontent.com/jonboulle/clockwork/master/README.md
raw.githubusercontent.com/juju/ratelimit/master/NOTICE
raw.githubusercontent.com/juju/ratelimit/master/NOTICE.txt
raw.githubusercontent.com/juju/ratelimit/master/README
raw.githubusercontent.com/juju/ratelimit/master/README.md
raw.githubusercontent.com/kardianos/osext/master/NOTICE
raw.githubusercontent.com/kardianos/osext/master/NOTICE.txt
raw.githubusercontent.com/kardianos/osext/master/README
raw.githubusercontent.com/kardianos/osext/master/README.md
raw.githubusercontent.com/kr/pty/master/NOTICE
raw.githubusercontent.com/kr/pty/master/NOTICE.txt
raw.githubusercontent.com/kr/pty/master/README
raw.githubusercontent.com/kr/pty/master/README.md
raw.githubusercontent.com/miekg/dns/master/NOTICE
raw.githubusercontent.com/miekg/dns/master/NOTICE.txt
raw.githubusercontent.com/miekg/dns/master/README
raw.githubusercontent.com/miekg/dns/master/README.md
raw.githubusercontent.com/mistifyio/go-zfs/master/NOTICE
raw.githubusercontent.com/mistifyio/go-zfs/master/NOTICE.txt
raw.githubusercontent.com/mistifyio/go-zfs/master/README
raw.githubusercontent.com/mistifyio/go-zfs/master/README.md
raw.githubusercontent.com/mitchellh/mapstructure/master/NOTICE
raw.githubusercontent.com/mitchellh/mapstructure/master/NOTICE.txt
raw.githubusercontent.com/mitchellh/mapstructure/master/README
raw.githubusercontent.com/mitchellh/mapstructure/master/README.md
raw.githubusercontent.com/mvdan/xurls/master/NOTICE
raw.githubusercontent.com/mvdan/xurls/master/NOTICE.txt
raw.githubusercontent.com/mvdan/xurls/master/README
raw.githubusercontent.com/mvdan/xurls/master/README.md
raw.githubusercontent.com/mxk/go-flowrate/master/NOTICE
raw.githubusercontent.com/mxk/go-flowrate/master/NOTICE.txt
raw.githubusercontent.com/mxk/go-flowrate/master/README
raw.githubusercontent.com/mxk/go-flowrate/master/README.md
raw.githubusercontent.com/onsi/ginkgo/master/NOTICE
raw.githubusercontent.com/onsi/ginkgo/master/NOTICE.txt
raw.githubusercontent.com/onsi/ginkgo/master/README
raw.githubusercontent.com/onsi/ginkgo/master/README.md
raw.githubusercontent.com/onsi/gomega/master/NOTICE
raw.githubusercontent.com/onsi/gomega/master/NOTICE.txt
raw.githubusercontent.com/onsi/gomega/master/README
raw.githubusercontent.com/onsi/gomega/master/README.md
raw.githubusercontent.com/pborman/uuid/master/NOTICE
raw.githubusercontent.com/pborman/uuid/master/NOTICE.txt
raw.githubusercontent.com/pborman/uuid/master/README
raw.githubusercontent.com/pborman/uuid/master/README.md
raw.githubusercontent.com/pmezard/go-difflib/master/NOTICE
raw.githubusercontent.com/pmezard/go-difflib/master/NOTICE.txt
raw.githubusercontent.com/pmezard/go-difflib/master/README
raw.githubusercontent.com/pmezard/go-difflib/master/README.md
raw.githubusercontent.com/rackspace/gophercloud/master/NOTICE
raw.githubusercontent.com/rackspace/gophercloud/master/NOTICE.txt
raw.githubusercontent.com/rackspace/gophercloud/master/README
raw.githubusercontent.com/rackspace/gophercloud/master/README.md
raw.githubusercontent.com/russross/blackfriday/master/NOTICE
raw.githubusercontent.com/russross/blackfriday/master/NOTICE.txt
raw.githubusercontent.com/russross/blackfriday/master/README
raw.githubusercontent.com/russross/blackfriday/master/README.md
raw.githubusercontent.com/samuel/go-zookeeper/master/NOTICE
raw.githubusercontent.com/samuel/go-zookeeper/master/NOTICE.txt
raw.githubusercontent.com/samuel/go-zookeeper/master/README
raw.githubusercontent.com/samuel/go-zookeeper/master/README.md
raw.githubusercontent.com/scalingdata/gcfg/master/NOTICE
raw.githubusercontent.com/scalingdata/gcfg/master/NOTICE.txt
raw.githubusercontent.com/scalingdata/gcfg/master/README
raw.githubusercontent.com/scalingdata/gcfg/master/README.md
raw.githubusercontent.com/seccomp/libseccomp-golang/master/NOTICE
raw.githubusercontent.com/seccomp/libseccomp-golang/master/NOTICE.txt
raw.githubusercontent.com/seccomp/libseccomp-golang/master/README
raw.githubusercontent.com/seccomp/libseccomp-golang/master/README.md
raw.githubusercontent.com/shurcooL/sanitized_anchor_name/master/NOTICE
raw.githubusercontent.com/shurcooL/sanitized_anchor_name/master/NOTICE.txt
raw.githubusercontent.com/shurcooL/sanitized_anchor_name/master/README
raw.githubusercontent.com/shurcooL/sanitized_anchor_name/master/README.md
raw.githubusercontent.com/Sirupsen/logrus/master/NOTICE
raw.githubusercontent.com/Sirupsen/logrus/master/NOTICE.txt
raw.githubusercontent.com/Sirupsen/logrus/master/README
raw.githubusercontent.com/Sirupsen/logrus/master/README.md
raw.githubusercontent.com/skynetservices/skydns/master/NOTICE
raw.githubusercontent.com/skynetservices/skydns/master/NOTICE.txt
raw.githubusercontent.com/skynetservices/skydns/master/README
raw.githubusercontent.com/skynetservices/skydns/master/README.md
raw.githubusercontent.com/spf13/cobra/master/NOTICE
raw.githubusercontent.com/spf13/cobra/master/NOTICE.txt
raw.githubusercontent.com/spf13/cobra/master/README
raw.githubusercontent.com/spf13/cobra/master/README.md
raw.githubusercontent.com/spf13/pflag/master/NOTICE
raw.githubusercontent.com/spf13/pflag/master/NOTICE.txt
raw.githubusercontent.com/spf13/pflag/master/README
raw.githubusercontent.com/spf13/pflag/master/README.md
raw.githubusercontent.com/stretchr/objx/master/NOTICE
raw.githubusercontent.com/stretchr/objx/master/NOTICE.txt
raw.githubusercontent.com/stretchr/objx/master/README
raw.githubusercontent.com/stretchr/objx/master/README.md
raw.githubusercontent.com/stretchr/objx/master/LICENSE
raw.githubusercontent.com/stretchr/objx/master/LICENSE.code
raw.githubusercontent.com/stretchr/objx/master/LICENSE.txt
raw.githubusercontent.com/stretchr/testify/master/NOTICE
raw.githubusercontent.com/stretchr/testify/master/NOTICE.txt
raw.githubusercontent.com/stretchr/testify/master/README
raw.githubusercontent.com/syndtr/gocapability/master/NOTICE
raw.githubusercontent.com/syndtr/gocapability/master/NOTICE.txt
raw.githubusercontent.com/syndtr/gocapability/master/README
raw.githubusercontent.com/syndtr/gocapability/master/README.md
raw.githubusercontent.com/ugorji/go/master/NOTICE
raw.githubusercontent.com/ugorji/go/master/NOTICE.txt
raw.githubusercontent.com/ugorji/go/master/README
raw.githubusercontent.com/ugorji/go/master/README.md
raw.githubusercontent.com/vishvananda/netlink/master/NOTICE
raw.githubusercontent.com/vishvananda/netlink/master/NOTICE.txt
raw.githubusercontent.com/vishvananda/netlink/master/README
raw.githubusercontent.com/vishvananda/netlink/master/README.md
raw.githubusercontent.com/xiang90/probing/master/NOTICE
raw.githubusercontent.com/xiang90/probing/master/NOTICE.txt
raw.githubusercontent.com/xiang90/probing/master/README
raw.githubusercontent.com/xiang90/probing/master/README.md
raw.githubusercontent.com/xyproto/simpleredis/master/NOTICE
raw.githubusercontent.com/xyproto/simpleredis/master/NOTICE.txt
raw.githubusercontent.com/xyproto/simpleredis/master/README
raw.githubusercontent.com/xyproto/simpleredis/master/README.md
golang.org/x/crypto/master/NOTICE
golang.org/x/crypto/master/NOTICE.txt
golang.org/x/crypto/master/README
golang.org/x/crypto/master/README.md
golang.org/x/exp/master/NOTICE
golang.org/x/exp/master/NOTICE.txt
golang.org/x/exp/master/README
golang.org/x/exp/master/README.md
golang.org/x/net/master/NOTICE
golang.org/x/net/master/NOTICE.txt
golang.org/x/net/master/README
golang.org/x/net/master/README.md
golang.org/x/oauth2/master/NOTICE
golang.org/x/oauth2/master/NOTICE.txt
golang.org/x/oauth2/master/README
golang.org/x/oauth2/master/README.md
golang.org/x/sys/master/NOTICE
golang.org/x/sys/master/NOTICE.txt
golang.org/x/sys/master/README
golang.org/x/sys/master/README.md
golang.org/x/tools/master/NOTICE
golang.org/x/tools/master/NOTICE.txt
golang.org/x/tools/master/README
golang.org/x/tools/master/README.md
google.golang.org/api/master/NOTICE
google.golang.org/api/master/NOTICE.txt
google.golang.org/api/master/README
google.golang.org/api/master/README.md
google.golang.org/cloud/master/NOTICE
google.golang.org/cloud/master/NOTICE.txt
google.golang.org/cloud/master/README
google.golang.org/cloud/master/README.md
google.golang.org/grpc/master/NOTICE
google.golang.org/grpc/master/NOTICE.txt
google.golang.org/grpc/master/README
google.golang.org/grpc/master/README.md
gopkg.in/natefinch/master/NOTICE
gopkg.in/natefinch/master/NOTICE.txt
gopkg.in/natefinch/master/README
gopkg.in/natefinch/master/README.md
gopkg.in/yaml.v2/master/NOTICE
gopkg.in/yaml.v2/master/NOTICE.txt
gopkg.in/yaml.v2/master/README
gopkg.in/yaml.v2/master/README.md
k8s.io/heapster/master/NOTICE
k8s.io/heapster/master/NOTICE.txt
k8s.io/heapster/master/README
k8s.io/heapster/master/README.md
speter.net/go/master/NOTICE
speter.net/go/master/NOTICE.txt
speter.net/go/master/README
speter.net/go/master/README.md
gopkg.in/gcfg.v1/master/NOTICE
gopkg.in/gcfg.v1/master/NOTICE.txt
gopkg.in/gcfg.v1/master/README
gopkg.in/gcfg.v1/master/README.md

272
Godeps/Godeps.json generated
View File

@@ -16,7 +16,7 @@
},
{
"ImportPath": "github.com/ClusterHQ/flocker-go",
"Rev": "3f33ece70f6571f0ec45bfae2f243ab11fab6c52"
"Rev": "1c0a791b33bdc01d062b376612aa04e27eed7eb3"
},
{
"ImportPath": "github.com/Sirupsen/logrus",
@@ -144,125 +144,160 @@
"Comment": "v0.1-62-g8d75e11",
"Rev": "8d75e11374a1928608c906fe745b538483e7aeb2"
},
{
"ImportPath": "github.com/coreos/etcd/auth",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/compactor",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/discovery",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/error",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/lease",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/adt",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/contention",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/crc",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/fileutil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/httputil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/idutil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/ioutil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/logutil",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/netutil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pbutil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/runtime",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/timeutil",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"ImportPath": "github.com/coreos/etcd/pkg/schedule",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/testutil",
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/transport",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/wait",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/raft",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/rafthttp",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/snap",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/storage",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/store",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/version",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/etcd/wal",
"Comment": "v2.2.5",
"Rev": "bc9ddf260115d2680191c46977ae72b837785472"
"Comment": "v2.3.0",
"Rev": "5e6eb7e19d6385adfabb1f1caea03e732f9348ad"
},
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
@@ -271,23 +306,23 @@
},
{
"ImportPath": "github.com/coreos/go-oidc/http",
"Rev": "024cdeee09d02fb439eb55bc422e582ac115615b"
"Rev": "d7cb66526fffc811d602b6770581064f4b66b507"
},
{
"ImportPath": "github.com/coreos/go-oidc/jose",
"Rev": "024cdeee09d02fb439eb55bc422e582ac115615b"
"Rev": "d7cb66526fffc811d602b6770581064f4b66b507"
},
{
"ImportPath": "github.com/coreos/go-oidc/key",
"Rev": "024cdeee09d02fb439eb55bc422e582ac115615b"
"Rev": "d7cb66526fffc811d602b6770581064f4b66b507"
},
{
"ImportPath": "github.com/coreos/go-oidc/oauth2",
"Rev": "024cdeee09d02fb439eb55bc422e582ac115615b"
"Rev": "d7cb66526fffc811d602b6770581064f4b66b507"
},
{
"ImportPath": "github.com/coreos/go-oidc/oidc",
"Rev": "024cdeee09d02fb439eb55bc422e582ac115615b"
"Rev": "d7cb66526fffc811d602b6770581064f4b66b507"
},
{
"ImportPath": "github.com/coreos/go-semver/semver",
@@ -399,7 +434,7 @@
},
{
"ImportPath": "github.com/docker/spdystream",
"Rev": "106e140db2cb50923efe088bf2906b2ee5a45fec"
"Rev": "449fdfce4d962303d702fec724ef0ad181c92528"
},
{
"ImportPath": "github.com/elazarl/go-bindata-assetfs",
@@ -578,93 +613,93 @@
},
{
"ImportPath": "github.com/google/cadvisor/api",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/cache/memory",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/collector",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/container",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/events",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/fs",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/healthz",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/http",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/info/v1",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/info/v2",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/manager",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/metrics",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/pages",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/storage",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/summary",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/utils",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/validate",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/cadvisor/version",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.2",
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
},
{
"ImportPath": "github.com/google/gofuzz",
@@ -682,6 +717,10 @@
"ImportPath": "github.com/hashicorp/go-msgpack/codec",
"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
},
{
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
},
{
"ImportPath": "github.com/hashicorp/raft",
"Rev": "057b893fd996696719e98b6c44649ea14968c811"
@@ -690,6 +729,11 @@
"ImportPath": "github.com/hashicorp/raft-boltdb",
"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
},
{
"ImportPath": "github.com/hawkular/hawkular-client-go/metrics",
"Comment": "v0.5.1-1-g1d46ce7",
"Rev": "1d46ce7e1eca635f372357a8ccbf1fa7cc28b7d2"
},
{
"ImportPath": "github.com/imdario/mergo",
"Comment": "0.1.3-8-g6633656",
@@ -757,43 +801,43 @@
},
{
"ImportPath": "github.com/mesos/mesos-go/auth",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/detector",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/executor",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/mesosproto",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/mesosutil",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/messenger",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/scheduler",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/upid",
"Comment": "before-0.26-protos-14-g4a7554a",
"Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae"
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/miekg/dns",
@@ -864,8 +908,8 @@
},
{
"ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-665-gf928634",
"Rev": "f92863476c034f851073599c09d90cd61ee95b3d"
"Comment": "v1.0.0-842-g8992d74",
"Rev": "8992d7483a06748dea706e4716d042a4a9e73918"
},
{
"ImportPath": "github.com/russross/blackfriday",
@@ -876,10 +920,6 @@
"ImportPath": "github.com/samuel/go-zookeeper/zk",
"Rev": "177002e16a0061912f02377e2dd8951a8b3551bc"
},
{
"ImportPath": "github.com/scalingdata/gcfg",
"Rev": "37aabad69cfd3d20b8390d902a8b10e245c615ff"
},
{
"ImportPath": "github.com/seccomp/libseccomp-golang",
"Rev": "1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1"
@@ -1029,6 +1069,10 @@
"ImportPath": "google.golang.org/grpc",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb"
},
{
"ImportPath": "gopkg.in/gcfg.v1",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
},
{
"ImportPath": "gopkg.in/natefinch/lumberjack.v2",
"Comment": "v1.0-16-g20b71e5",

36054
Godeps/LICENSES generated Normal file

File diff suppressed because it is too large Load Diff

110
Godeps/LICENSES.md generated
View File

@@ -1,110 +0,0 @@
Dependency Licenses
-------------------
Package | License
------- | -------
bitbucket.org/bertimus9/systemstat | MITname
bitbucket.org/ww/goautoneg | spdxBSD3
github.com/abbot/go-http-auth | Apache-2
github.com/appc/cni | Apache-2
github.com/appc/spec | Apache-2
github.com/armon/go-metrics | MITname
github.com/aws/aws-sdk-go | Apache-2
github.com/beorn7/perks/quantile | MIT?
github.com/blang/semver | MITname
github.com/boltdb/bolt | MITname
github.com/camlistore/go4 | Apache-2
github.com/ClusterHQ/flocker-go | UNKNOWN
github.com/codegangsta/negroni | MITname
github.com/coreos/etcd | Apache-2
github.com/coreos/go-etcd | Apache-2
github.com/coreos/go-oidc | Apache-2
github.com/coreos/go-semver | Apache-2
github.com/coreos/go-systemd | Apache-2
github.com/coreos/pkg | Apache-2
github.com/coreos/rkt | MITname
github.com/cpuguy83/go-md2man | MITname
github.com/davecgh/go-spew | MIToldwithoutSellandNoDocumentationRequi
github.com/daviddengcn/go-colortext | BSD?
github.com/dgrijalva/jwt-go | spdxMIT
github.com/docker/docker | Apache-2
github.com/docker/docker/pkg/symlink | spdxBSD3
github.com/docker/go-units | Apache-2
github.com/docker/spdystream | SeeFile
github.com/elazarl/go-bindata-assetfs | spdxBSD2
github.com/elazarl/goproxy | BSDWarr
github.com/emicklei/go-restful | MITname
github.com/evanphx/json-patch | BSDWarr
github.com/fsouza/go-dockerclient | spdxBSD2
github.com/garyburd/redigo/internal | ApachesPermLim
github.com/garyburd/redigo/redis | ApachesPermLim
github.com/ghodss/yaml | MITname
github.com/go-ini/ini | Apache-2
github.com/godbus/dbus | spdxBSD2
github.com/gogo/protobuf | spdxBSD3
github.com/golang/glog | Apache-2
github.com/golang/groupcache | Apache-2
github.com/golang/protobuf | spdxBSD3
github.com/google/btree | Apache-2
github.com/google/cadvisor | Apache-2
github.com/google/gofuzz | Apache-2
github.com/gorilla/context | spdxBSD3
github.com/gorilla/mux | spdxBSD3
github.com/hashicorp/go-msgpack | spdxBSD3
github.com/hashicorp/raft | IntelPart08
github.com/hashicorp/raft-boltdb | IntelPart08
github.com/imdario/mergo | spdxBSD3
github.com/inconshreveable/mousetrap | Apache-2
github.com/influxdb/influxdb | MITname
github.com/jmespath/go-jmespath | Apache-2
github.com/jonboulle/clockwork | Apache-2
github.com/juju/ratelimit | LesserExceptionGPLVer3-TOOLONG
github.com/kardianos/osext | spdxBSD3
github.com/kr/pty | spdxMIT
github.com/matttproud/golang_protobuf_extensions | Apache-2
github.com/mesos/mesos-go | Apache-2
github.com/miekg/dns | spdxBSD3
github.com/mistifyio/go-zfs | Apache-2
github.com/mitchellh/mapstructure | MITname
github.com/mvdan/xurls | spdxBSD3
github.com/mxk/go-flowrate | spdxBSD3
github.com/onsi/ginkgo | spdxMIT
github.com/onsi/gomega | spdxMIT
github.com/opencontainers/runc | Apache-2
github.com/pborman/uuid | spdxBSD3
github.com/pmezard/go-difflib | BSD3
github.com/prometheus/client_golang | Apache-2
github.com/prometheus/client_model | Apache-2
github.com/prometheus/common/expfmt | Apache-2
github.com/prometheus/common/model | Apache-2
github.com/prometheus/procfs | Apache-2
github.com/rackspace/gophercloud | Apache-2
github.com/russross/blackfriday | AsIsVariant2-TOOLONG
github.com/samuel/go-zookeeper | spdxBSD3
github.com/scalingdata/gcfg | spdxBSD2
github.com/seccomp/libseccomp-golang | AllRights-TOOLONG
github.com/shurcooL/sanitized_anchor_name | MIT?
github.com/Sirupsen/logrus | MITname
github.com/skynetservices/skydns | MITname
github.com/spf13/cobra | Apache-2
github.com/spf13/pflag | spdxBSD3
github.com/stretchr/objx | MIT?
github.com/stretchr/testify | spdxMIT
github.com/syndtr/gocapability | spdxBSD2
github.com/ugorji/go | MITname
github.com/vishvananda/netlink | Apache-2
github.com/xiang90/probing | MITname
github.com/xyproto/simpleredis | MITname
golang.org/x/crypto | spdxBSD3
golang.org/x/exp | spdxBSD3
golang.org/x/net | spdxBSD3
golang.org/x/oauth2 | spdxBSD3
golang.org/x/sys | spdxBSD3
golang.org/x/tools | spdxBSD3
google.golang.org/api | spdxBSD3
google.golang.org/cloud | Apache-2
google.golang.org/grpc | spdxBSD3
gopkg.in/natefinch/lumberjack.v2 | MITname
gopkg.in/yaml.v2 | LesserExceptionGPLVer3-TOOLONG
k8s.io/heapster | Apache-2
speter.net/go/exp/math/dec/inf | spdxBSD2

6
Godeps/OWNERS generated Normal file
View File

@@ -0,0 +1,6 @@
assignees:
- davidopp
- eparis
- lavalamp
- quinton-hoole
- thockin

View File

@@ -0,0 +1,190 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2016 ClusterHQ
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -121,6 +121,7 @@ func (c Client) getURL(path string) string {
}
type configurationPayload struct {
Deleted bool `json:"deleted"`
Primary string `json:"primary"`
DatasetID string `json:"dataset_id,omitempty"`
MaximumSize json.Number `json:"maximum_size,omitempty"`
@@ -313,7 +314,7 @@ func (c Client) GetDatasetID(metaName string) (datasetID string, err error) {
var configurations []configurationPayload
if err = json.NewDecoder(resp.Body).Decode(&configurations); err == nil {
for _, c := range configurations {
if c.Metadata.Name == metaName {
if c.Metadata.Name == metaName && c.Deleted == false {
return c.DatasetID, nil
}
}

View File

@@ -0,0 +1,66 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/pkg/capnslog"
)
type backendGetter interface {
Backend() backend.Backend
}
var (
enableFlagKey = []byte("authEnabled")
authBucketName = []byte("auth")
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth")
)
type AuthStore interface {
// AuthEnable() turns on the authentication feature
AuthEnable()
}
type authStore struct {
bgetter backendGetter
}
func (as *authStore) AuthEnable() {
value := []byte{1}
b := as.bgetter.Backend()
tx := b.BatchTx()
tx.Lock()
tx.UnsafePut(authBucketName, enableFlagKey, value)
tx.Unlock()
b.ForceCommit()
plog.Noticef("Authentication enabled")
}
func NewAuthStore(bgetter backendGetter) *authStore {
b := bgetter.Backend()
tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(authBucketName)
tx.Unlock()
b.ForceCommit()
return &authStore{
bgetter: bgetter,
}
}

View File

@@ -35,9 +35,25 @@ func main() {
log.Fatal(err)
}
kapi := client.NewKeysAPI(c)
resp, err := kapi.Set(context.Background(), "foo", "bar", nil)
// set "/foo" key with "bar" value
log.Print("Setting '/foo' key with 'bar' value")
resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
if err != nil {
log.Fatal(err)
} else {
// print common key info
log.Printf("Set is done. Metadata is %q\n", resp)
}
// get "/foo" key's value
log.Print("Getting '/foo' key value")
resp, err = kapi.Get(context.Background(), "/foo", nil)
if err != nil {
log.Fatal(err)
} else {
// print common key info
log.Printf("Get is done. Metadata is %q\n", resp)
// print value
log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
}
}
```
@@ -61,7 +77,7 @@ If the response gets from the cluster is invalid, a plain string error will be r
Here is the example code to handle client errors:
```go
cfg := client.Config{Endpoints: []string{"http://etcd1:2379,http://etcd2:2379,http://etcd3:2379"}}
cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
c, err := client.New(cfg)
if err != nil {
log.Fatal(err)

View File

@@ -56,22 +56,22 @@ func NewAuthRoleAPI(c Client) AuthRoleAPI {
}
type AuthRoleAPI interface {
// Add a role.
// AddRole adds a role.
AddRole(ctx context.Context, role string) error
// Remove a role.
// RemoveRole removes a role.
RemoveRole(ctx context.Context, role string) error
// Get role details.
// GetRole retrieves role details.
GetRole(ctx context.Context, role string) (*Role, error)
// Grant a role some permission prefixes for the KV store.
// GrantRoleKV grants a role some permission prefixes for the KV store.
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// Revoke some some permission prefixes for a role on the KV store.
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// List roles.
// ListRoles lists roles.
ListRoles(ctx context.Context) ([]string, error)
}
@@ -115,17 +115,20 @@ func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var userList struct {
Roles []string `json:"roles"`
var roleList struct {
Roles []Role `json:"roles"`
}
err = json.Unmarshal(body, &userList)
if err != nil {
if err = json.Unmarshal(body, &roleList); err != nil {
return nil, err
}
return userList.Roles, nil
ret := make([]string, 0, len(roleList.Roles))
for _, r := range roleList.Roles {
ret = append(ret, r.Role)
}
return ret, nil
}
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
@@ -218,17 +221,16 @@ func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
err = json.Unmarshal(body, &role)
if err != nil {
if err = json.Unmarshal(body, &role); err != nil {
return nil, err
}
return &role, nil

View File

@@ -36,6 +36,15 @@ type User struct {
Revoke []string `json:"revoke,omitempty"`
}
type UserRoles struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
type userName struct {
User string `json:"user"`
}
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
if name != "" {
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
@@ -78,9 +87,9 @@ func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
@@ -117,25 +126,25 @@ func NewAuthUserAPI(c Client) AuthUserAPI {
}
type AuthUserAPI interface {
// Add a user.
// AddUser adds a user.
AddUser(ctx context.Context, username string, password string) error
// Remove a user.
// RemoveUser removes a user.
RemoveUser(ctx context.Context, username string) error
// Get user details.
// GetUser retrieves user details.
GetUser(ctx context.Context, username string) (*User, error)
// Grant a user some permission roles.
// GrantUser grants a user some permission roles.
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
// Revoke some permission roles from a user.
// RevokeUser revokes some permission roles from a user.
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
// Change the user's password.
// ChangePassword changes the user's password.
ChangePassword(ctx context.Context, username string, password string) (*User, error)
// List users.
// ListUsers lists the users.
ListUsers(ctx context.Context) ([]string, error)
}
@@ -179,22 +188,28 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var userList struct {
Users []string `json:"users"`
Users []User `json:"users"`
}
err = json.Unmarshal(body, &userList)
if err != nil {
if err = json.Unmarshal(body, &userList); err != nil {
return nil, err
}
return userList.Users, nil
ret := make([]string, 0, len(userList.Users))
for _, u := range userList.Users {
ret = append(ret, u.User)
}
return ret, nil
}
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
@@ -221,9 +236,9 @@ func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAct
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
@@ -280,18 +295,24 @@ func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var user User
err = json.Unmarshal(body, &user)
if err != nil {
if err = json.Unmarshal(body, &user); err != nil {
var userR UserRoles
if urerr := json.Unmarshal(body, &userR); urerr != nil {
return nil, err
}
user.User = userR.User
for _, r := range userR.Roles {
user.Roles = append(user.Roles, r.Role)
}
}
return &user, nil
}

View File

@@ -35,6 +35,7 @@ var (
ErrNoEndpoints = errors.New("client: no endpoints available")
ErrTooManyRedirects = errors.New("client: too many redirects")
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
)
@@ -49,6 +50,29 @@ var DefaultTransport CancelableTransport = &http.Transport{
TLSHandshakeTimeout: 10 * time.Second,
}
type EndpointSelectionMode int
const (
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
// As the name implies, the client object will pick a node from the members
// of the cluster in a random fashion. If the cluster has three members, A, B,
// and C, the client picks any node from its three members as its request
// destination.
EndpointSelectionRandom EndpointSelectionMode = iota
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
// requests are sent directly to the cluster leader. This reduces
// forwarding roundtrips compared to making requests to etcd followers
// who then forward them to the cluster leader. In the event of a leader
// failure, however, clients configured this way cannot prioritize among
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
// maintain its knowledge of current cluster state.
//
// This mode should be used with Client.AutoSync().
EndpointSelectionPrioritizeLeader
)
type Config struct {
// Endpoints defines a set of URLs (schemes, hosts and ports only)
// that can be used to communicate with a logical etcd cluster. For
@@ -74,7 +98,7 @@ type Config struct {
// CheckRedirect specifies the policy for handling HTTP redirects.
// If CheckRedirect is not nil, the Client calls it before
// following an HTTP redirect. The sole argument is the number of
// requests that have alrady been made. If CheckRedirect returns
// requests that have already been made. If CheckRedirect returns
// an error, Client.Do will not make any further requests and return
// the error back it to the caller.
//
@@ -107,6 +131,10 @@ type Config struct {
//
// A HeaderTimeoutPerRequest of zero means no timeout.
HeaderTimeoutPerRequest time.Duration
// SelectionMode is an EndpointSelectionMode enum that specifies the
// policy for choosing the etcd cluster node to which requests are sent.
SelectionMode EndpointSelectionMode
}
func (cfg *Config) transport() CancelableTransport {
@@ -177,6 +205,7 @@ func New(cfg Config) (Client, error) {
c := &httpClusterClient{
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
selectionMode: cfg.SelectionMode,
}
if cfg.Username != "" {
c.credentials = &credentials{
@@ -225,6 +254,17 @@ type httpClusterClient struct {
credentials *credentials
sync.RWMutex
rand *rand.Rand
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
mAPI := NewMembersAPI(c)
leader, err := mAPI.Leader(context.Background())
if err != nil {
return "", err
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
@@ -241,9 +281,28 @@ func (c *httpClusterClient) SetEndpoints(eps []string) error {
neps[i] = *u
}
switch c.selectionMode {
case EndpointSelectionRandom:
c.endpoints = shuffleEndpoints(c.rand, neps)
// TODO: pin old endpoint if possible, and rebalance when new endpoint appears
c.pinned = 0
case EndpointSelectionPrioritizeLeader:
c.endpoints = neps
lep, err := c.getLeaderEndpoint()
if err != nil {
return ErrNoLeaderEndpoint
}
for i := range c.endpoints {
if c.endpoints[i].String() == lep {
c.pinned = i
break
}
}
// If endpoints doesn't have the lu, just keep c.pinned = 0.
// Forwarding between follower and leader would be required but it works.
default:
return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
}
return nil
}

View File

@@ -16,6 +16,6 @@ package client
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Dicover looks up the etcd servers for the domain.
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}

View File

@@ -106,7 +106,7 @@ type KeysAPI interface {
// Set assigns a new value to a Node identified by a given key. The caller
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
// than value is ignored.
// then value is ignored.
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
// Delete removes a Node identified by the given key, optionally destroying
@@ -184,6 +184,11 @@ type SetOptions struct {
// a TTL of 0.
TTL time.Duration
// Refresh set to true means a TTL value can be updated
// without firing a watch or changing the node value. A
// value must not be provided when refreshing a key.
Refresh bool
// Dir specifies whether or not this Node should be created as a directory.
Dir bool
}
@@ -234,7 +239,7 @@ type DeleteOptions struct {
type Watcher interface {
// Next blocks until an etcd event occurs, then returns a Response
// represeting that event. The behavior of Next depends on the
// representing that event. The behavior of Next depends on the
// WatcherOptions used to construct the Watcher. Next is designed to
// be called repeatedly, each time blocking until a subsequent event
// is available.
@@ -306,6 +311,7 @@ func (n *Node) TTLDuration() time.Duration {
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int { return len(ns) }
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
@@ -327,6 +333,7 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
act.PrevIndex = opts.PrevIndex
act.PrevExist = opts.PrevExist
act.TTL = opts.TTL
act.Refresh = opts.Refresh
act.Dir = opts.Dir
}
@@ -518,6 +525,7 @@ type setAction struct {
PrevIndex uint64
PrevExist PrevExistType
TTL time.Duration
Refresh bool
Dir bool
}
@@ -549,6 +557,10 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
if a.Refresh {
form.Add("refresh", "true")
}
u.RawQuery = params.Encode()
body := strings.NewReader(form.Encode())

View File

@@ -29,6 +29,7 @@ import (
var (
defaultV2MembersPrefix = "/v2/members"
defaultLeaderSuffix = "/leader"
)
type Member struct {
@@ -105,6 +106,9 @@ type MembersAPI interface {
// Update instructs etcd to update an existing Member in the cluster.
Update(ctx context.Context, mID string, peerURLs []string) error
// Leader gets current leader of the cluster
Leader(ctx context.Context) (*Member, error)
}
type httpMembersAPI struct {
@@ -199,6 +203,25 @@ func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
}
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
req := &membersAPIActionLeader{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var leader Member
if err := json.Unmarshal(body, &leader); err != nil {
return nil, err
}
return &leader, nil
}
type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
@@ -255,6 +278,15 @@ func assertStatusCode(got int, want ...int) (err error) {
return fmt.Errorf("unexpected status code %d", got)
}
type membersAPIActionLeader struct{}
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, defaultLeaderSuffix)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
// v2MembersURL add the necessary path to the provided endpoint
// to route requests to the default v2 members API.
func v2MembersURL(ep url.URL) *url.URL {

View File

@@ -27,7 +27,7 @@ var (
type srvDiscover struct{}
// NewSRVDiscover constructs a new Dicoverer that uses the stdlib to lookup SRV records.
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
@@ -50,8 +50,8 @@ func (d *srvDiscover) Discover(domain string) ([]string, error) {
return nil
}
errHTTPS := updateURLs("etcd-server-ssl", "https")
errHTTP := updateURLs("etcd-server", "http")
errHTTPS := updateURLs("etcd-client-ssl", "https")
errHTTP := updateURLs("etcd-client", "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)

View File

@@ -0,0 +1,23 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
// IsKeyNotFound reports whether err is an etcd client Error whose code
// is ErrorCodeKeyNotFound. Any other error type yields false.
func IsKeyNotFound(err error) bool {
	cErr, ok := err.(Error)
	return ok && cErr.Code == ErrorCodeKeyNotFound
}

View File

@@ -0,0 +1,133 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compactor
import (
"sync"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage"
"github.com/coreos/pkg/capnslog"
"github.com/jonboulle/clockwork"
"golang.org/x/net/context"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
)
const (
checkCompactionInterval = 5 * time.Minute
)
type Compactable interface {
Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
}
type RevGetter interface {
Rev() int64
}
type Periodic struct {
clock clockwork.Clock
periodInHour int
rg RevGetter
c Compactable
revs []int64
ctx context.Context
cancel context.CancelFunc
mu sync.Mutex
paused bool
}
// NewPeriodic returns a Periodic compactor that, once running, compacts
// the storage down to the revision observed h hours earlier. It uses the
// real wall clock; tests may substitute a fake clock on the struct.
func NewPeriodic(h int, rg RevGetter, c Compactable) *Periodic {
	p := &Periodic{
		clock:        clockwork.NewRealClock(),
		periodInHour: h,
		rg:           rg,
		c:            c,
	}
	return p
}
// Run starts a background goroutine that samples the current revision
// every checkCompactionInterval and, once per periodInHour hours,
// triggers an auto-compaction at the revision recorded one period ago.
// The goroutine exits when Stop cancels the context; Pause suppresses
// compaction attempts without stopping the sampling loop.
func (t *Periodic) Run() {
	t.ctx, t.cancel = context.WithCancel(context.Background())
	t.revs = make([]int64, 0)
	clock := t.clock

	go func() {
		last := clock.Now()
		for {
			// Record the current revision so getRev can look back one period.
			t.revs = append(t.revs, t.rg.Rev())
			select {
			case <-t.ctx.Done():
				return
			case <-clock.After(checkCompactionInterval):
				t.mu.Lock()
				p := t.paused
				t.mu.Unlock()
				if p {
					continue
				}
			}
			if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour {
				continue
			}
			rev := t.getRev(t.periodInHour)
			if rev < 0 {
				continue
			}

			plog.Noticef("Starting auto-compaction at revision %d", rev)
			_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
			if err == nil || err == storage.ErrCompacted {
				t.revs = make([]int64, 0)
				last = clock.Now()
				plog.Noticef("Finished auto-compaction at revision %d", rev)
			} else {
				// BUG FIX: the arguments were previously swapped (err, rev);
				// %d must receive the revision and %v the error.
				plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
				plog.Noticef("Retry after %v", checkCompactionInterval)
			}
		}
	}()
}
// Stop terminates the background compaction loop started by Run by
// cancelling its context. Must only be called after Run.
func (t *Periodic) Stop() {
t.cancel()
}
// Pause suppresses further auto-compactions. The sampling loop in Run
// keeps running; it simply skips the compaction step while paused.
func (t *Periodic) Pause() {
t.mu.Lock()
defer t.mu.Unlock()
t.paused = true
}
// Resume re-enables auto-compactions previously disabled by Pause.
func (t *Periodic) Resume() {
t.mu.Lock()
defer t.mu.Unlock()
t.paused = false
}
// getRev returns the revision sampled roughly h hours ago, or -1 if not
// enough samples have accumulated yet. Run appends one sample per
// checkCompactionInterval tick, so the lookback distance in samples is
// (h hours) / checkCompactionInterval.
func (t *Periodic) getRev(h int) int64 {
i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval)
if i < 0 {
return -1
}
return t.revs[i]
}

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package discovery provides an implementation of the cluster discovery that
// is used by etcd.
package discovery
import (

View File

@@ -28,6 +28,7 @@ var (
resolveTCPAddr = net.ResolveTCPAddr
)
// SRVGetCluster gets the cluster information via DNS discovery.
// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap)
// Also doesn't do any lookups for the token (though it could)
// Also sees each entry as a separate instance.

View File

@@ -12,9 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// error package describes errors in etcd project.
// When any change happens, Documentation/errorcode.md needs to be updated
// correspondingly.
// error package describes errors in etcd project. When any change happens,
// Documentation/errorcode.md needs to be updated correspondingly.
package error
import (
@@ -49,6 +48,8 @@ var errors = map[int]string{
ecodeIndexValueMutex: "Index and value cannot both be specified",
EcodeInvalidField: "Invalid field",
EcodeInvalidForm: "Invalid POST form",
EcodeRefreshValue: "Value provided on refresh",
EcodeRefreshTTLRequired: "A TTL must be provided on refresh",
// raft related errors
EcodeRaftInternal: "Raft Internal Error",
@@ -100,6 +101,8 @@ const (
ecodeIndexValueMutex = 208
EcodeInvalidField = 209
EcodeInvalidForm = 210
EcodeRefreshValue = 211
EcodeRefreshTTLRequired = 212
EcodeRaftInternal = 300
EcodeLeaderElect = 301
@@ -133,7 +136,7 @@ func NewError(errorCode int, cause string, index uint64) *Error {
}
}
// Only for error interface
// Error is for the error interface
func (e Error) Error() string {
return e.Message + " (" + e.Cause + ")"
}
@@ -143,7 +146,7 @@ func (e Error) toJsonString() string {
return string(b)
}
func (e Error) statusCode() int {
func (e Error) StatusCode() int {
status, ok := errorStatus[e.ErrorCode]
if !ok {
status = http.StatusBadRequest
@@ -154,6 +157,6 @@ func (e Error) statusCode() int {
func (e Error) WriteTo(w http.ResponseWriter) {
w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(e.statusCode())
w.WriteHeader(e.StatusCode())
fmt.Fprintln(w, e.toJsonString())
}

View File

@@ -0,0 +1,98 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
)
type AuthServer struct {
authenticator etcdserver.Authenticator
}
func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
return &AuthServer{authenticator: s}
}
func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
return as.authenticator.AuthEnable(ctx, r)
}
func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.RoleAddRequest) (*pb.RoleAddResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.RoleDeleteRequest) (*pb.RoleDeleteResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) RoleGet(ctx context.Context, r *pb.RoleGetRequest) (*pb.RoleGetResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) RoleRevoke(ctx context.Context, r *pb.RoleRevokeRequest) (*pb.RoleRevokeResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) RoleGrant(ctx context.Context, r *pb.RoleGrantRequest) (*pb.RoleGrantResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) UserAdd(ctx context.Context, r *pb.UserAddRequest) (*pb.UserAddResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) UserDelete(ctx context.Context, r *pb.UserDeleteRequest) (*pb.UserDeleteResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) UserGet(ctx context.Context, r *pb.UserGetRequest) (*pb.UserGetResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) UserGrant(ctx context.Context, r *pb.UserGrantRequest) (*pb.UserGrantResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) UserRevoke(ctx context.Context, r *pb.UserRevokeRequest) (*pb.UserRevokeResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}
func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.UserChangePasswordRequest) (*pb.UserChangePasswordResponse, error) {
plog.Info("not implemented yet")
return nil, nil
}

View File

@@ -0,0 +1,42 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/transport"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
func Server(s *etcdserver.EtcdServer, tls *transport.TLSInfo) (*grpc.Server, error) {
var opts []grpc.ServerOption
if tls != nil {
creds, err := credentials.NewServerTLSFromFile(tls.CertFile, tls.KeyFile)
if err != nil {
return nil, err
}
opts = append(opts, grpc.Creds(creds))
}
grpcServer := grpc.NewServer(opts...)
pb.RegisterKVServer(grpcServer, NewKVServer(s))
pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
pb.RegisterLeaseServer(grpcServer, NewLeaseServer(s))
pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
return grpcServer, nil
}

View File

@@ -12,42 +12,292 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package v3rpc implements etcd v3 RPC system based on gRPC.
package v3rpc
import (
"sort"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/storage"
"github.com/coreos/pkg/capnslog"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type handler struct {
server etcdserver.V3DemoServer
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v3rpc")
// Max operations per txn list. For example, Txn.Success can have at most 128 operations,
// and Txn.Failure can have at most 128 operations.
MaxOpsPerTxn = 128
)
type kvServer struct {
clusterID int64
memberID int64
raftTimer etcdserver.RaftTimer
kv etcdserver.RaftKV
}
func New(s etcdserver.V3DemoServer) pb.EtcdServer {
return &handler{s}
func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
return &kvServer{
clusterID: int64(s.Cluster().ID()),
memberID: int64(s.ID()),
raftTimer: s,
kv: s,
}
}
func (h *handler) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
resp := h.server.V3DemoDo(ctx, pb.InternalRaftRequest{Range: r})
return resp.(*pb.RangeResponse), nil
func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if err := checkRangeRequest(r); err != nil {
return nil, err
}
func (h *handler) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
resp := h.server.V3DemoDo(ctx, pb.InternalRaftRequest{Put: r})
return resp.(*pb.PutResponse), nil
resp, err := s.kv.Range(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
func (h *handler) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
resp := h.server.V3DemoDo(ctx, pb.InternalRaftRequest{DeleteRange: r})
return resp.(*pb.DeleteRangeResponse), nil
if resp.Header == nil {
plog.Panic("unexpected nil resp.Header")
}
s.fillInHeader(resp.Header)
return resp, err
}
func (h *handler) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
resp := h.server.V3DemoDo(ctx, pb.InternalRaftRequest{Txn: r})
return resp.(*pb.TxnResponse), nil
func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
if err := checkPutRequest(r); err != nil {
return nil, err
}
func (h *handler) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
panic("not implemented")
resp, err := s.kv.Put(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
if resp.Header == nil {
plog.Panic("unexpected nil resp.Header")
}
s.fillInHeader(resp.Header)
return resp, err
}
func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
if err := checkDeleteRequest(r); err != nil {
return nil, err
}
resp, err := s.kv.DeleteRange(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
if resp.Header == nil {
plog.Panic("unexpected nil resp.Header")
}
s.fillInHeader(resp.Header)
return resp, err
}
func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
if err := checkTxnRequest(r); err != nil {
return nil, err
}
resp, err := s.kv.Txn(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
if resp.Header == nil {
plog.Panic("unexpected nil resp.Header")
}
s.fillInHeader(resp.Header)
return resp, err
}
func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
resp, err := s.kv.Compact(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
if resp.Header == nil {
plog.Panic("unexpected nil resp.Header")
}
s.fillInHeader(resp.Header)
return resp, nil
}
func (s *kvServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
resp, err := s.kv.Hash(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
s.fillInHeader(resp.Header)
return resp, nil
}
// fillInHeader populates pb.ResponseHeader from kvServer, except Revision.
func (s *kvServer) fillInHeader(h *pb.ResponseHeader) {
h.ClusterId = uint64(s.clusterID)
h.MemberId = uint64(s.memberID)
h.RaftTerm = s.raftTimer.Term()
}
func checkRangeRequest(r *pb.RangeRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey
}
return nil
}
func checkPutRequest(r *pb.PutRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey
}
return nil
}
func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey
}
return nil
}
func checkTxnRequest(r *pb.TxnRequest) error {
if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn {
return rpctypes.ErrTooManyOps
}
for _, c := range r.Compare {
if len(c.Key) == 0 {
return rpctypes.ErrEmptyKey
}
}
for _, u := range r.Success {
if err := checkRequestUnion(u); err != nil {
return err
}
}
if err := checkRequestDupKeys(r.Success); err != nil {
return err
}
for _, u := range r.Failure {
if err := checkRequestUnion(u); err != nil {
return err
}
}
if err := checkRequestDupKeys(r.Failure); err != nil {
return err
}
return nil
}
// checkRequestDupKeys gives rpctypes.ErrDuplicateKey if the same key is modified twice
func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
// check put overlap
keys := make(map[string]struct{})
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestPut)
if !ok {
continue
}
preq := tv.RequestPut
if preq == nil {
continue
}
key := string(preq.Key)
if _, ok := keys[key]; ok {
return rpctypes.ErrDuplicateKey
}
keys[key] = struct{}{}
}
// no need to check deletes if no puts; delete overlaps are permitted
if len(keys) == 0 {
return nil
}
// sort keys for range checking
sortedKeys := []string{}
for k := range keys {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
// check put overlap with deletes
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestDeleteRange)
if !ok {
continue
}
dreq := tv.RequestDeleteRange
if dreq == nil {
continue
}
key := string(dreq.Key)
if dreq.RangeEnd == nil {
if _, found := keys[key]; found {
return rpctypes.ErrDuplicateKey
}
} else {
lo := sort.SearchStrings(sortedKeys, key)
hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd))
if lo != hi {
// element between lo and hi => overlap
return rpctypes.ErrDuplicateKey
}
}
}
return nil
}
func checkRequestUnion(u *pb.RequestUnion) error {
// TODO: ensure only one of the field is set.
switch uv := u.Request.(type) {
case *pb.RequestUnion_RequestRange:
if uv.RequestRange != nil {
return checkRangeRequest(uv.RequestRange)
}
case *pb.RequestUnion_RequestPut:
if uv.RequestPut != nil {
return checkPutRequest(uv.RequestPut)
}
case *pb.RequestUnion_RequestDeleteRange:
if uv.RequestDeleteRange != nil {
return checkDeleteRequest(uv.RequestDeleteRange)
}
default:
// empty union
return nil
}
return nil
}
func togRPCError(err error) error {
switch err {
case storage.ErrCompacted:
return rpctypes.ErrCompacted
case storage.ErrFutureRev:
return rpctypes.ErrFutureRev
case lease.ErrLeaseNotFound:
return rpctypes.ErrLeaseNotFound
// TODO: handle error from raft and timeout
case etcdserver.ErrRequestTooLarge:
return rpctypes.ErrRequestTooLarge
default:
return grpc.Errorf(codes.Internal, err.Error())
}
}

View File

@@ -0,0 +1,76 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"io"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"golang.org/x/net/context"
)
type LeaseServer struct {
le etcdserver.Lessor
}
func NewLeaseServer(le etcdserver.Lessor) pb.LeaseServer {
return &LeaseServer{le: le}
}
func (ls *LeaseServer) LeaseCreate(ctx context.Context, cr *pb.LeaseCreateRequest) (*pb.LeaseCreateResponse, error) {
resp, err := ls.le.LeaseCreate(ctx, cr)
if err == lease.ErrLeaseExists {
return nil, rpctypes.ErrLeaseExist
}
return resp, err
}
func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
r, err := ls.le.LeaseRevoke(ctx, rr)
if err != nil {
return nil, rpctypes.ErrLeaseNotFound
}
return r, nil
}
// LeaseKeepAlive renews leases over a bidirectional gRPC stream: each
// received request carries a lease ID to renew, and each response
// reports the remaining TTL. It returns nil when the client closes the
// stream (io.EOF), rpctypes.ErrLeaseNotFound for unknown leases, and
// propagates any other stream or renewal error.
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		ttl, err := ls.le.LeaseRenew(lease.LeaseID(req.ID))
		if err == lease.ErrLeaseNotFound {
			return rpctypes.ErrLeaseNotFound
		}
		if err != nil {
			// The ErrLeaseNotFound case has already returned above, so the
			// original "&& err != lease.ErrLeaseNotFound" guard was
			// redundant and has been dropped.
			return err
		}

		resp := &pb.LeaseKeepAliveResponse{ID: req.ID, TTL: ttl}
		if err := stream.Send(resp); err != nil {
			return err
		}
	}
}

View File

@@ -0,0 +1,45 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage/backend"
"golang.org/x/net/context"
)
type BackendGetter interface {
Backend() backend.Backend
}
type maintenanceServer struct {
bg BackendGetter
}
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
return &maintenanceServer{bg: s}
}
// Defragment defragments the storage backend, reclaiming space held by
// deleted and compacted keys. It blocks until the backend finishes.
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
	plog.Noticef("starting to defragment the storage backend...")
	if err := ms.bg.Backend().Defrag(); err != nil {
		// Typo fix in the log message: "deframent" -> "defragment".
		plog.Errorf("failed to defragment the storage backend (%v)", err)
		return nil, err
	}
	plog.Noticef("finished defragmenting the storage backend")
	return &pb.DefragmentResponse{}, nil
}

View File

@@ -0,0 +1,118 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"time"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/types"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type ClusterServer struct {
cluster etcdserver.Cluster
server etcdserver.Server
raftTimer etcdserver.RaftTimer
}
func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
return &ClusterServer{
cluster: s.Cluster(),
server: s,
raftTimer: s,
}
}
func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
urls, err := types.NewURLs(r.PeerURLs)
if err != nil {
return nil, rpctypes.ErrMemberBadURLs
}
now := time.Now()
m := etcdserver.NewMember("", urls, "", &now)
err = cs.server.AddMember(ctx, *m)
switch {
case err == etcdserver.ErrIDExists:
return nil, rpctypes.ErrMemberExist
case err == etcdserver.ErrPeerURLexists:
return nil, rpctypes.ErrPeerURLExist
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
}
return &pb.MemberAddResponse{
Header: cs.header(),
Member: &pb.Member{ID: uint64(m.ID), IsLeader: m.ID == cs.server.Leader(), PeerURLs: m.PeerURLs},
}, nil
}
// MemberRemove removes the member identified by r.ID from the cluster.
// A member that was already removed, or that never existed, both map to
// rpctypes.ErrMemberNotFound; other failures surface as internal gRPC
// errors.
func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
	err := cs.server.RemoveMember(ctx, r.ID)
	switch {
	case err == etcdserver.ErrIDRemoved, err == etcdserver.ErrIDNotFound:
		// Collapsed the original case/fallthrough pair into one
		// multi-value case; behavior is identical.
		return nil, rpctypes.ErrMemberNotFound
	case err != nil:
		return nil, grpc.Errorf(codes.Internal, err.Error())
	}
	return &pb.MemberRemoveResponse{Header: cs.header()}, nil
}
func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
m := etcdserver.Member{
ID: types.ID(r.ID),
RaftAttributes: etcdserver.RaftAttributes{PeerURLs: r.PeerURLs},
}
err := cs.server.UpdateMember(ctx, m)
switch {
case err == etcdserver.ErrPeerURLexists:
return nil, rpctypes.ErrPeerURLExist
case err == etcdserver.ErrIDNotFound:
return nil, rpctypes.ErrMemberNotFound
case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error())
}
return &pb.MemberUpdateResponse{Header: cs.header()}, nil
}
func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
membs := cs.cluster.Members()
protoMembs := make([]*pb.Member, len(membs))
for i := range membs {
protoMembs[i] = &pb.Member{
Name: membs[i].Name,
ID: uint64(membs[i].ID),
IsLeader: membs[i].ID == cs.server.Leader(),
PeerURLs: membs[i].PeerURLs,
ClientURLs: membs[i].ClientURLs,
}
}
return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil
}
// header builds a ResponseHeader populated with the cluster ID, this
// member's ID, and the current raft term.
func (cs *ClusterServer) header() *pb.ResponseHeader {
	h := &pb.ResponseHeader{
		ClusterId: uint64(cs.cluster.ID()),
		MemberId:  uint64(cs.server.ID()),
		RaftTerm:  cs.raftTimer.Term(),
	}
	return h
}

View File

@@ -0,0 +1,38 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
ErrEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: storage: required revision has been compacted")
ErrFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: storage: required revision is a future revision")
ErrLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
ErrMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
)

View File

@@ -0,0 +1,265 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"io"
"time"
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage"
"github.com/coreos/etcd/storage/storagepb"
)
type watchServer struct {
clusterID int64
memberID int64
raftTimer etcdserver.RaftTimer
watchable storage.Watchable
}
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
return &watchServer{
clusterID: int64(s.Cluster().ID()),
memberID: int64(s.ID()),
raftTimer: s,
watchable: s.Watchable(),
}
}
var (
// expose for testing purpose. External test can change this to a
// small value to finish fast.
ProgressReportInterval = 10 * time.Minute
)
const (
// We send ctrl response inside the read loop. We do not want
// send to block read, but we still want ctrl response we sent to
// be serialized. Thus we use a buffered chan to solve the problem.
// A small buffer should be OK for most cases, since we expect the
// ctrl requests are infrequent.
ctrlStreamBufLen = 16
)
// serverWatchStream is an etcd server side stream. It receives watch
// create/cancel requests from the client-side gRPC stream and watch events
// from storage.WatchStream, and builds responses that are forwarded onto
// the gRPC stream. It also forwards control messages such as "watch
// created" and "watch canceled".
type serverWatchStream struct {
	clusterID int64
	memberID  int64
	raftTimer etcdserver.RaftTimer

	gRPCStream  pb.Watch_WatchServer
	watchStream storage.WatchStream
	ctrlStream  chan *pb.WatchResponse

	// progress tracks the watchID that stream might need to send
	// progress to. A value of false means an event was already sent for
	// that watch since the last report interval (see sendLoop).
	progress map[storage.WatchID]bool

	// closec indicates the stream is closed.
	closec chan struct{}
}
// Watch services one client watch RPC. It starts a goroutine that pushes
// watch events and control responses back to the client, then consumes the
// client's create/cancel requests on the calling goroutine until the
// stream terminates. Stream resources are released on return.
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) error {
	sws := &serverWatchStream{
		clusterID:   ws.clusterID,
		memberID:    ws.memberID,
		raftTimer:   ws.raftTimer,
		gRPCStream:  stream,
		watchStream: ws.watchable.NewWatchStream(),
		// ctrlStream carries control responses (watcher created and
		// canceled) from the receive loop to the send loop.
		ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
		progress:   make(map[storage.WatchID]bool),
		closec:     make(chan struct{}),
	}
	defer sws.close()

	go sws.sendLoop()
	return sws.recvLoop()
}
// recvLoop receives watch create/cancel requests from the client's gRPC
// stream until the stream terminates. Requests are applied to the
// underlying storage.WatchStream, and the resulting control responses are
// handed to sendLoop through sws.ctrlStream.
func (sws *serverWatchStream) recvLoop() error {
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			// Client finished the stream cleanly.
			return nil
		}
		if err != nil {
			return err
		}
		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			if uv.CreateRequest == nil {
				break
			}
			creq := uv.CreateRequest
			if len(creq.Key) == 0 {
				// \x00 is the smallest key
				creq.Key = []byte{0}
			}
			if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
				// support >= key queries
				creq.RangeEnd = []byte{}
			}
			wsrev := sws.watchStream.Rev()
			rev := creq.StartRevision
			if rev == 0 {
				// A zero start revision means "watch from now":
				// begin at the revision after the current one.
				rev = wsrev + 1
			}
			id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
			if id != -1 && creq.ProgressNotify {
				sws.progress[id] = true
			}
			// An id of -1 signals the watch could not be created; it is
			// reported to the client as created-then-canceled.
			sws.ctrlStream <- &pb.WatchResponse{
				Header:   sws.newResponseHeader(wsrev),
				WatchId:  int64(id),
				Created:  true,
				Canceled: id == -1,
			}
		case *pb.WatchRequest_CancelRequest:
			if uv.CancelRequest != nil {
				id := uv.CancelRequest.WatchId
				err := sws.watchStream.Cancel(storage.WatchID(id))
				if err == nil {
					sws.ctrlStream <- &pb.WatchResponse{
						Header:   sws.newResponseHeader(sws.watchStream.Rev()),
						WatchId:  id,
						Canceled: true,
					}
					delete(sws.progress, storage.WatchID(id))
				}
			}
			// TODO: do we need to return error back to client?
		default:
			// NOTE(review): an unrecognized request union crashes the
			// server; consider rejecting the request with an error
			// instead of panicking.
			panic("not implemented")
		}
	}
}
// sendLoop pushes responses onto the client's gRPC stream: watch events
// read from the storage watch channel, control responses relayed from
// recvLoop, and periodic progress notifications. It returns when the event
// channel or the control channel is closed, or when a gRPC send fails.
func (sws *serverWatchStream) sendLoop() {
	// watch ids that are currently active
	ids := make(map[storage.WatchID]struct{})
	// watch responses pending on a watch id creation message
	pending := make(map[storage.WatchID][]*pb.WatchResponse)

	progressTicker := time.NewTicker(ProgressReportInterval)
	defer progressTicker.Stop()

	for {
		select {
		case wresp, ok := <-sws.watchStream.Chan():
			if !ok {
				return
			}

			// TODO: evs is []storagepb.Event type
			// either return []*storagepb.Event from storage package
			// or define protocol buffer with []storagepb.Event.
			evs := wresp.Events
			events := make([]*storagepb.Event, len(evs))
			for i := range evs {
				events[i] = &evs[i]
			}
			wr := &pb.WatchResponse{
				Header:          sws.newResponseHeader(wresp.Revision),
				WatchId:         int64(wresp.WatchID),
				Events:          events,
				CompactRevision: wresp.CompactRevision,
			}

			if _, hasId := ids[wresp.WatchID]; !hasId {
				// Events can arrive before this watch id's creation
				// response has been sent; buffer them until the id is
				// announced to the client below.
				wrs := append(pending[wresp.WatchID], wr)
				pending[wresp.WatchID] = wrs
				continue
			}

			storage.ReportEventReceived()
			if err := sws.gRPCStream.Send(wr); err != nil {
				return
			}

			// A real event was just delivered for this watch, so no
			// progress notification is needed until the next tick.
			if _, ok := sws.progress[wresp.WatchID]; ok {
				sws.progress[wresp.WatchID] = false
			}

		case c, ok := <-sws.ctrlStream:
			if !ok {
				return
			}

			if err := sws.gRPCStream.Send(c); err != nil {
				return
			}

			// track id creation
			wid := storage.WatchID(c.WatchId)
			if c.Canceled {
				delete(ids, wid)
				continue
			}
			if c.Created {
				// The id is now announced to the client: mark it active
				// and flush any events buffered while it was pending.
				ids[wid] = struct{}{}
				for _, v := range pending[wid] {
					storage.ReportEventReceived()
					if err := sws.gRPCStream.Send(v); err != nil {
						return
					}
				}
				delete(pending, wid)
			}

		case <-progressTicker.C:
			// Request progress for every watch that has not delivered an
			// event since the last tick (value true), then re-arm all
			// entries for the next interval.
			for id, ok := range sws.progress {
				if ok {
					sws.watchStream.RequestProgress(id)
				}
				sws.progress[id] = true
			}

		case <-sws.closec:
			// Shutting down: drain the event channel and account for any
			// undelivered buffered events via ReportEventReceived so the
			// event accounting stays consistent.
			for range sws.watchStream.Chan() {
				storage.ReportEventReceived()
			}
			for _, wrs := range pending {
				for range wrs {
					storage.ReportEventReceived()
				}
			}
		}
	}
}
// close tears down the stream. It runs once, after recvLoop has returned
// (deferred in Watch). It closes the underlying storage watch stream
// first, then signals closec and closes ctrlStream so sendLoop can drain
// and exit.
func (sws *serverWatchStream) close() {
	sws.watchStream.Close()
	close(sws.closec)
	close(sws.ctrlStream)
}
// newResponseHeader builds the common response header for this stream,
// stamped with the given storage revision and the current raft term.
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
	h := pb.ResponseHeader{
		ClusterId: uint64(sws.clusterID),
		MemberId:  uint64(sws.memberID),
		Revision:  rev,
		RaftTerm:  sws.raftTimer.Term(),
	}
	return &h
}

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package auth implements etcd authentication.
package auth
import (
@@ -22,7 +23,6 @@ import (
"reflect"
"sort"
"strings"
"sync"
"time"
etcderr "github.com/coreos/etcd/error"
@@ -88,6 +88,12 @@ type Store interface {
AuthEnabled() bool
EnableAuth() error
DisableAuth() error
PasswordStore
}
type PasswordStore interface {
CheckPassword(user User, password string) bool
HashPassword(password string) (string, error)
}
type store struct {
@@ -95,7 +101,7 @@ type store struct {
timeout time.Duration
ensuredOnce bool
mu sync.Mutex
PasswordStore
}
type User struct {
@@ -142,10 +148,24 @@ func NewStore(server doer, timeout time.Duration) Store {
s := &store{
server: server,
timeout: timeout,
PasswordStore: passwordStore{},
}
return s
}
// passwordStore implements PasswordStore using bcrypt to hash user passwords
type passwordStore struct{}
func (_ passwordStore) CheckPassword(user User, password string) bool {
err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
return err == nil
}
func (_ passwordStore) HashPassword(password string) (string, error) {
hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
return string(hash), err
}
func (s *store) AllUsers() ([]string, error) {
resp, err := s.requestResource("/users/", false)
if err != nil {
@@ -216,11 +236,11 @@ func (s *store) createUserInternal(user User) (User, error) {
if user.Password == "" {
return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User)
}
hash, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
hash, err := s.HashPassword(user.Password)
if err != nil {
return user, err
}
user.Password = string(hash)
user.Password = hash
_, err = s.createResource("/users/"+user.User, user)
if err != nil {
@@ -260,6 +280,13 @@ func (s *store) UpdateUser(user User) (User, error) {
}
return old, err
}
hash, err := s.HashPassword(user.Password)
if err != nil {
return old, err
}
user.Password = hash
newUser, err := old.merge(user)
if err != nil {
return old, err
@@ -379,9 +406,6 @@ func (s *store) UpdateRole(role Role) (Role, error) {
}
func (s *store) AuthEnabled() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.detectAuth()
}
@@ -390,39 +414,31 @@ func (s *store) EnableAuth() error {
return authErr(http.StatusConflict, "already enabled")
}
s.mu.Lock()
defer s.mu.Unlock()
_, err := s.GetUser("root")
if err != nil {
if _, err := s.GetUser("root"); err != nil {
return authErr(http.StatusConflict, "No root user available, please create one")
}
_, err = s.GetRole(GuestRoleName)
if err != nil {
if _, err := s.GetRole(GuestRoleName); err != nil {
plog.Printf("no guest role access found, creating default")
err := s.CreateRole(guestRole)
if err != nil {
if err := s.CreateRole(guestRole); err != nil {
plog.Errorf("error creating guest role. aborting auth enable.")
return err
}
}
err = s.enableAuth()
if err == nil {
plog.Noticef("auth: enabled auth")
} else {
if err := s.enableAuth(); err != nil {
plog.Errorf("error enabling auth (%v)", err)
}
return err
}
plog.Noticef("auth: enabled auth")
return nil
}
func (s *store) DisableAuth() error {
if !s.AuthEnabled() {
return authErr(http.StatusConflict, "already disabled")
}
s.mu.Lock()
defer s.mu.Unlock()
err := s.disableAuth()
if err == nil {
plog.Noticef("auth: disabled auth")
@@ -443,11 +459,7 @@ func (u User) merge(n User) (User, error) {
}
out.User = u.User
if n.Password != "" {
hash, err := bcrypt.GenerateFromPassword([]byte(n.Password), bcrypt.DefaultCost)
if err != nil {
return User{}, err
}
out.Password = string(hash)
out.Password = n.Password
} else {
out.Password = u.Password
}
@@ -471,11 +483,6 @@ func (u User) merge(n User) (User, error) {
return out, nil
}
func (u User) CheckPassword(password string) bool {
err := bcrypt.CompareHashAndPassword([]byte(u.Password), []byte(password))
return err == nil
}
// merge for a role works the same as User above -- atomic Role application to
// each of the substructures.
func (r Role) merge(n Role) (Role, error) {

View File

@@ -53,7 +53,7 @@ type Cluster interface {
// IsIDRemoved checks whether the given ID has been removed from this
// cluster at some point in the past
IsIDRemoved(id types.ID) bool
// ClusterVersion is the cluster-wide minimum major.minor version.
// Version is the cluster-wide minimum major.minor version.
Version() *semver.Version
}
@@ -226,6 +226,13 @@ func (c *cluster) Recover() {
c.members, c.removed = membersFromStore(c.store)
c.version = clusterVersionFromStore(c.store)
MustDetectDowngrade(c.version)
for _, m := range c.members {
plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
}
if c.version != nil {
plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String()))
}
}
// ValidateConfigurationChange takes a proposed ConfChange and
@@ -299,7 +306,7 @@ func (c *cluster) AddMember(m *Member) {
plog.Panicf("marshal raftAttributes should never fail: %v", err)
}
p := path.Join(memberStoreKey(m.ID), raftAttributesSuffix)
if _, err := c.store.Create(p, false, string(b), false, store.Permanent); err != nil {
if _, err := c.store.Create(p, false, string(b), false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
plog.Panicf("create raftAttributes should never fail: %v", err)
}
c.members[m.ID] = m
@@ -314,26 +321,27 @@ func (c *cluster) RemoveMember(id types.ID) {
plog.Panicf("delete member should never fail: %v", err)
}
delete(c.members, id)
if _, err := c.store.Create(removedMemberStoreKey(id), false, "", false, store.Permanent); err != nil {
if _, err := c.store.Create(removedMemberStoreKey(id), false, "", false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
plog.Panicf("create removedMember should never fail: %v", err)
}
c.removed[id] = true
}
func (c *cluster) UpdateAttributes(id types.ID, attr Attributes) {
func (c *cluster) UpdateAttributes(id types.ID, attr Attributes) bool {
c.Lock()
defer c.Unlock()
if m, ok := c.members[id]; ok {
m.Attributes = attr
return
return true
}
_, ok := c.removed[id]
if ok {
plog.Debugf("skipped updating attributes of removed member %s", id)
plog.Warningf("skipped updating attributes of removed member %s", id)
} else {
plog.Panicf("error updating attributes of unknown member %s", id)
}
// TODO: update store in this function
return false
}
func (c *cluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
@@ -344,7 +352,7 @@ func (c *cluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
plog.Panicf("marshal raftAttributes should never fail: %v", err)
}
p := path.Join(memberStoreKey(id), raftAttributesSuffix)
if _, err := c.store.Update(p, string(b), store.Permanent); err != nil {
if _, err := c.store.Update(p, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
plog.Panicf("update raftAttributes should never fail: %v", err)
}
c.members[id].RaftAttributes = raftAttr
@@ -371,6 +379,58 @@ func (c *cluster) SetVersion(ver *semver.Version) {
MustDetectDowngrade(c.version)
}
func (c *cluster) isReadyToAddNewMember() bool {
nmembers := 1
nstarted := 0
for _, member := range c.members {
if member.IsStarted() {
nstarted++
}
nmembers++
}
if nstarted == 1 && nmembers == 2 {
// a case of adding a new node to 1-member cluster for restoring cluster data
// https://github.com/coreos/etcd/blob/master/Documentation/admin_guide.md#restoring-the-cluster
plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
return true
}
nquorum := nmembers/2 + 1
if nstarted < nquorum {
plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
return false
}
return true
}
func (c *cluster) isReadyToRemoveMember(id uint64) bool {
nmembers := 0
nstarted := 0
for _, member := range c.members {
if uint64(member.ID) == id {
continue
}
if member.IsStarted() {
nstarted++
}
nmembers++
}
nquorum := nmembers/2 + 1
if nstarted < nquorum {
plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
return false
}
return true
}
func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) {
members := make(map[types.ID]*Member)
removed := make(map[types.ID]bool)

View File

@@ -29,8 +29,8 @@ import (
// isMemberBootstrapped tries to check if the given member has been bootstrapped
// in the given cluster.
func isMemberBootstrapped(cl *cluster, member string, tr *http.Transport) bool {
rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), time.Second, false, tr)
func isMemberBootstrapped(cl *cluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
if err != nil {
return false
}
@@ -52,14 +52,14 @@ func isMemberBootstrapped(cl *cluster, member string, tr *http.Transport) bool {
// response, an error is returned.
// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
// 10 second is enough for building connection and finishing request.
func GetClusterFromRemotePeers(urls []string, tr *http.Transport) (*cluster, error) {
return getClusterFromRemotePeers(urls, 10*time.Second, true, tr)
func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*cluster, error) {
return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
}
// If logerr is true, it prints out more error messages.
func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, tr *http.Transport) (*cluster, error) {
func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*cluster, error) {
cc := &http.Client{
Transport: tr,
Transport: rt,
Timeout: timeout,
}
for _, u := range urls {
@@ -78,7 +78,7 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
continue
}
var membs []*Member
if err := json.Unmarshal(b, &membs); err != nil {
if err = json.Unmarshal(b, &membs); err != nil {
if logerr {
plog.Warningf("could not unmarshal cluster response: %v", err)
}
@@ -114,7 +114,7 @@ func getRemotePeerURLs(cl Cluster, local string) []string {
// The key of the returned map is the member's ID. The value of the returned map
// is the semver versions string, including server and cluster.
// If it fails to get the version of a member, the key will be nil.
func getVersions(cl Cluster, local types.ID, tr *http.Transport) map[string]*version.Versions {
func getVersions(cl Cluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
members := cl.Members()
vers := make(map[string]*version.Versions)
for _, m := range members {
@@ -126,7 +126,7 @@ func getVersions(cl Cluster, local types.ID, tr *http.Transport) map[string]*ver
vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
continue
}
ver, err := getVersion(m, tr)
ver, err := getVersion(m, rt)
if err != nil {
plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
vers[m.ID.String()] = nil
@@ -166,14 +166,14 @@ func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
return cv
}
// isCompatibleWithCluster return true if the local member has a compitable version with
// isCompatibleWithCluster return true if the local member has a compatible version with
// the current running cluster.
// The version is considered as compitable when at least one of the other members in the cluster has a
// The version is considered as compatible when at least one of the other members in the cluster has a
// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version
// out of the range.
// We set this rule since when the local member joins, another member might be offline.
func isCompatibleWithCluster(cl Cluster, local types.ID, tr *http.Transport) bool {
vers := getVersions(cl, local, tr)
func isCompatibleWithCluster(cl Cluster, local types.ID, rt http.RoundTripper) bool {
vers := getVersions(cl, local, rt)
minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
maxV := semver.Must(semver.NewVersion(version.Version))
maxV = &semver.Version{
@@ -187,7 +187,7 @@ func isCompatibleWithCluster(cl Cluster, local types.ID, tr *http.Transport) boo
func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
var ok bool
for id, v := range vers {
// ignore comparasion with local version
// ignore comparison with local version
if id == local.String() {
continue
}
@@ -214,9 +214,9 @@ func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, min
// getVersion returns the Versions of the given member via its
// peerURLs. Returns the last error if it fails to get the version.
func getVersion(m *Member, tr *http.Transport) (*version.Versions, error) {
func getVersion(m *Member, rt http.RoundTripper) (*version.Versions, error) {
cc := &http.Client{
Transport: tr,
Transport: rt,
}
var (
err error
@@ -246,7 +246,7 @@ func getVersion(m *Member, tr *http.Transport) (*version.Versions, error) {
continue
}
var vers version.Versions
if err := json.Unmarshal(b, &vers); err != nil {
if err = json.Unmarshal(b, &vers); err != nil {
plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
continue
}

View File

@@ -16,13 +16,13 @@ package etcdserver
import (
"fmt"
"net/http"
"path"
"sort"
"strings"
"time"
"github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
)
@@ -44,15 +44,21 @@ type ServerConfig struct {
InitialClusterToken string
NewCluster bool
ForceNewCluster bool
Transport *http.Transport
PeerTLSInfo transport.TLSInfo
TickMs uint
ElectionTicks int
BootstrapTimeout time.Duration
V3demo bool
AutoCompactionRetention int
StrictReconfigCheck bool
EnablePprof bool
}
// VerifyBootstrapConfig sanity-checks the initial config for bootstrap case
// VerifyBootstrap sanity-checks the initial config for bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
if err := c.verifyLocalMember(true); err != nil {
@@ -128,6 +134,16 @@ func (c *ServerConfig) ReqTimeout() time.Duration {
return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond
}
func (c *ServerConfig) electionTimeout() time.Duration {
return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond
}
func (c *ServerConfig) peerDialTimeout() time.Duration {
// 1s for queue wait and system delay
// + one RTT, which is smaller than 1/5 election timeout
return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5
}
func (c *ServerConfig) PrintWithInitial() { c.print(true) }
func (c *ServerConfig) Print() { c.print(false) }
@@ -171,3 +187,10 @@ func checkDuplicateURL(urlsmap types.URLsMap) bool {
}
return false
}
func (c *ServerConfig) bootstrapTimeout() time.Duration {
if c.BootstrapTimeout != 0 {
return c.BootstrapTimeout
}
return time.Second
}

View File

@@ -0,0 +1,25 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
// consistentIndex records the offset of an entry in a consistent replica
// log. It implements the storage.ConsistentIndexGetter interface: the value
// is set to the offset of the current entry just before that entry is
// executed, so ConsistentWatchableKV can read the consistent index from it.
type consistentIndex uint64

// setConsistentIndex stores v as the current consistent index.
func (ci *consistentIndex) setConsistentIndex(v uint64) {
	*ci = consistentIndex(v)
}

// ConsistentIndex reports the most recently stored consistent index.
func (ci *consistentIndex) ConsistentIndex() uint64 {
	return uint64(*ci)
}

View File

@@ -0,0 +1,16 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package etcdserver defines how etcd servers interact and store their states.
package etcdserver

View File

@@ -32,6 +32,9 @@ var (
ErrTimeout = errors.New("etcdserver: request timed out")
ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
ErrNoLeader = errors.New("etcdserver: no leader")
ErrRequestTooLarge = errors.New("etcdserver: request is too large")
)
func isKeyNotFound(err error) bool {

View File

@@ -1,3 +1,17 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdhttp
import (
@@ -18,20 +32,21 @@ const (
)
var (
// capabilityMap is a static map of version to capability map.
// capabilityMaps is a static map of version to capability map.
// the base capabilities is the set of capability 2.0 supports.
capabilityMaps = map[string]map[capability]bool{
"2.1.0": {authCapability: true},
"2.2.0": {authCapability: true},
"2.3.0": {authCapability: true},
}
enableMapMu sync.Mutex
// enabled points to a map in cpapbilityMaps
// enabledMap points to a map in capabilityMaps
enabledMap map[capability]bool
)
// capabilityLoop checks the cluster version every 500ms and updates
// the enabledCapability when the cluster version increased.
// the enabledMap when the cluster version increased.
// capabilityLoop MUST be ran in a goroutine before checking capability
// or using capabilityHandler.
func capabilityLoop(s *etcdserver.EtcdServer) {
@@ -70,14 +85,16 @@ func isCapabilityEnabled(c capability) bool {
func capabilityHandler(c capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !isCapabilityEnabled(c) {
notCapable(w, c)
notCapable(w, r, c)
return
}
fn(w, r)
}
}
func notCapable(w http.ResponseWriter, c capability) {
func notCapable(w http.ResponseWriter, r *http.Request, c capability) {
herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
herr.WriteTo(w)
if err := herr.WriteTo(w); err != nil {
plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
}
}

View File

@@ -21,6 +21,7 @@ import (
"fmt"
"io/ioutil"
"net/http"
"net/http/pprof"
"net/url"
"path"
"strconv"
@@ -54,6 +55,7 @@ const (
healthPath = "/health"
versionPath = "/version"
configPath = "/config"
pprofPrefix = "/debug/pprof"
)
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
@@ -108,6 +110,23 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
mux.Handle(deprecatedMachinesPrefix, dmh)
handleAuth(mux, sech)
if server.IsPprofEnabled() {
plog.Infof("pprof is enabled under %s", pprofPrefix)
mux.HandleFunc(pprofPrefix, pprof.Index)
mux.HandleFunc(pprofPrefix+"/profile", pprof.Profile)
mux.HandleFunc(pprofPrefix+"/symbol", pprof.Symbol)
mux.HandleFunc(pprofPrefix+"/cmdline", pprof.Cmdline)
// TODO: currently, we don't create an entry for pprof.Trace,
// because go 1.4 doesn't provide it. After support of go 1.4 is dropped,
// we should add the entry.
mux.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
mux.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
mux.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
mux.Handle(pprofPrefix+"/block", pprof.Handler("block"))
}
return requestLogger(mux)
}
@@ -128,8 +147,9 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
defer cancel()
rr, err := parseKeyRequest(r, clockwork.NewRealClock())
clock := clockwork.NewRealClock()
startTime := clock.Now()
rr, err := parseKeyRequest(r, clock)
if err != nil {
writeKeyError(w, err)
return
@@ -139,11 +159,14 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
writeKeyNoAuth(w)
return
}
if !rr.Wait {
reportRequestReceived(rr)
}
resp, err := h.server.Do(ctx, rr)
if err != nil {
err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
writeKeyError(w, err)
reportRequestFailed(rr, err)
return
}
switch {
@@ -152,6 +175,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Should never be reached
plog.Errorf("error writing event (%v)", err)
}
reportRequestCompleted(rr, resp, startTime)
case resp.Watcher != nil:
ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
defer cancel()
@@ -186,7 +210,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if !hasWriteRootAccess(h.sec, r) {
writeNoAuth(w)
writeNoAuth(w, r)
return
}
w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
@@ -206,7 +230,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
case "leader":
id := h.server.Leader()
if id == 0 {
writeError(w, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election"))
writeError(w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election"))
return
}
m := newMember(h.cluster.Member(id))
@@ -215,7 +239,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
plog.Warningf("failed to encode members response (%v)", err)
}
default:
writeError(w, httptypes.NewHTTPError(http.StatusNotFound, "Not found"))
writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found"))
}
case "POST":
req := httptypes.MemberCreateRequest{}
@@ -227,11 +251,11 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
err := h.server.AddMember(ctx, *m)
switch {
case err == etcdserver.ErrIDExists || err == etcdserver.ErrPeerURLexists:
writeError(w, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
return
case err != nil:
plog.Errorf("error adding member %s (%v)", m.ID, err)
writeError(w, err)
writeError(w, r, err)
return
}
res := newMember(m)
@@ -248,12 +272,12 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
err := h.server.RemoveMember(ctx, uint64(id))
switch {
case err == etcdserver.ErrIDRemoved:
writeError(w, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
case err == etcdserver.ErrIDNotFound:
writeError(w, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
case err != nil:
plog.Errorf("error removing member %s (%v)", id, err)
writeError(w, err)
writeError(w, r, err)
default:
w.WriteHeader(http.StatusNoContent)
}
@@ -273,12 +297,12 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
err := h.server.UpdateMember(ctx, m)
switch {
case err == etcdserver.ErrPeerURLexists:
writeError(w, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
case err == etcdserver.ErrIDNotFound:
writeError(w, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
case err != nil:
plog.Errorf("error updating member %s (%v)", m.ID, err)
writeError(w, err)
writeError(w, r, err)
default:
w.WriteHeader(http.StatusNoContent)
}
@@ -311,7 +335,7 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
}
stats := h.stats.LeaderStats()
if stats == nil {
writeError(w, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
return
}
w.Header().Set("Content-Type", "application/json")
@@ -319,6 +343,10 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
}
func serveVars(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
@@ -333,7 +361,7 @@ func serveVars(w http.ResponseWriter, r *http.Request) {
}
// TODO: change etcdserver to raft interface when we have it.
// add test for healthHeadler when we have the interface ready.
// add test for healthHandler when we have the interface ready.
func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
@@ -398,13 +426,13 @@ func logHandleFunc(w http.ResponseWriter, r *http.Request) {
d := json.NewDecoder(r.Body)
if err := d.Decode(&in); err != nil {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
return
}
logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
if err != nil {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
return
}
@@ -530,6 +558,34 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
pe = &bv
}
// refresh is nullable, so leave it null if not specified
var refresh *bool
if _, ok := r.Form["refresh"]; ok {
bv, err := getBool(r.Form, "refresh")
if err != nil {
return emptyReq, etcdErr.NewRequestError(
etcdErr.EcodeInvalidField,
"invalid value for refresh",
)
}
refresh = &bv
if refresh != nil && *refresh {
val := r.FormValue("value")
if _, ok := r.Form["value"]; ok && val != "" {
return emptyReq, etcdErr.NewRequestError(
etcdErr.EcodeRefreshValue,
`A value was provided on a refresh`,
)
}
if ttl == nil {
return emptyReq, etcdErr.NewRequestError(
etcdErr.EcodeRefreshTTLRequired,
`No TTL value set`,
)
}
}
}
rr := etcdserverpb.Request{
Method: r.Method,
Path: p,
@@ -550,6 +606,10 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
rr.PrevExist = pe
}
if refresh != nil {
rr.Refresh = refresh
}
// Null TTL is equivalent to unset Expiration
if ttl != nil {
expr := time.Duration(*ttl) * time.Second
@@ -596,9 +656,9 @@ func writeKeyError(w http.ResponseWriter, err error) {
default:
switch err {
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
plog.Error(err)
mlog.MergeError(err)
default:
plog.Errorf("got unexpected response error (%v)", err)
mlog.MergeErrorf("got unexpected response error (%v)", err)
}
ee := etcdErr.NewError(etcdErr.EcodeRaftInternal, err.Error(), 0)
ee.WriteTo(w)
@@ -684,16 +744,16 @@ func trimErrorPrefix(err error, prefix string) error {
func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
ctype := r.Header.Get("Content-Type")
if ctype != "application/json" {
writeError(w, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
return false
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
return false
}
if err := req.UnmarshalJSON(b); err != nil {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
return false
}
return true
@@ -707,7 +767,7 @@ func getID(p string, w http.ResponseWriter) (types.ID, bool) {
}
id, err := types.IDFromString(idStr)
if err != nil {
writeError(w, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
writeError(w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
return 0, false
}
return id, true

View File

@@ -23,7 +23,6 @@ import (
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/auth"
"github.com/coreos/etcd/etcdserver/etcdhttp/httptypes"
"github.com/coreos/etcd/pkg/netutil"
)
type authHandler struct {
@@ -46,7 +45,7 @@ func hasRootAccess(sec auth.Store, r *http.Request) bool {
if !sec.AuthEnabled() {
return true
}
username, password, ok := netutil.BasicAuth(r)
username, password, ok := r.BasicAuth()
if !ok {
return false
}
@@ -54,7 +53,8 @@ func hasRootAccess(sec auth.Store, r *http.Request) bool {
if err != nil {
return false
}
ok = rootUser.CheckPassword(password)
ok = sec.CheckPassword(rootUser, password)
if !ok {
plog.Warningf("auth: wrong password for user %s", username)
return false
@@ -80,7 +80,7 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive b
plog.Warningf("auth: no authorization provided, checking guest access")
return hasGuestAccess(sec, r, key)
}
username, password, ok := netutil.BasicAuth(r)
username, password, ok := r.BasicAuth()
if !ok {
plog.Warningf("auth: malformed basic auth encoding")
return false
@@ -90,7 +90,7 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive b
plog.Warningf("auth: no such user: %s.", username)
return false
}
authAsUser := user.CheckPassword(password)
authAsUser := sec.CheckPassword(user, password)
if !authAsUser {
plog.Warningf("auth: incorrect password for user: %s.", username)
return false
@@ -126,9 +126,11 @@ func hasGuestAccess(sec auth.Store, r *http.Request, key string) bool {
return false
}
func writeNoAuth(w http.ResponseWriter) {
func writeNoAuth(w http.ResponseWriter, r *http.Request) {
herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials")
herr.WriteTo(w)
if err := herr.WriteTo(w); err != nil {
plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
}
}
func handleAuth(mux *http.ServeMux, sh *authHandler) {
@@ -144,27 +146,46 @@ func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
return
}
if !hasRootAccess(sh.sec, r) {
writeNoAuth(w)
writeNoAuth(w, r)
return
}
w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
w.Header().Set("Content-Type", "application/json")
var rolesCollections struct {
Roles []string `json:"roles"`
}
roles, err := sh.sec.AllRoles()
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
if roles == nil {
roles = make([]string, 0)
}
rolesCollections.Roles = roles
err = r.ParseForm()
if err != nil {
writeError(w, r, err)
return
}
var rolesCollections struct {
Roles []auth.Role `json:"roles"`
}
for _, roleName := range roles {
var role auth.Role
role, err = sh.sec.GetRole(roleName)
if err != nil {
writeError(w, r, err)
return
}
rolesCollections.Roles = append(rolesCollections.Roles, role)
}
err = json.NewEncoder(w).Encode(rolesCollections)
if err != nil {
plog.Warningf("baseRoles error encoding on %s", r.URL)
writeError(w, r, err)
return
}
}
@@ -178,7 +199,7 @@ func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) {
return
}
if len(pieces) != 3 {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
return
}
sh.forRole(w, r, pieces[2])
@@ -189,7 +210,7 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri
return
}
if !hasRootAccess(sh.sec, r) {
writeNoAuth(w)
writeNoAuth(w, r)
return
}
w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
@@ -199,7 +220,7 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri
case "GET":
data, err := sh.sec.GetRole(role)
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
err = json.NewEncoder(w).Encode(data)
@@ -212,11 +233,11 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri
var in auth.Role
err := json.NewDecoder(r.Body).Decode(&in)
if err != nil {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
return
}
if in.Role != role {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
return
}
@@ -226,19 +247,19 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri
if in.Grant.IsEmpty() && in.Revoke.IsEmpty() {
err = sh.sec.CreateRole(in)
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
w.WriteHeader(http.StatusCreated)
out = in
} else {
if !in.Permissions.IsEmpty() {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
return
}
out, err = sh.sec.UpdateRole(in)
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
w.WriteHeader(http.StatusOK)
@@ -253,38 +274,73 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri
case "DELETE":
err := sh.sec.DeleteRole(role)
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
}
}
type userWithRoles struct {
User string `json:"user"`
Roles []auth.Role `json:"roles,omitempty"`
}
func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
if !hasRootAccess(sh.sec, r) {
writeNoAuth(w)
writeNoAuth(w, r)
return
}
w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
w.Header().Set("Content-Type", "application/json")
var usersCollections struct {
Users []string `json:"users"`
}
users, err := sh.sec.AllUsers()
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
if users == nil {
users = make([]string, 0)
}
usersCollections.Users = users
err = r.ParseForm()
if err != nil {
writeError(w, r, err)
return
}
var usersCollections struct {
Users []userWithRoles `json:"users"`
}
for _, userName := range users {
var user auth.User
user, err = sh.sec.GetUser(userName)
if err != nil {
writeError(w, r, err)
return
}
uwr := userWithRoles{User: user.User}
for _, roleName := range user.Roles {
var role auth.Role
role, err = sh.sec.GetRole(roleName)
if err != nil {
writeError(w, r, err)
return
}
uwr.Roles = append(uwr.Roles, role)
}
usersCollections.Users = append(usersCollections.Users, uwr)
}
err = json.NewEncoder(w).Encode(usersCollections)
if err != nil {
plog.Warningf("baseUsers error encoding on %s", r.URL)
writeError(w, r, err)
return
}
}
@@ -298,7 +354,7 @@ func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) {
return
}
if len(pieces) != 3 {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
return
}
sh.forUser(w, r, pieces[2])
@@ -309,7 +365,7 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri
return
}
if !hasRootAccess(sh.sec, r) {
writeNoAuth(w)
writeNoAuth(w, r)
return
}
w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
@@ -319,12 +375,28 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri
case "GET":
u, err := sh.sec.GetUser(user)
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
u.Password = ""
err = json.NewEncoder(w).Encode(u)
err = r.ParseForm()
if err != nil {
writeError(w, r, err)
return
}
uwr := userWithRoles{User: u.User}
for _, roleName := range u.Roles {
var role auth.Role
role, err = sh.sec.GetRole(roleName)
if err != nil {
writeError(w, r, err)
return
}
uwr.Roles = append(uwr.Roles, role)
}
err = json.NewEncoder(w).Encode(uwr)
if err != nil {
plog.Warningf("forUser error encoding on %s", r.URL)
return
@@ -334,11 +406,11 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri
var u auth.User
err := json.NewDecoder(r.Body).Decode(&u)
if err != nil {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
return
}
if u.User != user {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
return
}
@@ -358,18 +430,18 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri
}
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
} else {
// update case
if len(u.Roles) != 0 {
writeError(w, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
return
}
out, err = sh.sec.UpdateUser(u)
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
}
@@ -391,7 +463,7 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri
case "DELETE":
err := sh.sec.DeleteUser(user)
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
}
@@ -406,7 +478,7 @@ func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
return
}
if !hasWriteRootAccess(sh.sec, r) {
writeNoAuth(w)
writeNoAuth(w, r)
return
}
w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
@@ -422,13 +494,13 @@ func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
case "PUT":
err := sh.sec.EnableAuth()
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
case "DELETE":
err := sh.sec.DisableAuth()
if err != nil {
writeError(w, err)
writeError(w, r, err)
return
}
}

View File

@@ -0,0 +1,16 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package etcdhttp provides etcd client and server implementations.
package etcdhttp

View File

@@ -25,6 +25,7 @@ import (
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/auth"
"github.com/coreos/etcd/etcdserver/etcdhttp/httptypes"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/pkg/capnslog"
)
@@ -35,13 +36,14 @@ const (
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdhttp")
mlog = logutil.NewMergeLogger(plog)
errClosed = errors.New("etcdhttp: client closed connection")
)
// writeError logs and writes the given Error to the ResponseWriter
// If Error is an etcdErr, it is rendered to the ResponseWriter
// Otherwise, it is assumed to be an InternalServerError
func writeError(w http.ResponseWriter, err error) {
// Otherwise, it is assumed to be a StatusInternalServerError
func writeError(w http.ResponseWriter, r *http.Request, err error) {
if err == nil {
return
}
@@ -49,19 +51,25 @@ func writeError(w http.ResponseWriter, err error) {
case *etcdErr.Error:
e.WriteTo(w)
case *httptypes.HTTPError:
e.WriteTo(w)
if et := e.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
case auth.Error:
herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
herr.WriteTo(w)
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
default:
switch err {
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
plog.Error(err)
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers:
mlog.MergeError(err)
default:
plog.Errorf("got unexpected response error (%v)", err)
mlog.MergeErrorf("got unexpected response error (%v)", err)
}
herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
herr.WriteTo(w)
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
}
}

View File

@@ -27,7 +27,7 @@ var (
type HTTPError struct {
Message string `json:"message"`
// HTTP return code
// Code is the HTTP status code
Code int `json:"-"`
}
@@ -35,15 +35,17 @@ func (e HTTPError) Error() string {
return e.Message
}
// TODO(xiangli): handle http write errors
func (e HTTPError) WriteTo(w http.ResponseWriter) {
func (e HTTPError) WriteTo(w http.ResponseWriter) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(e.Code)
b, err := json.Marshal(e)
if err != nil {
plog.Panicf("marshal HTTPError should never fail (%v)", err)
}
w.Write(b)
if _, err := w.Write(b); err != nil {
return err
}
return nil
}
func NewHTTPError(code int, m string) *HTTPError {

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package httptypes defines how etcd's HTTP API entities are serialized to and
// deserialized from JSON.
package httptypes
import (

View File

@@ -0,0 +1,96 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdhttp
import (
"strconv"
"time"
"net/http"
etcdErr "github.com/coreos/etcd/error"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/etcdhttp/httptypes"
"github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/prometheus/client_golang/prometheus"
)
var (
incomingEvents = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "http",
Name: "received_total",
Help: "Counter of requests received into the system (successfully parsed and authd).",
}, []string{"method"})
failedEvents = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "http",
Name: "failed_total",
Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).",
}, []string{"method", "code"})
successfulEventsHandlingTime = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "http",
Name: "successful_duration_second",
Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
}, []string{"method"})
)
func init() {
prometheus.MustRegister(incomingEvents)
prometheus.MustRegister(failedEvents)
prometheus.MustRegister(successfulEventsHandlingTime)
}
func reportRequestReceived(request etcdserverpb.Request) {
incomingEvents.WithLabelValues(methodFromRequest(request)).Inc()
}
func reportRequestCompleted(request etcdserverpb.Request, response etcdserver.Response, startTime time.Time) {
method := methodFromRequest(request)
successfulEventsHandlingTime.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
}
func reportRequestFailed(request etcdserverpb.Request, err error) {
method := methodFromRequest(request)
failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc()
}
func methodFromRequest(request etcdserverpb.Request) string {
if request.Method == "GET" && request.Quorum {
return "QGET"
}
return request.Method
}
func codeFromError(err error) int {
if err == nil {
return http.StatusInternalServerError
}
switch e := err.(type) {
case *etcdErr.Error:
return (*etcdErr.Error)(e).StatusCode()
case *httptypes.HTTPError:
return (*httptypes.HTTPError)(e).Code
default:
return http.StatusInternalServerError
}
}

View File

@@ -19,15 +19,25 @@ import (
"net/http"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/lease/leasehttp"
"github.com/coreos/etcd/rafthttp"
)
const (
peerMembersPrefix = "/members"
leasesPrefix = "/leases"
)
// NewPeerHandler generates an http.Handler to handle etcd peer (raft) requests.
func NewPeerHandler(cluster etcdserver.Cluster, raftHandler http.Handler) http.Handler {
// NewPeerHandler generates an http.Handler to handle etcd peer requests.
func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler {
var lh http.Handler
if l := s.Lessor(); l != nil {
lh = leasehttp.NewHandler(l)
}
return newPeerHandler(s.Cluster(), s.RaftHandler(), lh)
}
func newPeerHandler(cluster etcdserver.Cluster, raftHandler http.Handler, leaseHandler http.Handler) http.Handler {
mh := &peerMembersHandler{
cluster: cluster,
}
@@ -37,6 +47,9 @@ func NewPeerHandler(cluster etcdserver.Cluster, raftHandler http.Handler) http.H
mux.Handle(rafthttp.RaftPrefix, raftHandler)
mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
mux.Handle(peerMembersPrefix, mh)
if leaseHandler != nil {
mux.Handle(leasesPrefix, leaseHandler)
}
mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
return mux
}

View File

@@ -14,6 +14,7 @@
Request
Metadata
InternalRaftRequest
EmptyResponse
ResponseHeader
RangeRequest
RangeResponse
@@ -28,6 +29,57 @@
TxnResponse
CompactionRequest
CompactionResponse
HashRequest
HashResponse
WatchRequest
WatchCreateRequest
WatchCancelRequest
WatchResponse
LeaseCreateRequest
LeaseCreateResponse
LeaseRevokeRequest
LeaseRevokeResponse
LeaseKeepAliveRequest
LeaseKeepAliveResponse
Member
MemberAddRequest
MemberAddResponse
MemberRemoveRequest
MemberRemoveResponse
MemberUpdateRequest
MemberUpdateResponse
MemberListRequest
MemberListResponse
DefragmentRequest
DefragmentResponse
AuthEnableRequest
AuthDisableRequest
AuthenticateRequest
UserAddRequest
UserGetRequest
UserDeleteRequest
UserChangePasswordRequest
UserGrantRequest
UserRevokeRequest
RoleAddRequest
RoleGetRequest
RoleDeleteRequest
RoleGrantRequest
RoleRevokeRequest
AuthEnableResponse
AuthDisableResponse
AuthenticateResponse
UserAddResponse
UserGetResponse
UserDeleteResponse
UserChangePasswordResponse
UserGrantResponse
UserRevokeResponse
RoleAddResponse
RoleGetResponse
RoleDeleteResponse
RoleGrantResponse
RoleRevokeResponse
*/
package etcdserverpb
@@ -63,6 +115,7 @@ type Request struct {
Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@@ -190,6 +243,18 @@ func (m *Request) MarshalTo(data []byte) (int, error) {
data[i] = 0
}
i++
if m.Refresh != nil {
data[i] = 0x88
i++
data[i] = 0x1
i++
if *m.Refresh {
data[i] = 1
} else {
data[i] = 0
}
i++
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
@@ -275,6 +340,9 @@ func (m *Request) Size() (n int) {
n += 2
n += 1 + sovEtcdserver(uint64(m.Time))
n += 3
if m.Refresh != nil {
n += 3
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -686,6 +754,27 @@ func (m *Request) Unmarshal(data []byte) error {
}
}
m.Stream = bool(v != 0)
case 17:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEtcdserver
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
b := bool(v != 0)
m.Refresh = &b
default:
iNdEx = preIndex
skippy, err := skipEtcdserver(data[iNdEx:])

View File

@@ -25,6 +25,7 @@ message Request {
optional bool Quorum = 14 [(gogoproto.nullable) = false];
optional int64 Time = 15 [(gogoproto.nullable) = false];
optional bool Stream = 16 [(gogoproto.nullable) = false];
optional bool Refresh = 17 [(gogoproto.nullable) = true];
}
message Metadata {

View File

@@ -22,19 +22,32 @@ var _ = math.Inf
// An InternalRaftRequest is the union of all requests which can be
// sent via raft.
type InternalRaftRequest struct {
V2 *Request `protobuf:"bytes,1,opt,name=v2" json:"v2,omitempty"`
Range *RangeRequest `protobuf:"bytes,2,opt,name=range" json:"range,omitempty"`
Put *PutRequest `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"`
DeleteRange *DeleteRangeRequest `protobuf:"bytes,4,opt,name=delete_range" json:"delete_range,omitempty"`
Txn *TxnRequest `protobuf:"bytes,5,opt,name=txn" json:"txn,omitempty"`
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"`
Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"`
Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"`
DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range" json:"delete_range,omitempty"`
Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"`
Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"`
LeaseCreate *LeaseCreateRequest `protobuf:"bytes,8,opt,name=lease_create" json:"lease_create,omitempty"`
LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke" json:"lease_revoke,omitempty"`
AuthEnable *AuthEnableRequest `protobuf:"bytes,10,opt,name=auth_enable" json:"auth_enable,omitempty"`
}
func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
func (*InternalRaftRequest) ProtoMessage() {}
type EmptyResponse struct {
}
func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
func (*EmptyResponse) ProtoMessage() {}
func init() {
proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
}
func (m *InternalRaftRequest) Marshal() (data []byte, err error) {
size := m.Size()
@@ -51,8 +64,13 @@ func (m *InternalRaftRequest) MarshalTo(data []byte) (int, error) {
_ = i
var l int
_ = l
if m.ID != 0 {
data[i] = 0x8
i++
i = encodeVarintRaftInternal(data, i, uint64(m.ID))
}
if m.V2 != nil {
data[i] = 0xa
data[i] = 0x12
i++
i = encodeVarintRaftInternal(data, i, uint64(m.V2.Size()))
n1, err := m.V2.MarshalTo(data[i:])
@@ -62,7 +80,7 @@ func (m *InternalRaftRequest) MarshalTo(data []byte) (int, error) {
i += n1
}
if m.Range != nil {
data[i] = 0x12
data[i] = 0x1a
i++
i = encodeVarintRaftInternal(data, i, uint64(m.Range.Size()))
n2, err := m.Range.MarshalTo(data[i:])
@@ -72,7 +90,7 @@ func (m *InternalRaftRequest) MarshalTo(data []byte) (int, error) {
i += n2
}
if m.Put != nil {
data[i] = 0x1a
data[i] = 0x22
i++
i = encodeVarintRaftInternal(data, i, uint64(m.Put.Size()))
n3, err := m.Put.MarshalTo(data[i:])
@@ -82,7 +100,7 @@ func (m *InternalRaftRequest) MarshalTo(data []byte) (int, error) {
i += n3
}
if m.DeleteRange != nil {
data[i] = 0x22
data[i] = 0x2a
i++
i = encodeVarintRaftInternal(data, i, uint64(m.DeleteRange.Size()))
n4, err := m.DeleteRange.MarshalTo(data[i:])
@@ -92,7 +110,7 @@ func (m *InternalRaftRequest) MarshalTo(data []byte) (int, error) {
i += n4
}
if m.Txn != nil {
data[i] = 0x2a
data[i] = 0x32
i++
i = encodeVarintRaftInternal(data, i, uint64(m.Txn.Size()))
n5, err := m.Txn.MarshalTo(data[i:])
@@ -101,6 +119,64 @@ func (m *InternalRaftRequest) MarshalTo(data []byte) (int, error) {
}
i += n5
}
if m.Compaction != nil {
data[i] = 0x3a
i++
i = encodeVarintRaftInternal(data, i, uint64(m.Compaction.Size()))
n6, err := m.Compaction.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n6
}
if m.LeaseCreate != nil {
data[i] = 0x42
i++
i = encodeVarintRaftInternal(data, i, uint64(m.LeaseCreate.Size()))
n7, err := m.LeaseCreate.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n7
}
if m.LeaseRevoke != nil {
data[i] = 0x4a
i++
i = encodeVarintRaftInternal(data, i, uint64(m.LeaseRevoke.Size()))
n8, err := m.LeaseRevoke.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n8
}
if m.AuthEnable != nil {
data[i] = 0x52
i++
i = encodeVarintRaftInternal(data, i, uint64(m.AuthEnable.Size()))
n9, err := m.AuthEnable.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n9
}
return i, nil
}
func (m *EmptyResponse) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *EmptyResponse) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
return i, nil
}
@@ -134,6 +210,9 @@ func encodeVarintRaftInternal(data []byte, offset int, v uint64) int {
func (m *InternalRaftRequest) Size() (n int) {
var l int
_ = l
if m.ID != 0 {
n += 1 + sovRaftInternal(uint64(m.ID))
}
if m.V2 != nil {
l = m.V2.Size()
n += 1 + l + sovRaftInternal(uint64(l))
@@ -154,6 +233,28 @@ func (m *InternalRaftRequest) Size() (n int) {
l = m.Txn.Size()
n += 1 + l + sovRaftInternal(uint64(l))
}
if m.Compaction != nil {
l = m.Compaction.Size()
n += 1 + l + sovRaftInternal(uint64(l))
}
if m.LeaseCreate != nil {
l = m.LeaseCreate.Size()
n += 1 + l + sovRaftInternal(uint64(l))
}
if m.LeaseRevoke != nil {
l = m.LeaseRevoke.Size()
n += 1 + l + sovRaftInternal(uint64(l))
}
if m.AuthEnable != nil {
l = m.AuthEnable.Size()
n += 1 + l + sovRaftInternal(uint64(l))
}
return n
}
func (m *EmptyResponse) Size() (n int) {
var l int
_ = l
return n
}
@@ -200,6 +301,25 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
m.ID = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ID |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
}
@@ -232,7 +352,7 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
return err
}
iNdEx = postIndex
case 2:
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
}
@@ -265,7 +385,7 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
return err
}
iNdEx = postIndex
case 3:
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
}
@@ -298,7 +418,7 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
return err
}
iNdEx = postIndex
case 4:
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
}
@@ -331,7 +451,7 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
return err
}
iNdEx = postIndex
case 5:
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
}
@@ -364,6 +484,188 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Compaction == nil {
m.Compaction = &CompactionRequest{}
}
if err := m.Compaction.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LeaseCreate", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.LeaseCreate == nil {
m.LeaseCreate = &LeaseCreateRequest{}
}
if err := m.LeaseCreate.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.LeaseRevoke == nil {
m.LeaseRevoke = &LeaseRevokeRequest{}
}
if err := m.LeaseRevoke.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRaftInternal
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.AuthEnable == nil {
m.AuthEnable = &AuthEnableRequest{}
}
if err := m.AuthEnable.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRaftInternal(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthRaftInternal
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *EmptyResponse) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaftInternal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipRaftInternal(data[iNdEx:])

View File

@@ -13,9 +13,20 @@ option (gogoproto.goproto_getters_all) = false;
// An InternalRaftRequest is the union of all requests which can be
// sent via raft.
message InternalRaftRequest {
Request v2 = 1;
RangeRequest range = 2;
PutRequest put = 3;
DeleteRangeRequest delete_range = 4;
TxnRequest txn = 5;
uint64 ID = 1;
Request v2 = 2;
RangeRequest range = 3;
PutRequest put = 4;
DeleteRangeRequest delete_range = 5;
TxnRequest txn = 6;
CompactionRequest compaction = 7;
LeaseCreateRequest lease_create = 8;
LeaseRevokeRequest lease_revoke = 9;
AuthEnableRequest auth_enable = 10;
}
message EmptyResponse {
}

File diff suppressed because it is too large Load Diff

View File

@@ -7,8 +7,7 @@ import "etcd/storage/storagepb/kv.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
// Interface exported by the server.
service etcd {
service KV {
// Range gets the keys in the range from the store.
rpc Range(RangeRequest) returns (RangeResponse) {}
@@ -25,28 +24,134 @@ service etcd {
// Txn processes all the requests in one transaction.
// A txn request increases the revision of the store,
// and generates events with the same revision in the event history.
// It is not allowed to modify the same key several times within one txn.
rpc Txn(TxnRequest) returns (TxnResponse) {}
// Compact compacts the event history in etcd. User should compact the
// event history periodically, or it will grow infinitely.
rpc Compact(CompactionRequest) returns (CompactionResponse) {}
// Hash returns the hash of local KV state for consistency checking purpose.
// This is designed for testing purpose. Do not use this in production when there
// are ongoing transactions.
rpc Hash(HashRequest) returns (HashResponse) {}
}
service Watch {
// Watch watches the events happening or happened. Both input and output
// are stream. One watch rpc can watch for multiple keys or prefixs and
// get a stream of events. The whole events history can be watched unless
// compacted.
rpc Watch(stream WatchRequest) returns (stream WatchResponse) {}
}
service Lease {
// LeaseCreate creates a lease. A lease has a TTL. The lease will expire if the
// server does not receive a keepAlive within TTL from the lease holder.
// All keys attached to the lease will be expired and deleted if the lease expires.
// The key expiration generates an event in event history.
rpc LeaseCreate(LeaseCreateRequest) returns (LeaseCreateResponse) {}
// LeaseRevoke revokes a lease. All the key attached to the lease will be expired and deleted.
rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {}
// KeepAlive keeps the lease alive.
rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {}
// TODO(xiangli) List all existing Leases?
// TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease?
}
service Cluster {
// MemberAdd adds a member into the cluster.
rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {}
// MemberRemove removes an existing member from the cluster.
rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {}
// MemberUpdate updates the member configuration.
rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {}
// MemberList lists all the members in the cluster.
rpc MemberList(MemberListRequest) returns (MemberListResponse) {}
}
service Maintenance {
// TODO: move Hash from kv to Maintenance
rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {}
}
service Auth {
// AuthEnable enables authentication.
rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {}
// AuthDisable disables authentication.
rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {}
// Authenticate processes authenticate request.
rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {}
// UserAdd adds a new user.
rpc UserAdd(UserAddRequest) returns (UserAddResponse) {}
// UserGet gets a detailed information of a user or lists entire users.
rpc UserGet(UserGetRequest) returns (UserGetResponse) {}
// UserDelete deletes a specified user.
rpc UserDelete(UserDeleteRequest) returns (UserDeleteResponse) {}
// UserChangePassword changes password of a specified user.
rpc UserChangePassword(UserChangePasswordRequest) returns (UserChangePasswordResponse) {}
// UserGrant grants a role to a specified user.
rpc UserGrant(UserGrantRequest) returns (UserGrantResponse) {}
// UserRevoke revokes a role of specified user.
rpc UserRevoke(UserRevokeRequest) returns (UserRevokeResponse) {}
// RoleAdd adds a new role.
rpc RoleAdd(RoleAddRequest) returns (RoleAddResponse) {}
// RoleGet gets a detailed information of a role or lists entire roles.
rpc RoleGet(RoleGetRequest) returns (RoleGetResponse) {}
// RoleDelete deletes a specified role.
rpc RoleDelete(RoleDeleteRequest) returns (RoleDeleteResponse) {}
// RoleGrant grants a permission of a specified key or range to a specified role.
rpc RoleGrant(RoleGrantRequest) returns (RoleGrantResponse) {}
// RoleRevoke revokes a key or range permission of a specified role.
rpc RoleRevoke(RoleRevokeRequest) returns (RoleRevokeResponse) {}
}
message ResponseHeader {
// an error type message?
string error = 1;
uint64 cluster_id = 2;
uint64 member_id = 3;
uint64 cluster_id = 1;
uint64 member_id = 2;
// revision of the store when the request was applied.
int64 revision = 4;
int64 revision = 3;
// term of raft when the request was applied.
uint64 raft_term = 5;
uint64 raft_term = 4;
}
message RangeRequest {
enum SortOrder {
NONE = 0; // default, no sorting
ASCEND = 1; // lowest target value first
DESCEND = 2; // highest target value first
}
enum SortTarget {
KEY = 0;
VERSION = 1;
CREATE = 2;
MOD = 3;
VALUE = 4;
}
// if the range_end is not given, the request returns the key.
bytes key = 1;
// if the range_end is given, it gets the keys in range [key, range_end).
// if the range_end is given, it gets the keys in range [key, range_end)
// if range_end is nonempty, otherwise it returns all keys >= key.
bytes range_end = 2;
// limit the number of keys returned.
int64 limit = 3;
@@ -55,6 +160,19 @@ message RangeRequest {
// if the revision has been compacted, ErrCompaction will be returned in
// response.
int64 revision = 4;
// sort_order is the requested order for returned the results
SortOrder sort_order = 5;
// sort_target is the kv field to use for sorting
SortTarget sort_target = 6;
// range request is linearizable by default. Linearizable requests has a higher
// latency and lower throughput than serializable request.
// To reduce latency, serializable can be set. If serializable is set, range request
// will be serializable, but not linearizable with other requests.
// Serializable range can be served locally without waiting for other nodes in the cluster.
bool serializable = 7;
}
message RangeResponse {
@@ -67,6 +185,7 @@ message RangeResponse {
message PutRequest {
bytes key = 1;
bytes value = 2;
int64 lease = 3;
}
message PutResponse {
@@ -82,6 +201,8 @@ message DeleteRangeRequest {
message DeleteRangeResponse {
ResponseHeader header = 1;
// Deleted is the number of keys that got deleted.
int64 deleted = 2;
}
message RequestUnion {
@@ -171,3 +292,246 @@ message CompactionRequest {
message CompactionResponse {
ResponseHeader header = 1;
}
message HashRequest {
}
message HashResponse {
ResponseHeader header = 1;
uint32 hash = 2;
}
message WatchRequest {
oneof request_union {
WatchCreateRequest create_request = 1;
WatchCancelRequest cancel_request = 2;
}
}
message WatchCreateRequest {
// the key to be watched
bytes key = 1;
// if the range_end is given, keys in [key, range_end) are watched
// NOTE: only range_end == prefixEnd(key) is accepted now
bytes range_end = 2;
// start_revision is an optional revision (including) to watch from. No start_revision is "now".
int64 start_revision = 3;
// if progress_notify is set, etcd server sends WatchResponse with empty events to the
// created watcher when there are no recent events. It is useful when clients want always to be
// able to recover a disconnected watcher from a recent known revision.
// etcdsever can decide how long it should send a notification based on current load.
bool progress_notify = 4;
}
message WatchCancelRequest {
int64 watch_id = 1;
}
message WatchResponse {
ResponseHeader header = 1;
// watch_id is the ID of the watching the response sent to.
int64 watch_id = 2;
// If the response is for a create watch request, created is set to true.
// Client should record the watch_id and prepare for receiving events for
// that watching from the same stream.
// All events sent to the created watching will attach with the same watch_id.
bool created = 3;
// If the response is for a cancel watch request, cancel is set to true.
// No further events will be sent to the canceled watching.
bool canceled = 4;
// CompactRevision is set to the minimum index if a watching tries to watch
// at a compacted index.
//
// This happens when creating a watching at a compacted revision or the watching cannot
// catch up with the progress of the KV.
//
// Client should treat the watching as canceled and should not try to create any
// watching with same start_revision again.
int64 compact_revision = 5;
repeated storagepb.Event events = 11;
}
message LeaseCreateRequest {
// advisory ttl in seconds
int64 TTL = 1;
// requested ID to create; 0 lets lessor choose
int64 ID = 2;
}
message LeaseCreateResponse {
ResponseHeader header = 1;
int64 ID = 2;
// server decided ttl in second
int64 TTL = 3;
string error = 4;
}
message LeaseRevokeRequest {
int64 ID = 1;
}
message LeaseRevokeResponse {
ResponseHeader header = 1;
}
message LeaseKeepAliveRequest {
int64 ID = 1;
}
message LeaseKeepAliveResponse {
ResponseHeader header = 1;
int64 ID = 2;
int64 TTL = 3;
}
message Member {
uint64 ID = 1;
// If the member is not started, name will be an empty string.
string name = 2;
bool IsLeader = 3;
repeated string peerURLs = 4;
// If the member is not started, client_URLs will be an zero length
// string array.
repeated string clientURLs = 5;
}
message MemberAddRequest {
repeated string peerURLs = 1;
}
message MemberAddResponse {
ResponseHeader header = 1;
Member member = 2;
}
message MemberRemoveRequest {
uint64 ID = 1;
}
message MemberRemoveResponse {
ResponseHeader header = 1;
}
message MemberUpdateRequest {
uint64 ID = 1;
repeated string peerURLs = 2;
}
message MemberUpdateResponse{
ResponseHeader header = 1;
}
message MemberListRequest {
}
message MemberListResponse {
ResponseHeader header = 1;
repeated Member members = 2;
}
message DefragmentRequest {
}
message DefragmentResponse {
ResponseHeader header = 1;
}
message AuthEnableRequest {
}
message AuthDisableRequest {
}
message AuthenticateRequest {
}
message UserAddRequest {
}
message UserGetRequest {
}
message UserDeleteRequest {
}
message UserChangePasswordRequest {
}
message UserGrantRequest {
}
message UserRevokeRequest {
}
message RoleAddRequest {
}
message RoleGetRequest {
}
message RoleDeleteRequest {
}
message RoleGrantRequest {
}
message RoleRevokeRequest {
}
message AuthEnableResponse {
ResponseHeader header = 1;
}
message AuthDisableResponse {
ResponseHeader header = 1;
}
message AuthenticateResponse {
ResponseHeader header = 1;
}
message UserAddResponse {
ResponseHeader header = 1;
}
message UserGetResponse {
ResponseHeader header = 1;
}
message UserDeleteResponse {
ResponseHeader header = 1;
}
message UserChangePasswordResponse {
ResponseHeader header = 1;
}
message UserGrantResponse {
ResponseHeader header = 1;
}
message UserRevokeResponse {
ResponseHeader header = 1;
}
message RoleAddResponse {
ResponseHeader header = 1;
}
message RoleGetResponse {
ResponseHeader header = 1;
}
message RoleDeleteResponse {
ResponseHeader header = 1;
}
message RoleGrantResponse {
ResponseHeader header = 1;
}
message RoleRevokeResponse {
ResponseHeader header = 1;
}

View File

@@ -35,6 +35,7 @@ var (
// RaftAttributes represents the raft related attributes of an etcd member.
type RaftAttributes struct {
// PeerURLs is the list of peers in the raft cluster.
// TODO(philips): ensure these are URLs
PeerURLs []string `json:"peerURLs"`
}
@@ -52,7 +53,7 @@ type Member struct {
}
// NewMember creates a Member without an ID and generates one based on the
// name, peer URLs. This is used for bootstrapping/adding new member.
// cluster name, peer URLs, and time. This is used for bootstrapping/adding new member.
func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
m := &Member{
RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()},
@@ -105,6 +106,10 @@ func (m *Member) Clone() *Member {
return mm
}
func (m *Member) IsStarted() bool {
return len(m.Name) != 0
}
func memberStoreKey(id types.ID) string {
return path.Join(storeMembersPrefix, id.String())
}
@@ -153,14 +158,14 @@ func nodeToMember(n *store.NodeExtern) (*Member, error) {
return m, nil
}
// implement sort by ID interface
// MembersByID implements sort by ID interface
type MembersByID []*Member
func (ms MembersByID) Len() int { return len(ms) }
func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID }
func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
// implement sort by peer urls interface
// MembersByPeerURLs implements sort by peer urls interface
type MembersByPeerURLs []*Member
func (ms MembersByPeerURLs) Len() int { return len(ms) }

View File

@@ -23,11 +23,12 @@ import (
var (
// TODO: with label in v3?
proposeDurations = prometheus.NewSummary(prometheus.SummaryOpts{
proposeDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "server",
Name: "proposal_durations_milliseconds",
Name: "proposal_durations_seconds",
Help: "The latency distributions of committing proposal.",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
})
proposePending = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "etcd",

View File

@@ -24,6 +24,7 @@ import (
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/contention"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
@@ -31,7 +32,6 @@ import (
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
"github.com/coreos/pkg/capnslog"
)
@@ -76,13 +76,14 @@ type RaftTimer interface {
Term() uint64
}
// apply contains entries, snapshot be applied.
// After applied all the items, the application needs
// to send notification to done chan.
// apply contains entries, snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// to raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
entries []raftpb.Entry
snapshot raftpb.Snapshot
done chan struct{}
raftDone <-chan struct{} // rx {} after raft has persisted messages
}
type raftNode struct {
@@ -117,6 +118,8 @@ type raftNode struct {
// If transport is nil, server will panic.
transport rafthttp.Transporter
td *contention.TimeoutDetector
stopped chan struct{}
done chan struct{}
}
@@ -130,10 +133,20 @@ func (r *raftNode) start(s *EtcdServer) {
r.stopped = make(chan struct{})
r.done = make(chan struct{})
heartbeat := 200 * time.Millisecond
if s.cfg != nil {
heartbeat = time.Duration(s.cfg.TickMs) * time.Millisecond
}
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
r.td = contention.NewTimeoutDetector(2 * heartbeat)
go func() {
var syncC <-chan time.Time
defer r.onStop()
islead := false
for {
select {
case <-r.ticker:
@@ -147,29 +160,54 @@ func (r *raftNode) start(s *EtcdServer) {
}
atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
if rd.RaftState == raft.StateLeader {
islead = true
// TODO: raft should send server a notification through chan when
// it promotes or demotes instead of modifying server directly.
syncC = r.s.SyncTicker
if r.s.lessor != nil {
r.s.lessor.Promote(r.s.cfg.electionTimeout())
}
// TODO: remove the nil checking
// current test utility does not provide the stats
if r.s.stats != nil {
r.s.stats.BecomeLeader()
}
if r.s.compactor != nil {
r.s.compactor.Resume()
}
r.td.Reset()
} else {
islead = false
if r.s.lessor != nil {
r.s.lessor.Demote()
}
if r.s.compactor != nil {
r.s.compactor.Pause()
}
syncC = nil
}
}
apply := apply{
raftDone := make(chan struct{}, 1)
ap := apply{
entries: rd.CommittedEntries,
snapshot: rd.Snapshot,
done: make(chan struct{}),
raftDone: raftDone,
}
select {
case r.applyc <- apply:
case r.applyc <- ap:
case <-r.stopped:
return
}
// the leader can write to its disk in parallel with replicating to the followers and them
// writing to their disks.
// For more details, check raft thesis 10.2.1
if islead {
r.s.send(rd.Messages)
}
if !raft.IsEmptySnap(rd.Snapshot) {
if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
plog.Fatalf("raft save snapshot error: %v", err)
@@ -182,13 +220,10 @@ func (r *raftNode) start(s *EtcdServer) {
}
r.raftStorage.Append(rd.Entries)
if !islead {
r.s.send(rd.Messages)
select {
case <-apply.done:
case <-r.stopped:
return
}
raftDone <- struct{}{}
r.Advance()
case <-syncC:
r.s.sync(r.s.cfg.ReqTimeout())
@@ -253,7 +288,7 @@ func startNode(cfg *ServerConfig, cl *cluster, ids []types.ID) (id types.ID, n r
ClusterID: uint64(cl.ID()),
},
)
if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
if err = os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
plog.Fatalf("create snapshot directory error: %v", err)
}
if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
@@ -278,6 +313,11 @@ func startNode(cfg *ServerConfig, cl *cluster, ids []types.ID) (id types.ID, n r
MaxSizePerMsg: maxSizePerMsg,
MaxInflightMsgs: maxInflightMsgs,
}
if cfg.V3demo {
c.CheckQuorum = true
}
n = raft.StartNode(c, peers)
raftStatusMu.Lock()
raftStatus = n.Status
@@ -310,6 +350,11 @@ func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *clust
MaxSizePerMsg: maxSizePerMsg,
MaxInflightMsgs: maxInflightMsgs,
}
if cfg.V3demo {
c.CheckQuorum = true
}
n := raft.RestartNode(c)
raftStatusMu.Lock()
raftStatus = n.Status
@@ -373,7 +418,7 @@ func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (type
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeAddRemove, in which case the contained ID will be removed from the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
ids := make(map[uint64]bool)
if snap != nil {

View File

@@ -16,6 +16,7 @@ package etcdserver
import (
"encoding/json"
"errors"
"expvar"
"fmt"
"math/rand"
@@ -23,18 +24,22 @@ import (
"os"
"path"
"regexp"
"sync"
"sync/atomic"
"time"
"github.com/coreos/etcd/auth"
"github.com/coreos/etcd/compactor"
"github.com/coreos/etcd/discovery"
"github.com/coreos/etcd/etcdserver/etcdhttp/httptypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/stats"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/idutil"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/pkg/runtime"
"github.com/coreos/etcd/pkg/timeutil"
"github.com/coreos/etcd/pkg/schedule"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/pkg/wait"
"github.com/coreos/etcd/raft"
@@ -42,6 +47,7 @@ import (
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/snap"
dstorage "github.com/coreos/etcd/storage"
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/etcd/store"
"github.com/coreos/etcd/version"
"github.com/coreos/etcd/wal"
@@ -60,7 +66,17 @@ const (
StoreKeysPrefix = "/1"
purgeFileInterval = 30 * time.Second
monitorVersionInterval = 5 * time.Second
// monitorVersionInterval should be smaller than the timeout
// on the connection. Or we will not be able to reuse the connection
// (since it will timeout).
monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
databaseFilename = "db"
// max number of in-flight snapshot messages etcdserver allows to have
// This number is more than enough for most clusters with 5 machines.
maxInFlightMsgSnap = 16
releaseDelayAfterSnapshot = 30 * time.Second
)
var (
@@ -116,12 +132,12 @@ type Server interface {
// ErrIDNotFound if member ID is not in the cluster.
RemoveMember(ctx context.Context, id uint64) error
// UpdateMember attempts to update a existing member in the cluster. It will
// UpdateMember attempts to update an existing member in the cluster. It will
// return ErrIDNotFound if the member ID does not exist.
UpdateMember(ctx context.Context, updateMemb Member) error
// ClusterVersion is the cluster-wide minimum major.minor version.
// Cluster version is set to the min version that a etcd member is
// Cluster version is set to the min version that an etcd member is
// compatible with when first bootstrap.
//
// ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
@@ -156,35 +172,59 @@ type EtcdServer struct {
cluster *cluster
store store.Store
kv dstorage.KV
kv dstorage.ConsistentWatchableKV
lessor lease.Lessor
bemu sync.Mutex
be backend.Backend
authStore auth.AuthStore
stats *stats.ServerStats
lstats *stats.LeaderStats
SyncTicker <-chan time.Time
// compactor is used to auto-compact the KV.
compactor *compactor.Periodic
// consistent index used to hold the offset of current executing entry
// It is initialized to 0 before executing any entry.
consistIndex consistentIndex
// peerRt used to send requests (version, lease) to peers.
peerRt http.RoundTripper
reqIDGen *idutil.Generator
// forceVersionC is used to force the version monitor loop
// to detect the cluster version immediately.
forceVersionC chan struct{}
msgSnapC chan raftpb.Message
// count the number of inflight snapshots.
// MUST use atomic operation to access this field.
inflightSnapshots int64
}
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
st := store.New(StoreClusterPrefix, StoreKeysPrefix)
var w *wal.WAL
var n raft.Node
var s *raft.MemoryStorage
var id types.ID
var cl *cluster
var (
w *wal.WAL
n raft.Node
s *raft.MemoryStorage
id types.ID
cl *cluster
)
if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
if !cfg.V3demo && fileutil.Exist(path.Join(cfg.SnapDir(), databaseFilename)) {
return nil, errors.New("experimental-v3demo cannot be disabled once it is enabled")
}
// Run the migrations.
dataVer, err := version.DetectDataDir(cfg.DataDir)
if err != nil {
@@ -197,6 +237,10 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
haveWAL := wal.Exist(cfg.WALDir())
ss := snap.New(cfg.SnapDir())
prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
if err != nil {
return nil, err
}
var remotes []*Member
switch {
case !haveWAL && !cfg.NewCluster:
@@ -207,14 +251,14 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
if err != nil {
return nil, err
}
existingCluster, err := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), cfg.Transport)
existingCluster, err := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
if err != nil {
return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
}
if err := ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
}
if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, cfg.Transport) {
if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
return nil, fmt.Errorf("incomptible with current running cluster")
}
@@ -232,11 +276,13 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
return nil, err
}
m := cl.MemberByName(cfg.Name)
if isMemberBootstrapped(cl, cfg.Name, cfg.Transport) {
if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() {
str, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
var str string
var err error
str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
if err != nil {
return nil, &DiscoveryError{Op: "join", Err: err}
}
@@ -266,7 +312,9 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
if cfg.ShouldDiscover() {
plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
}
snapshot, err := ss.Load()
var snapshot *raftpb.Snapshot
var err error
snapshot, err = ss.Load()
if err != nil && err != snap.ErrNoSnapshot {
return nil, err
}
@@ -277,9 +325,6 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
}
cfg.Print()
if snapshot != nil {
plog.Infof("loaded cluster information from store: %s", cl)
}
if !cfg.ForceNewCluster {
id, cl, n, s, w = restartNode(cfg, snapshot)
} else {
@@ -319,19 +364,40 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
stats: sstats,
lstats: lstats,
SyncTicker: time.Tick(500 * time.Millisecond),
reqIDGen: idutil.NewGenerator(uint8(id), time.Now()),
peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
}
if cfg.V3demo {
srv.kv = dstorage.New(path.Join(cfg.DataDir, "member", "v3demo"))
} else {
// we do not care about the error of the removal
os.RemoveAll(path.Join(cfg.DataDir, "member", "v3demo"))
srv.be = backend.NewDefaultBackend(path.Join(cfg.SnapDir(), databaseFilename))
srv.lessor = lease.NewLessor(srv.be)
srv.kv = dstorage.New(srv.be, srv.lessor, &srv.consistIndex)
srv.authStore = auth.NewAuthStore(srv)
if h := cfg.AutoCompactionRetention; h != 0 {
srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
srv.compactor.Run()
}
}
// TODO: move transport initialization near the definition of remote
tr := rafthttp.NewTransporter(cfg.Transport, id, cl.ID(), srv, srv.errorc, sstats, lstats)
tr := &rafthttp.Transport{
TLSInfo: cfg.PeerTLSInfo,
DialTimeout: cfg.peerDialTimeout(),
ID: id,
URLs: cfg.PeerURLs,
ClusterID: cl.ID(),
Raft: srv,
Snapshotter: ss,
ServerStats: sstats,
LeaderStats: lstats,
ErrorC: srv.errorc,
V3demo: cfg.V3demo,
}
if err := tr.Start(); err != nil {
return nil, err
}
// add all remotes into transport
for _, m := range remotes {
if m.ID != id {
@@ -344,6 +410,7 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
}
}
srv.r.transport = tr
return srv, nil
}
@@ -403,6 +470,8 @@ func (s *EtcdServer) Cluster() Cluster { return s.cluster }
func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor }
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
if s.cluster.IsIDRemoved(types.ID(m.From)) {
plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
@@ -418,35 +487,141 @@ func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(
func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
// ReportSnapshot reports snapshot sent status to the raft state machine,
// and clears the used snapshot from the snapshot store.
func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
s.r.ReportSnapshot(id, status)
}
type etcdProgress struct {
confState raftpb.ConfState
snapi uint64
appliedi uint64
}
func (s *EtcdServer) run() {
snap, err := s.r.raftStorage.Snapshot()
if err != nil {
plog.Panicf("get snapshot from raft storage error: %v", err)
}
confState := snap.Metadata.ConfState
snapi := snap.Metadata.Index
appliedi := snapi
s.r.start(s)
// asynchronously accept apply packets, dispatch progress in-order
sched := schedule.NewFIFOScheduler()
ep := etcdProgress{
confState: snap.Metadata.ConfState,
snapi: snap.Metadata.Index,
appliedi: snap.Metadata.Index,
}
defer func() {
s.r.stop()
sched.Stop()
// kv, lessor and backend can be nil if running without v3 enabled
// or running unit tests.
if s.lessor != nil {
s.lessor.Stop()
}
if s.kv != nil {
s.kv.Close()
}
if s.be != nil {
s.be.Close()
}
if s.compactor != nil {
s.compactor.Stop()
}
close(s.done)
}()
var shouldstop bool
for {
select {
case apply := <-s.r.apply():
// apply snapshot
if !raft.IsEmptySnap(apply.snapshot) {
if apply.snapshot.Metadata.Index <= appliedi {
plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
apply.snapshot.Metadata.Index, appliedi)
var expiredLeaseC <-chan []*lease.Lease
if s.lessor != nil {
expiredLeaseC = s.lessor.ExpiredLeasesC()
}
for {
select {
case ap := <-s.r.apply():
f := func(context.Context) { s.applyAll(&ep, &ap) }
sched.Schedule(f)
case leases := <-expiredLeaseC:
go func() {
for _, l := range leases {
s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(l.ID)})
}
}()
case err := <-s.errorc:
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
return
case <-s.stop:
return
}
}
}
func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
s.applySnapshot(ep, apply)
s.applyEntries(ep, apply)
// wait for the raft routine to finish the disk writes before triggering a
// snapshot. or applied index might be greater than the last index in raft
// storage, since the raft routine might be slower than apply routine.
<-apply.raftDone
s.triggerSnapshot(ep)
select {
// snapshot requested via send()
case m := <-s.msgSnapC:
merged := s.createMergedSnapshotMessage(m, ep.appliedi, ep.confState)
s.sendMergedSnap(merged)
default:
}
}
func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
if raft.IsEmptySnap(apply.snapshot) {
return
}
if apply.snapshot.Metadata.Index <= ep.appliedi {
plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
apply.snapshot.Metadata.Index, ep.appliedi)
}
if s.cfg.V3demo {
snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index)
if err != nil {
plog.Panicf("get database snapshot file path error: %v", err)
}
fn := path.Join(s.cfg.SnapDir(), databaseFilename)
if err := os.Rename(snapfn, fn); err != nil {
plog.Panicf("rename snapshot file error: %v", err)
}
newbe := backend.NewDefaultBackend(fn)
if err := s.kv.Restore(newbe); err != nil {
plog.Panicf("restore KV error: %v", err)
}
// Closing old backend might block until all the txns
// on the backend are finished.
// We do not want to wait on closing the old backend.
s.bemu.Lock()
oldbe := s.be
go func() {
if err := oldbe.Close(); err != nil {
plog.Panicf("close backend error: %v", err)
}
}()
s.be = newbe
s.bemu.Unlock()
if s.lessor != nil {
s.lessor.Recover(newbe, s.kv)
}
}
if err := s.store.Recovery(apply.snapshot.Data); err != nil {
plog.Panicf("recovery store error: %v", err)
}
@@ -461,46 +636,50 @@ func (s *EtcdServer) run() {
s.r.transport.AddPeer(m.ID, m.PeerURLs)
}
appliedi = apply.snapshot.Metadata.Index
snapi = appliedi
confState = apply.snapshot.Metadata.ConfState
plog.Infof("recovered from incoming snapshot at index %d", snapi)
ep.appliedi = apply.snapshot.Metadata.Index
ep.snapi = ep.appliedi
ep.confState = apply.snapshot.Metadata.ConfState
plog.Infof("recovered from incoming snapshot at index %d", ep.snapi)
}
// apply entries
if len(apply.entries) != 0 {
func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
if len(apply.entries) == 0 {
return
}
firsti := apply.entries[0].Index
if firsti > appliedi+1 {
plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, appliedi)
if firsti > ep.appliedi+1 {
plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
}
var ents []raftpb.Entry
if appliedi+1-firsti < uint64(len(apply.entries)) {
ents = apply.entries[appliedi+1-firsti:]
if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
ents = apply.entries[ep.appliedi+1-firsti:]
}
if appliedi, shouldstop = s.apply(ents, &confState); shouldstop {
if len(ents) == 0 {
return
}
var shouldstop bool
if ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
}
}
// wait for the raft routine to finish the disk writes before triggering a
// snapshot. or applied index might be greater than the last index in raft
// storage, since the raft routine might be slower than apply routine.
apply.done <- struct{}{}
func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
if ep.appliedi-ep.snapi <= s.snapCount {
return
}
// trigger snapshot
if appliedi-snapi > s.snapCount {
plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", appliedi, snapi)
s.snapshot(appliedi, confState)
snapi = appliedi
}
case err := <-s.errorc:
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
return
case <-s.stop:
// When sending a snapshot, etcd will pause compaction.
// After receives a snapshot, the slow follower needs to get all the entries right after
// the snapshot sent to catch up. If we do not pause compaction, the log entries right after
// the snapshot sent might already be compacted. It happens when the snapshot takes long time
// to send and save. Pausing compaction avoids triggering a snapshot sending cycle.
if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
return
}
}
plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
s.snapshot(ep.appliedi, ep.confState)
ep.snapi = ep.appliedi
}
// Stop stops the server gracefully, and shuts down the running goroutine.
@@ -515,7 +694,10 @@ func (s *EtcdServer) Stop() {
}
func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
time.Sleep(d)
select {
case <-time.After(d):
case <-s.done:
}
select {
case s.errorc <- err:
default:
@@ -554,7 +736,7 @@ func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
select {
case x := <-ch:
proposeDurations.Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
proposeDurations.Observe(float64(time.Since(start)) / float64(time.Second))
resp := x.(Response)
return resp, resp.err
case <-ctx.Done():
@@ -603,6 +785,12 @@ func (s *EtcdServer) LeaderStats() []byte {
func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }
func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
if s.cfg.StrictReconfigCheck && !s.cluster.isReadyToAddNewMember() {
// If s.cfg.StrictReconfigCheck is false, it means the option -strict-reconfig-check isn't passed to etcd.
// In such a case adding a new member is allowed unconditionally
return ErrNotEnoughStartedMembers
}
// TODO: move Member to protobuf type
b, err := json.Marshal(memb)
if err != nil {
@@ -617,6 +805,12 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
}
func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
if s.cfg.StrictReconfigCheck && !s.cluster.isReadyToRemoveMember(id) {
// If s.cfg.StrictReconfigCheck is false, it means the option -strict-reconfig-check isn't passed to etcd.
// In such a case removing a member is allowed unconditionally
return ErrNotEnoughStartedMembers
}
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeRemoveNode,
NodeID: id,
@@ -638,17 +832,20 @@ func (s *EtcdServer) UpdateMember(ctx context.Context, memb Member) error {
}
// Implement the RaftTimer interface
func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) }
func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) }
// Only for testing purpose
// Lead is only for testing purposes.
// TODO: add Raft server interface to expose raft related info:
// Index, Term, Lead, Committed, Applied, LastIndex, etc.
func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }
func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }
func (s *EtcdServer) IsPprofEnabled() bool { return s.cfg.EnablePprof }
// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
@@ -679,7 +876,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error
// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
// The request will be canceled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
req := pb.Request{
@@ -730,24 +927,75 @@ func (s *EtcdServer) publish(timeout time.Duration) {
}
}
// TODO: move this function into raft.go
// send forwards raft messages to the transport, mutating ms in place:
// messages addressed to removed members are dropped (To reset to 0), and
// in v3 demo mode raw MsgSnap messages are redirected to the server main
// loop via msgSnapC so the v2 store snapshot can be merged with the v3 KV
// snapshot before sending.
func (s *EtcdServer) send(ms []raftpb.Message) {
	for i := range ms {
		if s.cluster.IsIDRemoved(types.ID(ms[i].To)) {
			// do not relay messages to a member that has left the cluster
			ms[i].To = 0
		}
		if s.cfg.V3demo {
			if ms[i].Type == raftpb.MsgSnap {
				// There are two separate data store when v3 demo is enabled: the store for v2,
				// and the KV for v3.
				// The msgSnap only contains the most recent snapshot of store without KV.
				// So we need to redirect the msgSnap to etcd server main loop for merging in the
				// current store snapshot and KV snapshot.
				select {
				case s.msgSnapC <- ms[i]:
				default:
					// drop msgSnap if the inflight chan is full.
				}
				// zeroing To drops the raw message here; the merged
				// snapshot (if queued above) is sent separately.
				ms[i].To = 0
			}
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := s.r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (deadline exceeded for %v)", exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}
	s.r.transport.Send(ms)
}
// sendMergedSnap hands a merged (v2 store + v3 KV) snapshot message to the
// transport and tracks it as in flight until the transfer finishes; while
// any snapshot is in flight, snapshot-triggered compaction is held off.
func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
	atomic.AddInt64(&s.inflightSnapshots, 1)

	s.r.transport.SendSnapshot(merged)
	go func() {
		select {
		case <-s.done:
			return
		case ok := <-merged.CloseNotify():
			// delay releasing inflight snapshot for another 30 seconds to
			// block log compaction.
			// If the follower still fails to catch up, it is probably just too slow
			// to catch up. We cannot avoid the snapshot cycle anyway.
			if ok {
				select {
				case <-time.After(releaseDelayAfterSnapshot):
				case <-s.done:
				}
			}
			atomic.AddInt64(&s.inflightSnapshots, -1)
		}
	}()
}
// apply takes entries received from Raft (after it has been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
var applied uint64
var shouldstop bool
var err error
for i := range es {
e := es[i]
// set the consistent index of current executing entry
s.consistIndex.setConsistentIndex(e.Index)
switch e.Type {
case raftpb.EntryNormal:
// raft state machine may generate noop entry when leader confirmation.
@@ -770,12 +1018,15 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint
case raftReq.V2 != nil:
req := raftReq.V2
s.w.Trigger(req.ID, s.applyRequest(*req))
default:
s.w.Trigger(raftReq.ID, s.applyV3Request(&raftReq))
}
}
case raftpb.EntryConfChange:
var cc raftpb.ConfChange
pbutil.MustUnmarshal(&cc, e.Data)
shouldstop, err = s.applyConfChange(cc, confState)
removedSelf, err := s.applyConfChange(cc, confState)
shouldstop = shouldstop || removedSelf
s.w.Trigger(cc.ID, err)
default:
plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
@@ -793,24 +1044,30 @@ func (s *EtcdServer) applyRequest(r pb.Request) Response {
f := func(ev *store.Event, err error) Response {
return Response{Event: ev, err: err}
}
expr := timeutil.UnixNanoToTime(r.Expiration)
refresh, _ := pbutil.GetBool(r.Refresh)
ttlOptions := store.TTLOptionSet{Refresh: refresh}
if r.Expiration != 0 {
ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
}
switch r.Method {
case "POST":
return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
return f(s.store.Create(r.Path, r.Dir, r.Val, true, ttlOptions))
case "PUT":
exists, existsSet := pbutil.GetBool(r.PrevExist)
switch {
case existsSet:
if exists {
if r.PrevIndex == 0 && r.PrevValue == "" {
return f(s.store.Update(r.Path, r.Val, expr))
return f(s.store.Update(r.Path, r.Val, ttlOptions))
} else {
return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
}
}
return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
return f(s.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
case r.PrevIndex > 0 || r.PrevValue != "":
return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
default:
// TODO (yicheng): cluster should be the owner of cluster prefix store
// we should not modify cluster store here.
@@ -820,12 +1077,15 @@ func (s *EtcdServer) applyRequest(r pb.Request) Response {
if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
}
s.cluster.UpdateAttributes(id, attr)
ok := s.cluster.UpdateAttributes(id, attr)
if !ok {
return Response{}
}
}
if r.Path == path.Join(StoreClusterPrefix, "version") {
s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)))
}
return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
return f(s.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
}
case "DELETE":
switch {
@@ -918,7 +1178,14 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
}
plog.Panicf("unexpected create snapshot error %v", err)
}
if err := s.r.storage.SaveSnap(snap); err != nil {
if s.cfg.V3demo {
// commit v3 storage because WAL file before snapshot index
// could be removed after SaveSnap.
s.getKV().Commit()
}
// SaveSnap saves the snapshot and releases the locked wal files
// to the snapshot index.
if err = s.r.storage.SaveSnap(snap); err != nil {
plog.Fatalf("save snapshot error: %v", err)
}
plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
@@ -952,7 +1219,7 @@ func (s *EtcdServer) ClusterVersion() *semver.Version {
return s.cluster.Version()
}
// monitorVersions checks the member's version every monitorVersion interval.
// monitorVersions checks the member's version every monitorVersionInterval.
// It updates the cluster version if all members agrees on a higher one.
// It prints out log if there is a member with a higher version than the
// local version.
@@ -969,9 +1236,9 @@ func (s *EtcdServer) monitorVersions() {
continue
}
v := decideClusterVersion(getVersions(s.cluster, s.id, s.cfg.Transport))
v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt))
if v != nil {
// only keep major.minor version for comparasion
// only keep major.minor version for comparison
v = &semver.Version{
Major: v.Major,
Minor: v.Minor,
@@ -1054,21 +1321,11 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
}
}
// isConnectedToQuorumSince checks whether the local member is connected to the
// quorum of the cluster since the given time.
func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*Member) bool {
var connectedNum int
for _, m := range members {
if m.ID == self || isConnectedSince(transport, since, m.ID) {
connectedNum++
}
}
return connectedNum >= (len(members)+1)/2
func (s *EtcdServer) getKV() dstorage.ConsistentWatchableKV { return s.kv }
// Backend returns the current storage backend. Access is guarded by bemu
// because a snapshot restore may swap the backend concurrently.
func (s *EtcdServer) Backend() backend.Backend {
	s.bemu.Lock()
	be := s.be
	s.bemu.Unlock()
	return be
}
// isConnectedSince checks whether the local member is connected to the
// remote member since the given time.
func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
t := transport.ActiveSince(remote)
return !t.IsZero() && t.Before(since)
}
func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }

View File

@@ -0,0 +1,71 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"io"
"log"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/storage/backend"
)
// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
// as ReadCloser.
// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
// as ReadCloser.
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapi uint64, confState raftpb.ConfState) snap.Message {
	// the term for the snapshot index must be available in raft storage
	snapt, err := s.r.raftStorage.Term(snapi)
	if err != nil {
		// NOTE(review): this path panics via log.Panicf while the store-save
		// path below uses plog.Panicf — consider unifying on plog.
		log.Panicf("get term should never fail: %v", err)
	}

	// get a snapshot of v2 store as []byte
	clone := s.store.Clone()
	d, err := clone.SaveNoCopy()
	if err != nil {
		plog.Panicf("store save should never fail: %v", err)
	}

	// get a snapshot of v3 KV as readCloser
	rc := newSnapshotReaderCloser(s.be.Snapshot())

	// put the []byte snapshot of store into raft snapshot and return the merged snapshot with
	// KV readCloser snapshot.
	snapshot := raftpb.Snapshot{
		Metadata: raftpb.SnapshotMetadata{
			Index:     snapi,
			Term:      snapt,
			ConfState: confState,
		},
		Data: d,
	}
	m.Snapshot = snapshot

	return *snap.NewMessage(m, rc)
}
// newSnapshotReaderCloser streams the given backend snapshot through a
// pipe and returns the read end. A dedicated goroutine drives the write
// side, closes the pipe (propagating any write error to the reader), and
// releases the snapshot when the transfer is done.
func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		written, err := snapshot.WriteTo(pw)
		if err == nil {
			plog.Infof("wrote database snapshot out [total bytes: %d]", written)
		}
		pw.CloseWithError(err)
		snapshot.Close()
	}()
	return pr
}

View File

@@ -24,6 +24,7 @@ import (
// LeaderStats is used by the leader in an etcd cluster, and encapsulates
// statistics about communication with its followers
type LeaderStats struct {
// Leader is the ID of the leader in the etcd cluster.
// TODO(jonboulle): clarify that these are IDs, not names
Leader string `json:"leader"`
Followers map[string]*FollowerStats `json:"followers"`

View File

@@ -27,6 +27,7 @@ import (
// communication with other members of the cluster
type ServerStats struct {
Name string `json:"name"`
// ID is the raft ID of the node.
// TODO(jonboulle): use ID instead of name?
ID string `json:"id"`
State raft.StateType `json:"state"`

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stats defines a standard interface for etcd cluster statistics.
package stats
import "github.com/coreos/pkg/capnslog"

View File

@@ -35,6 +35,9 @@ type Storage interface {
Save(st raftpb.HardState, ents []raftpb.Entry) error
// SaveSnap function saves snapshot to the underlying stable storage.
SaveSnap(snap raftpb.Snapshot) error
// DBFilePath returns the file path of database snapshot saved with given
// id.
DBFilePath(id uint64) (string, error)
// Close closes the Storage and performs finalization.
Close() error
}
@@ -104,7 +107,7 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID,
return
}
// upgradeWAL converts an older version of the etcdServer data to the newest version.
// upgradeDataDir converts an older version of the etcdServer data to the newest version.
// It must ensure that, after upgrading, the most recent version is present.
func upgradeDataDir(baseDataDir string, name string, ver version.DataDirVersion) error {
switch ver {

View File

@@ -0,0 +1,41 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"time"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
)
// isConnectedToQuorumSince checks whether the local member is connected to the
// quorum of the cluster since the given time.
// isConnectedToQuorumSince checks whether the local member is connected to the
// quorum of the cluster since the given time.
func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*Member) bool {
	connected := 0
	for _, member := range members {
		// the local member always counts itself as connected
		if member.ID == self || isConnectedSince(transport, since, member.ID) {
			connected++
		}
	}
	quorum := (len(members) + 1) / 2
	return connected >= quorum
}
// isConnectedSince checks whether the local member is connected to the
// remote member since the given time.
func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
t := transport.ActiveSince(remote)
return !t.IsZero() && t.Before(since)
}

View File

@@ -16,70 +16,455 @@ package etcdserver
import (
"bytes"
"fmt"
"sort"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/lease/leasehttp"
dstorage "github.com/coreos/etcd/storage"
"github.com/coreos/etcd/storage/storagepb"
"github.com/gogo/protobuf/proto"
"golang.org/x/net/context"
)
type V3DemoServer interface {
V3DemoDo(ctx context.Context, r pb.InternalRaftRequest) proto.Message
const (
// the max request size that raft accepts.
// TODO: make this a flag? But we probably do not want to
// accept large request which might block raft stream. User
// specify a large value might end up with shooting in the foot.
maxRequestBytes = 1.5 * 1024 * 1024
)
// RaftKV is the v3 key-value API of the etcd server. Mutating calls are
// proposed through raft and return once applied; Range may be answered
// from the local KV when the request is serializable.
type RaftKV interface {
	// Range reads keys in a range from the key-value store.
	Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
	// Put writes a key-value pair (optionally attached to a lease).
	Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
	// DeleteRange removes keys in a range.
	DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
	// Txn executes a compare-and-branch transaction atomically.
	Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
	// Compact discards key history up to the requested revision.
	Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
	// Hash returns a hash of the backend state (served locally).
	Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error)
}
func (s *EtcdServer) V3DemoDo(ctx context.Context, r pb.InternalRaftRequest) proto.Message {
type Lessor interface {
// LeaseCreate sends LeaseCreate request to raft and apply it after committed.
LeaseCreate(ctx context.Context, r *pb.LeaseCreateRequest) (*pb.LeaseCreateResponse, error)
// LeaseRevoke sends LeaseRevoke request to raft and apply it after committed.
LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
// LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error
// is returned.
LeaseRenew(id lease.LeaseID) (int64, error)
}
type Authenticator interface {
AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
}
// Range executes a range request. Serializable reads are answered from
// the local KV without consensus; all other reads go through raft.
func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
	if r.Serializable {
		return applyRange(noTxn, s.kv, r)
	}
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
	if err != nil {
		return nil, err
	}
	resp := res.resp.(*pb.RangeResponse)
	return resp, res.err
}
// Put proposes a put request through raft and returns the applied response.
func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r})
	if err != nil {
		return nil, err
	}
	resp := res.resp.(*pb.PutResponse)
	return resp, res.err
}
// DeleteRange proposes a range deletion through raft and returns the
// applied response.
func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
	if err != nil {
		return nil, err
	}
	resp := res.resp.(*pb.DeleteRangeResponse)
	return resp, res.err
}
// Txn proposes a transaction through raft and returns the applied response.
func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	if err != nil {
		return nil, err
	}
	resp := res.resp.(*pb.TxnResponse)
	return resp, res.err
}
// Compact proposes a compaction through raft and stamps the response
// header with the KV store's current revision.
func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Compaction: r})
	if err != nil {
		return nil, err
	}
	// the applied response (or its header) may be nil; normalize before
	// filling in the revision
	resp := res.resp.(*pb.CompactionResponse)
	if resp == nil {
		resp = &pb.CompactionResponse{}
	}
	if resp.Header == nil {
		resp.Header = &pb.ResponseHeader{}
	}
	resp.Header.Revision = s.kv.Rev()
	return resp, res.err
}
// Hash computes the hash of the local backend and returns it together
// with the KV store's current revision; it does not go through raft.
func (s *EtcdServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
	h, err := s.be.Hash()
	if err != nil {
		return nil, err
	}
	resp := &pb.HashResponse{
		Header: &pb.ResponseHeader{Revision: s.kv.Rev()},
		Hash:   h,
	}
	return resp, nil
}
// LeaseCreate proposes a lease-creation request through raft. If the
// caller supplied no lease ID, a positive one is generated first.
func (s *EtcdServer) LeaseCreate(ctx context.Context, r *pb.LeaseCreateRequest) (*pb.LeaseCreateResponse, error) {
	// no id given? choose one
	for r.ID == int64(lease.NoLease) {
		// only use positive int64 id's (mask off the sign bit)
		r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
	}
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseCreate: r})
	if err != nil {
		return nil, err
	}
	resp := res.resp.(*pb.LeaseCreateResponse)
	return resp, res.err
}
// LeaseRevoke proposes a lease-revocation request through raft and
// returns the applied response.
func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	if err != nil {
		return nil, err
	}
	resp := res.resp.(*pb.LeaseRevokeResponse)
	return resp, res.err
}
// LeaseRenew renews the lease with the given ID and returns the renewed
// TTL. If this member is the lessor primary the renewal is handled
// locally; otherwise (ErrNotPrimary) the request is forwarded to the
// leader over HTTP, waiting up to five election intervals for a leader
// to appear. Renewals do not go through raft.
func (s *EtcdServer) LeaseRenew(id lease.LeaseID) (int64, error) {
	ttl, err := s.lessor.Renew(id)
	if err == nil {
		return ttl, nil
	}
	// any error other than "not the primary lessor" is terminal
	if err != lease.ErrNotPrimary {
		return -1, err
	}

	// renewals don't go through raft; forward to leader manually
	leader := s.cluster.Member(s.Leader())
	for i := 0; i < 5 && leader == nil; i++ {
		// wait an election
		dur := time.Duration(s.cfg.ElectionTicks) * time.Duration(s.cfg.TickMs) * time.Millisecond
		select {
		case <-time.After(dur):
			leader = s.cluster.Member(s.Leader())
		case <-s.done:
			return -1, ErrStopped
		}
	}
	if leader == nil || len(leader.PeerURLs) == 0 {
		return -1, ErrNoLeader
	}

	// try each advertised peer URL until one renewal succeeds; the last
	// error is returned if all attempts fail
	for _, url := range leader.PeerURLs {
		lurl := url + "/leases"
		ttl, err = leasehttp.RenewHTTP(id, lurl, s.peerRt, s.cfg.peerDialTimeout())
		if err == nil {
			break
		}
	}
	return ttl, err
}
// AuthEnable proposes an auth-enable request through raft and returns
// the applied response.
func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
	res, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthEnable: r})
	if err != nil {
		return nil, err
	}
	resp := res.resp.(*pb.AuthEnableResponse)
	return resp, res.err
}
// applyResult couples the proto response of an applied internal raft
// request with any error produced while applying it.
type applyResult struct {
	resp proto.Message
	err  error
}
// processInternalRaftRequest assigns the request a unique ID, proposes
// it to raft, and blocks until the request has been applied, the context
// is done, or the server stops. The apply path delivers the result
// through the wait registry keyed by the request ID.
func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return nil, err
	}

	// reject oversized proposals so one request cannot block the raft
	// stream (see maxRequestBytes)
	if len(data) > maxRequestBytes {
		return nil, ErrRequestTooLarge
	}

	// register the waiter before proposing so the apply path can always
	// find it
	ch := s.w.Register(r.ID)

	// NOTE(review): Propose's error is ignored here; cancellation and
	// shutdown are handled by the select below — confirm intentional.
	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		return x.(*applyResult), nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return nil, ctx.Err()
	case <-s.done:
		return nil, ErrStopped
	}
}
// Watchable returns a watchable interface attached to the etcdserver.
func (s *EtcdServer) Watchable() dstorage.Watchable {
return s.getKV()
}
const (
// noTxn is an invalid txn ID.
// To apply with independent Range, Put, Delete, you can pass noTxn
// to apply functions instead of a valid txn ID.
noTxn = -1
)
func (s *EtcdServer) applyV3Request(r *pb.InternalRaftRequest) interface{} {
kv := s.getKV()
le := s.lessor
ar := &applyResult{}
switch {
case r.Range != nil:
return doRange(s.kv, r.Range)
ar.resp, ar.err = applyRange(noTxn, kv, r.Range)
case r.Put != nil:
return doPut(s.kv, r.Put)
ar.resp, ar.err = applyPut(noTxn, kv, le, r.Put)
case r.DeleteRange != nil:
return doDeleteRange(s.kv, r.DeleteRange)
ar.resp, ar.err = applyDeleteRange(noTxn, kv, r.DeleteRange)
case r.Txn != nil:
return doTxn(s.kv, r.Txn)
ar.resp, ar.err = applyTxn(kv, le, r.Txn)
case r.Compaction != nil:
ar.resp, ar.err = applyCompaction(kv, r.Compaction)
case r.LeaseCreate != nil:
ar.resp, ar.err = applyLeaseCreate(le, r.LeaseCreate)
case r.LeaseRevoke != nil:
ar.resp, ar.err = applyLeaseRevoke(le, r.LeaseRevoke)
case r.AuthEnable != nil:
ar.resp, ar.err = applyAuthEnable(s)
default:
panic("not implemented")
}
return ar
}
func doPut(kv dstorage.KV, p *pb.PutRequest) *pb.PutResponse {
func applyPut(txnID int64, kv dstorage.KV, le lease.Lessor, p *pb.PutRequest) (*pb.PutResponse, error) {
resp := &pb.PutResponse{}
resp.Header = &pb.ResponseHeader{}
rev := kv.Put(p.Key, p.Value)
var (
rev int64
err error
)
if txnID != noTxn {
rev, err = kv.TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease))
if err != nil {
return nil, err
}
} else {
leaseID := lease.LeaseID(p.Lease)
if leaseID != lease.NoLease {
if l := le.Lookup(leaseID); l == nil {
return nil, lease.ErrLeaseNotFound
}
}
rev = kv.Put(p.Key, p.Value, leaseID)
}
resp.Header.Revision = rev
return resp
return resp, nil
}
func doRange(kv dstorage.KV, r *pb.RangeRequest) *pb.RangeResponse {
// kvSort provides the Len/Swap halves of sort.Interface over a slice of
// KeyValues; the kvSortBy* wrappers each supply Less for one sort target.
type kvSort struct{ kvs []storagepb.KeyValue }

func (s *kvSort) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] }
func (s *kvSort) Len() int      { return len(s.kvs) }

// kvSortByKey orders lexically by key bytes.
type kvSortByKey struct{ *kvSort }

func (s *kvSortByKey) Less(i, j int) bool {
	return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
}

// kvSortByVersion orders by per-key version.
// Direct comparison is used instead of the subtract-and-test idiom
// ((a - b) < 0), which can overflow int64.
type kvSortByVersion struct{ *kvSort }

func (s *kvSortByVersion) Less(i, j int) bool {
	return s.kvs[i].Version < s.kvs[j].Version
}

// kvSortByCreate orders by creation revision.
type kvSortByCreate struct{ *kvSort }

func (s *kvSortByCreate) Less(i, j int) bool {
	return s.kvs[i].CreateRevision < s.kvs[j].CreateRevision
}

// kvSortByMod orders by modification revision.
type kvSortByMod struct{ *kvSort }

func (s *kvSortByMod) Less(i, j int) bool {
	return s.kvs[i].ModRevision < s.kvs[j].ModRevision
}

// kvSortByValue orders lexically by value bytes.
type kvSortByValue struct{ *kvSort }

func (s *kvSortByValue) Less(i, j int) bool {
	return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
}
func applyRange(txnID int64, kv dstorage.KV, r *pb.RangeRequest) (*pb.RangeResponse, error) {
resp := &pb.RangeResponse{}
resp.Header = &pb.ResponseHeader{}
kvs, rev, err := kv.Range(r.Key, r.RangeEnd, r.Limit, 0)
var (
kvs []storagepb.KeyValue
rev int64
err error
)
if isGteRange(r.RangeEnd) {
r.RangeEnd = []byte{}
}
limit := r.Limit
if r.SortOrder != pb.RangeRequest_NONE {
// fetch everything; sort and truncate afterwards
limit = 0
}
if limit > 0 {
// fetch one extra for 'more' flag
limit = limit + 1
}
if txnID != noTxn {
kvs, rev, err = kv.TxnRange(txnID, r.Key, r.RangeEnd, limit, r.Revision)
if err != nil {
panic("not handled error")
return nil, err
}
} else {
kvs, rev, err = kv.Range(r.Key, r.RangeEnd, limit, r.Revision)
if err != nil {
return nil, err
}
}
if r.SortOrder != pb.RangeRequest_NONE {
var sorter sort.Interface
switch {
case r.SortTarget == pb.RangeRequest_KEY:
sorter = &kvSortByKey{&kvSort{kvs}}
case r.SortTarget == pb.RangeRequest_VERSION:
sorter = &kvSortByVersion{&kvSort{kvs}}
case r.SortTarget == pb.RangeRequest_CREATE:
sorter = &kvSortByCreate{&kvSort{kvs}}
case r.SortTarget == pb.RangeRequest_MOD:
sorter = &kvSortByMod{&kvSort{kvs}}
case r.SortTarget == pb.RangeRequest_VALUE:
sorter = &kvSortByValue{&kvSort{kvs}}
}
switch {
case r.SortOrder == pb.RangeRequest_ASCEND:
sort.Sort(sorter)
case r.SortOrder == pb.RangeRequest_DESCEND:
sort.Sort(sort.Reverse(sorter))
}
}
if r.Limit > 0 && len(kvs) > int(r.Limit) {
kvs = kvs[:r.Limit]
resp.More = true
}
resp.Header.Revision = rev
for i := range kvs {
resp.Kvs = append(resp.Kvs, &kvs[i])
}
return resp
return resp, nil
}
func doDeleteRange(kv dstorage.KV, dr *pb.DeleteRangeRequest) *pb.DeleteRangeResponse {
func applyDeleteRange(txnID int64, kv dstorage.KV, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
resp := &pb.DeleteRangeResponse{}
resp.Header = &pb.ResponseHeader{}
_, rev := kv.DeleteRange(dr.Key, dr.RangeEnd)
resp.Header.Revision = rev
return resp
var (
n int64
rev int64
err error
)
if isGteRange(dr.RangeEnd) {
dr.RangeEnd = []byte{}
}
func doTxn(kv dstorage.KV, rt *pb.TxnRequest) *pb.TxnResponse {
if txnID != noTxn {
n, rev, err = kv.TxnDeleteRange(txnID, dr.Key, dr.RangeEnd)
if err != nil {
return nil, err
}
} else {
n, rev = kv.DeleteRange(dr.Key, dr.RangeEnd)
}
resp.Deleted = n
resp.Header.Revision = rev
return resp, nil
}
// checkRequestLeases verifies that every put request in reqs that
// attaches a lease refers to a lease the lessor knows about.
func checkRequestLeases(le lease.Lessor, reqs []*pb.RequestUnion) error {
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestUnion_RequestPut)
		if !ok || tv.RequestPut == nil {
			continue
		}
		id := lease.LeaseID(tv.RequestPut.Lease)
		if id == lease.NoLease {
			// no lease attached; nothing to verify
			continue
		}
		if le.Lookup(id) == nil {
			return lease.ErrLeaseNotFound
		}
	}
	return nil
}
// checkRequestRange verifies that every range request in reqs asking for
// an explicit revision targets one the KV store can still serve.
func checkRequestRange(kv dstorage.KV, reqs []*pb.RequestUnion) error {
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestUnion_RequestRange)
		if !ok || tv.RequestRange == nil {
			continue
		}
		rev := tv.RequestRange.Revision
		if rev == 0 {
			// revision 0 means "current"; always servable
			continue
		}
		if rev > kv.Rev() {
			return dstorage.ErrFutureRev
		}
		if rev < kv.FirstRev() {
			return dstorage.ErrCompacted
		}
	}
	return nil
}
func applyTxn(kv dstorage.KV, le lease.Lessor, rt *pb.TxnRequest) (*pb.TxnResponse, error) {
var revision int64
ok := true
for _, c := range rt.Compare {
if revision, ok = doCompare(kv, c); !ok {
if revision, ok = applyCompare(kv, c); !ok {
break
}
}
@@ -90,10 +475,29 @@ func doTxn(kv dstorage.KV, rt *pb.TxnRequest) *pb.TxnResponse {
} else {
reqs = rt.Failure
}
if err := checkRequestLeases(le, reqs); err != nil {
return nil, err
}
if err := checkRequestRange(kv, reqs); err != nil {
return nil, err
}
// When executing the operations of txn, we need to hold the txn lock.
// So the reader will not see any intermediate results.
txnID := kv.TxnBegin()
defer func() {
err := kv.TxnEnd(txnID)
if err != nil {
panic(fmt.Sprint("unexpected error when closing txn", txnID))
}
}()
resps := make([]*pb.ResponseUnion, len(reqs))
for i := range reqs {
resps[i] = doUnion(kv, reqs[i])
resps[i] = applyUnion(txnID, kv, reqs[i])
}
if len(resps) != 0 {
revision += 1
}
@@ -103,22 +507,46 @@ func doTxn(kv dstorage.KV, rt *pb.TxnRequest) *pb.TxnResponse {
txnResp.Header.Revision = revision
txnResp.Responses = resps
txnResp.Succeeded = ok
return txnResp
return txnResp, nil
}
func doUnion(kv dstorage.KV, union *pb.RequestUnion) *pb.ResponseUnion {
// applyCompaction compacts the KV store at the requested revision and
// reports the store's current revision in the response header.
func applyCompaction(kv dstorage.KV, compaction *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	resp := &pb.CompactionResponse{Header: &pb.ResponseHeader{}}
	if err := kv.Compact(compaction.Revision); err != nil {
		return nil, err
	}
	// get the current revision. which key to get is not important.
	_, rev, _ := kv.Range([]byte("compaction"), nil, 1, 0)
	resp.Header.Revision = rev
	return resp, nil
}
func applyUnion(txnID int64, kv dstorage.KV, union *pb.RequestUnion) *pb.ResponseUnion {
switch tv := union.Request.(type) {
case *pb.RequestUnion_RequestRange:
if tv.RequestRange != nil {
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{ResponseRange: doRange(kv, tv.RequestRange)}}
resp, err := applyRange(txnID, kv, tv.RequestRange)
if err != nil {
panic("unexpected error during txn")
}
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{ResponseRange: resp}}
}
case *pb.RequestUnion_RequestPut:
if tv.RequestPut != nil {
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponsePut{ResponsePut: doPut(kv, tv.RequestPut)}}
resp, err := applyPut(txnID, kv, nil, tv.RequestPut)
if err != nil {
panic("unexpected error during txn")
}
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponsePut{ResponsePut: resp}}
}
case *pb.RequestUnion_RequestDeleteRange:
if tv.RequestDeleteRange != nil {
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseDeleteRange{ResponseDeleteRange: doDeleteRange(kv, tv.RequestDeleteRange)}}
resp, err := applyDeleteRange(txnID, kv, tv.RequestDeleteRange)
if err != nil {
panic("unexpected error during txn")
}
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseDeleteRange{ResponseDeleteRange: resp}}
}
default:
// empty union
@@ -127,14 +555,29 @@ func doUnion(kv dstorage.KV, union *pb.RequestUnion) *pb.ResponseUnion {
return nil
}
func doCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) {
// applyCompare applies the compare request.
// It returns the revision at which the comparison happens. If the comparison
// succeeds, the it returns true. Otherwise it returns false.
func applyCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) {
ckvs, rev, err := kv.Range(c.Key, nil, 1, 0)
if err != nil {
if err == dstorage.ErrTxnIDMismatch {
panic("unexpected txn ID mismatch error")
}
return rev, false
}
var ckv storagepb.KeyValue
if len(ckvs) != 0 {
ckv = ckvs[0]
} else {
// Use the zero value of ckv normally. However...
if c.Target == pb.Compare_VALUE {
// Always fail if we're comparing a value on a key that doesn't exist.
// We can treat non-existence as the empty set explicitly, such that
// even a key with a value of length 0 bytes is still a real key
// that was written that way
return rev, false
}
}
// -1 is less, 0 is equal, 1 is greater
@@ -180,6 +623,22 @@ func doCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) {
return rev, true
}
// applyLeaseCreate grants a lease with the requested ID and TTL through the
// lessor and echoes the granted ID/TTL back in the response. On a grant
// error the (empty) response is still returned alongside the error.
func applyLeaseCreate(le lease.Lessor, lc *pb.LeaseCreateRequest) (*pb.LeaseCreateResponse, error) {
	resp := &pb.LeaseCreateResponse{}
	l, err := le.Grant(lease.LeaseID(lc.ID), lc.TTL)
	if err == nil {
		resp.ID = int64(l.ID)
		resp.TTL = l.TTL
	}
	return resp, err
}

// applyLeaseRevoke revokes the lease named in the request; any error comes
// straight from the lessor (e.g. lease.ErrLeaseNotFound).
func applyLeaseRevoke(le lease.Lessor, lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	return &pb.LeaseRevokeResponse{}, le.Revoke(lease.LeaseID(lc.ID))
}
func compareInt64(a, b int64) int {
switch {
case a < b:
@@ -190,3 +649,14 @@ func compareInt64(a, b int64) int {
return 0
}
}
// isGteRange determines if the range end is a >= range. This works around grpc
// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
func isGteRange(rangeEnd []byte) bool {
	if len(rangeEnd) != 1 {
		return false
	}
	return rangeEnd[0] == 0
}
// applyAuthEnable turns on authentication in the server's auth store.
// The call cannot fail here; the error result exists only to match the
// signature shape of the other apply* helpers.
func applyAuthEnable(s *EtcdServer) (*pb.AuthEnableResponse, error) {
	s.AuthStore().AuthEnable()
	return &pb.AuthEnableResponse{}, nil
}

View File

@@ -0,0 +1,112 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leasehttp
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
)
// NewHandler returns an http Handler for lease renewals
func NewHandler(l lease.Lessor) http.Handler {
return &leaseHandler{l}
}
type leaseHandler struct{ l lease.Lessor }
// ServeHTTP handles a lease keep-alive POST: it decodes a protobuf
// LeaseKeepAliveRequest from the body, renews the lease, and replies with
// a protobuf LeaseKeepAliveResponse. Unknown leases map to 404, malformed
// requests to 400.
func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}

	body, rerr := ioutil.ReadAll(r.Body)
	if rerr != nil {
		http.Error(w, "error reading body", http.StatusBadRequest)
		return
	}

	var lreq pb.LeaseKeepAliveRequest
	if uerr := lreq.Unmarshal(body); uerr != nil {
		http.Error(w, "error unmarshalling request", http.StatusBadRequest)
		return
	}

	ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
	if err != nil {
		// Distinguish "lease unknown" from every other renewal failure.
		status := http.StatusBadRequest
		if err == lease.ErrLeaseNotFound {
			status = http.StatusNotFound
		}
		http.Error(w, err.Error(), status)
		return
	}

	// TODO: fill out ResponseHeader
	out, merr := (&pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}).Marshal()
	if merr != nil {
		http.Error(w, merr.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/protobuf")
	w.Write(out)
}
// RenewHTTP renews a lease at a given primary server.
// TODO: Batch request in future?
// RenewHTTP renews a lease at a given primary server.
// It POSTs a protobuf-encoded LeaseKeepAliveRequest to url and returns the
// renewed TTL; lease.ErrLeaseNotFound when the primary does not know the
// lease; or an error describing any transport/decoding failure.
// TODO: Batch request in future?
func RenewHTTP(id lease.LeaseID, url string, rt http.RoundTripper, timeout time.Duration) (int64, error) {
	// will post lreq protobuf to leader
	lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal()
	if err != nil {
		return -1, err
	}

	cc := &http.Client{Transport: rt, Timeout: timeout}
	resp, err := cc.Post(url, "application/protobuf", bytes.NewReader(lreq))
	if err != nil {
		// TODO detect if leader failed and retry?
		return -1, err
	}
	// The body must be closed so the transport can reuse the connection;
	// the original code leaked it on every call.
	defer resp.Body.Close()

	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return -1, err
	}

	if resp.StatusCode == http.StatusNotFound {
		return -1, lease.ErrLeaseNotFound
	}
	if resp.StatusCode != http.StatusOK {
		return -1, fmt.Errorf("lease: unknown error(%s)", string(b))
	}

	lresp := &pb.LeaseKeepAliveResponse{}
	if err := lresp.Unmarshal(b); err != nil {
		return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
	}
	if lresp.ID != int64(id) {
		return -1, fmt.Errorf("lease: renew id mismatch")
	}
	return lresp.TTL, nil
}

View File

@@ -0,0 +1,314 @@
// Code generated by protoc-gen-gogo.
// source: lease.proto
// DO NOT EDIT!
/*
Package leasepb is a generated protocol buffer package.
It is generated from these files:
lease.proto
It has these top-level messages:
Lease
*/
package leasepb
import (
"fmt"
proto "github.com/gogo/protobuf/proto"
)
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Lease is the persisted representation of a lease: its identifier and
// time-to-live in seconds. This type and everything below is
// protoc-gen-gogo output (see the file header); do not hand-edit logic.
type Lease struct {
	ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
	TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
}

func (m *Lease) Reset() { *m = Lease{} }
func (m *Lease) String() string { return proto.CompactTextString(m) }
func (*Lease) ProtoMessage() {}

func init() {
	proto.RegisterType((*Lease)(nil), "leasepb.Lease")
}

// Marshal encodes the Lease into a freshly allocated wire-format buffer.
func (m *Lease) Marshal() (data []byte, err error) {
	size := m.Size()
	data = make([]byte, size)
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}

// MarshalTo encodes the Lease into data (which must be at least Size()
// bytes) and returns the number of bytes written. Zero-valued fields are
// omitted, per proto3 semantics.
func (m *Lease) MarshalTo(data []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.ID != 0 {
		data[i] = 0x8 // tag: field 1, wire type 0 (varint)
		i++
		i = encodeVarintLease(data, i, uint64(m.ID))
	}
	if m.TTL != 0 {
		data[i] = 0x10 // tag: field 2, wire type 0 (varint)
		i++
		i = encodeVarintLease(data, i, uint64(m.TTL))
	}
	return i, nil
}

// encodeFixed64Lease writes v little-endian at offset and returns the new offset.
func encodeFixed64Lease(data []byte, offset int, v uint64) int {
	data[offset] = uint8(v)
	data[offset+1] = uint8(v >> 8)
	data[offset+2] = uint8(v >> 16)
	data[offset+3] = uint8(v >> 24)
	data[offset+4] = uint8(v >> 32)
	data[offset+5] = uint8(v >> 40)
	data[offset+6] = uint8(v >> 48)
	data[offset+7] = uint8(v >> 56)
	return offset + 8
}

// encodeFixed32Lease writes v little-endian at offset and returns the new offset.
func encodeFixed32Lease(data []byte, offset int, v uint32) int {
	data[offset] = uint8(v)
	data[offset+1] = uint8(v >> 8)
	data[offset+2] = uint8(v >> 16)
	data[offset+3] = uint8(v >> 24)
	return offset + 4
}

// encodeVarintLease writes v as a protobuf varint at offset and returns the
// new offset.
func encodeVarintLease(data []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		data[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	data[offset] = uint8(v)
	return offset + 1
}

// Size returns the encoded size of the message in bytes.
func (m *Lease) Size() (n int) {
	var l int
	_ = l
	if m.ID != 0 {
		n += 1 + sovLease(uint64(m.ID))
	}
	if m.TTL != 0 {
		n += 1 + sovLease(uint64(m.TTL))
	}
	return n
}

// sovLease returns the varint-encoded size of x.
func sovLease(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sozLease returns the zigzag varint-encoded size of x.
func sozLease(x uint64) (n int) {
	return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// Unmarshal decodes a wire-format Lease from data into m, skipping unknown
// fields. (protoc-gen-gogo output; do not hand-edit.)
func (m *Lease) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowLease
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Lease: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
			}
			m.ID = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowLease
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.ID |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
			}
			m.TTL = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowLease
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.TTL |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip over its payload.
			iNdEx = preIndex
			skippy, err := skipLease(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthLease
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// skipLease returns the number of bytes occupied by the next field in data,
// whatever its wire type, so unknown fields can be skipped.
func skipLease(data []byte) (n int, err error) {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowLease
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// varint: scan to the terminating byte
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowLease
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if data[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			// fixed64
			iNdEx += 8
			return iNdEx, nil
		case 2:
			// length-delimited: read the length prefix, then skip it
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowLease
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthLease
			}
			return iNdEx, nil
		case 3:
			// start group: skip nested fields until the matching end group
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowLease
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := data[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipLease(data[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			// end group
			return iNdEx, nil
		case 5:
			// fixed32
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}

var (
	ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowLease = fmt.Errorf("proto: integer overflow")
)

View File

@@ -0,0 +1,15 @@
// lease.proto defines the wire format used to persist leases in the
// backend; leasepb/lease.pb.go is generated from it with gogoproto.
syntax = "proto3";
package leasepb;

import "gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_enum_prefix_all) = false;

// Lease persists a lease ID together with its TTL in seconds.
message Lease {
	int64 ID = 1;
	int64 TTL = 2;
}

View File

@@ -0,0 +1,507 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lease
import (
"encoding/binary"
"errors"
"math"
"sync"
"time"
"github.com/coreos/etcd/lease/leasepb"
"github.com/coreos/etcd/storage/backend"
)
const (
	// NoLease is a special LeaseID representing the absence of a lease.
	NoLease = LeaseID(0)
)

var (
	// minLeaseTTL is the floor (in seconds) that Grant/Renew clamp TTLs to.
	minLeaseTTL = int64(5)

	// leaseBucketName is the backend bucket holding persisted leases.
	leaseBucketName = []byte("lease")
	// do not use maxInt64 since it can overflow time which will add
	// the offset of unix time (1970yr to seconds).
	forever = time.Unix(math.MaxInt64>>1, 0)

	ErrNotPrimary = errors.New("not a primary lessor")
	ErrLeaseNotFound = errors.New("lease not found")
	ErrLeaseExists = errors.New("lease already exists")
)

// LeaseID identifies a lease.
type LeaseID int64

// RangeDeleter defines an interface with DeleteRange method.
// We define this interface only for lessor to limit the number
// of methods of storage.KV to what lessor actually needs.
//
// Having a minimum interface makes testing easy.
type RangeDeleter interface {
	DeleteRange(key, end []byte) (int64, int64)
}

// A Lessor is the owner of leases. It can grant, revoke, renew and modify leases for lessee.
type Lessor interface {
	// SetRangeDeleter sets the RangeDeleter to the Lessor.
	// Lessor deletes the items in the revoked or expired lease from the
	// the set RangeDeleter.
	SetRangeDeleter(dr RangeDeleter)

	// Grant grants a lease that expires at least after TTL seconds.
	Grant(id LeaseID, ttl int64) (*Lease, error)
	// Revoke revokes a lease with given ID. The item attached to the
	// given lease will be removed. If the ID does not exist, an error
	// will be returned.
	Revoke(id LeaseID) error

	// Attach attaches given leaseItem to the lease with given LeaseID.
	// If the lease does not exist, an error will be returned.
	Attach(id LeaseID, items []LeaseItem) error

	// Detach detaches given leaseItem from the lease with given LeaseID.
	// If the lease does not exist, an error will be returned.
	Detach(id LeaseID, items []LeaseItem) error

	// Promote promotes the lessor to be the primary lessor. Primary lessor manages
	// the expiration and renew of leases.
	// Newly promoted lessor renew the TTL of all lease to extend + previous TTL.
	Promote(extend time.Duration)

	// Demote demotes the lessor from being the primary lessor.
	Demote()

	// Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist,
	// an error will be returned.
	Renew(id LeaseID) (int64, error)

	// Lookup gives the lease at a given lease id, if any
	Lookup(id LeaseID) *Lease

	// ExpiredLeasesC returns a chan that is used to receive expired leases.
	ExpiredLeasesC() <-chan []*Lease

	// Recover recovers the lessor state from the given backend and RangeDeleter.
	Recover(b backend.Backend, rd RangeDeleter)

	// Stop stops the lessor for managing leases. The behavior of calling Stop multiple
	// times is undefined.
	Stop()
}

// lessor implements Lessor interface.
// TODO: use clockwork for testability.
type lessor struct {
	// mu guards leaseMap, rd, b and primary.
	mu sync.Mutex

	// primary indicates if this lessor is the primary lessor. The primary
	// lessor manages lease expiration and renew.
	//
	// in etcd, raft leader is the primary. Thus there might be two primary
	// leaders at the same time (raft allows concurrent leader but with different term)
	// for at most a leader election timeout.
	// The old primary leader cannot affect the correctness since its proposal has a
	// smaller term and will not be committed.
	//
	// TODO: raft follower do not forward lease management proposals. There might be a
	// very small window (within second normally which depends on go scheduling) that
	// a raft follow is the primary between the raft leader demotion and lessor demotion.
	// Usually this should not be a problem. Lease should not be that sensitive to timing.
	primary bool

	// TODO: probably this should be a heap with a secondary
	// id index.
	// Now it is O(N) to loop over the leases to find expired ones.
	// We want to make Grant, Revoke, and findExpiredLeases all O(logN) and
	// Renew O(1).
	// findExpiredLeases and Renew should be the most frequent operations.
	leaseMap map[LeaseID]*Lease

	// When a lease expires, the lessor will delete the
	// leased range (or key) by the RangeDeleter.
	rd RangeDeleter

	// backend to persist leases. We only persist lease ID and expiry for now.
	// The leased items can be recovered by iterating all the keys in kv.
	b backend.Backend

	expiredC chan []*Lease
	// stopC is a channel whose closure indicates that the lessor should be stopped.
	stopC chan struct{}
	// doneC is a channel whose closure indicates that the lessor is stopped.
	doneC chan struct{}
}
// NewLessor creates a Lessor backed by b and starts its background
// expiry loop.
func NewLessor(b backend.Backend) Lessor {
	return newLessor(b)
}

// newLessor builds the concrete lessor, recovers any persisted leases from
// the backend, and spawns the goroutine that publishes expired leases.
func newLessor(b backend.Backend) *lessor {
	l := &lessor{
		leaseMap: make(map[LeaseID]*Lease),
		b: b,
		// expiredC is a small buffered chan to avoid unnecessary blocking.
		expiredC: make(chan []*Lease, 16),
		stopC: make(chan struct{}),
		doneC: make(chan struct{}),
	}
	l.initAndRecover()

	go l.runLoop()

	return l
}
// SetRangeDeleter sets the RangeDeleter used to delete a lease's attached
// keys when the lease is revoked.
func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
	le.mu.Lock()
	defer le.mu.Unlock()

	le.rd = rd
}

// Grant creates and persists a lease with the given id and ttl.
// It returns ErrLeaseNotFound for the reserved NoLease id and ErrLeaseExists
// when the id is already in use. On a primary lessor the lease starts
// counting down immediately; otherwise it is parked at "forever" until
// this node is promoted.
// TODO: when lessor is under high load, it should give out lease
// with longer TTL to reduce renew load.
func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
	if id == NoLease {
		return nil, ErrLeaseNotFound
	}

	l := &Lease{ID: id, TTL: ttl, itemSet: make(map[LeaseItem]struct{})}

	le.mu.Lock()
	defer le.mu.Unlock()

	if _, ok := le.leaseMap[id]; ok {
		return nil, ErrLeaseExists
	}

	if le.primary {
		l.refresh(0)
	} else {
		l.forever()
	}

	le.leaseMap[id] = l
	l.persistTo(le.b)

	return l, nil
}
// Revoke revokes the lease with the given id: every attached key is deleted
// through the RangeDeleter, then the lease is removed from both the
// in-memory map and the backend. Returns ErrLeaseNotFound if no such lease
// exists.
func (le *lessor) Revoke(id LeaseID) error {
	le.mu.Lock()

	l := le.leaseMap[id]
	if l == nil {
		le.mu.Unlock()
		return ErrLeaseNotFound
	}
	// Snapshot the attached keys and the deleter while holding the lock:
	// itemSet may be mutated concurrently by Attach/Detach and rd by
	// SetRangeDeleter, so touching them after Unlock is a data race.
	keys := make([]string, 0, len(l.itemSet))
	for item := range l.itemSet {
		keys = append(keys, item.Key)
	}
	rd := le.rd

	// unlock before doing external work
	le.mu.Unlock()

	if rd != nil {
		for _, key := range keys {
			rd.DeleteRange([]byte(key), nil)
		}
	}

	le.mu.Lock()
	defer le.mu.Unlock()
	delete(le.leaseMap, l.ID)
	l.removeFrom(le.b)

	return nil
}
// Renew renews an existing lease. If the given lease does not exist or
// has expired, an error will be returned.
// Only the primary lessor serves renewals; a non-primary returns
// ErrNotPrimary so the caller can forward the request to the primary.
func (le *lessor) Renew(id LeaseID) (int64, error) {
	le.mu.Lock()
	defer le.mu.Unlock()

	if !le.primary {
		// forward renew request to primary instead of returning error.
		return -1, ErrNotPrimary
	}

	l := le.leaseMap[id]
	if l == nil {
		return -1, ErrLeaseNotFound
	}

	l.refresh(0)
	return l.TTL, nil
}
// Lookup returns the lease with the given id, or nil when the lessor does
// not know it.
func (le *lessor) Lookup(id LeaseID) *Lease {
	le.mu.Lock()
	defer le.mu.Unlock()
	// A missing key yields the zero value of *Lease, which is nil — the
	// same result the explicit two-value lookup produced.
	return le.leaseMap[id]
}
// Promote marks this lessor as primary and restarts the expiry countdown of
// every lease, extending each by `extend` beyond its TTL so leases granted
// by the previous primary are not revoked prematurely.
func (le *lessor) Promote(extend time.Duration) {
	le.mu.Lock()
	defer le.mu.Unlock()

	le.primary = true

	// refresh the expiries of all leases.
	for _, l := range le.leaseMap {
		l.refresh(extend)
	}
}

// Demote strips primary status; expiry is effectively suspended by parking
// all lease expiries at "forever" until some lessor is promoted again.
func (le *lessor) Demote() {
	le.mu.Lock()
	defer le.mu.Unlock()

	// set the expiries of all leases to forever
	for _, l := range le.leaseMap {
		l.forever()
	}

	le.primary = false
}
// Attach attaches items to the lease with given ID. When the lease
// expires, the attached items will be automatically removed.
// If the given lease does not exist, an error will be returned.
func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
	le.mu.Lock()
	defer le.mu.Unlock()

	l := le.leaseMap[id]
	if l == nil {
		return ErrLeaseNotFound
	}

	// itemSet acts as a set; re-attaching an existing item is a no-op.
	for _, it := range items {
		l.itemSet[it] = struct{}{}
	}
	return nil
}

// Detach detaches items from the lease with given ID.
// If the given lease does not exist, an error will be returned.
// Detaching an item that was never attached is a no-op.
func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
	le.mu.Lock()
	defer le.mu.Unlock()

	l := le.leaseMap[id]
	if l == nil {
		return ErrLeaseNotFound
	}

	for _, it := range items {
		delete(l.itemSet, it)
	}
	return nil
}
// Recover replaces the lessor's backend and RangeDeleter and rebuilds the
// in-memory lease map from the leases persisted in b.
func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
	le.mu.Lock()
	defer le.mu.Unlock()

	le.b = b
	le.rd = rd
	le.leaseMap = make(map[LeaseID]*Lease)

	le.initAndRecover()
}

// ExpiredLeasesC returns the channel on which runLoop publishes batches of
// expired leases.
func (le *lessor) ExpiredLeasesC() <-chan []*Lease {
	return le.expiredC
}

// Stop signals the run loop to exit and blocks until it has done so.
func (le *lessor) Stop() {
	close(le.stopC)
	<-le.doneC
}
// runLoop wakes every 500ms and, while this lessor is primary, publishes
// any expired leases on expiredC. The send is non-blocking (note the
// `default` case): if the receiver is busy, this batch is simply retried on
// a later tick since expired leases stay in leaseMap until revoked.
func (le *lessor) runLoop() {
	defer close(le.doneC)

	for {
		var ls []*Lease

		le.mu.Lock()
		if le.primary {
			ls = le.findExpiredLeases()
		}
		le.mu.Unlock()

		if len(ls) != 0 {
			select {
			case <-le.stopC:
				return
			case le.expiredC <- ls:
			default:
				// the receiver of expiredC is probably busy handling
				// other stuff
				// let's try this next time after 500ms
			}
		}

		select {
		case <-time.After(500 * time.Millisecond):
		case <-le.stopC:
			return
		}
	}
}
// findExpiredLeases loops all the leases in the leaseMap and returns the expired
// leases that needed to be revoked. The caller must hold le.mu.
func (le *lessor) findExpiredLeases() []*Lease {
	now := time.Now()
	expired := make([]*Lease, 0, 16)

	for _, l := range le.leaseMap {
		// TODO: probably should change to <= 100-500 millisecond to
		// make up committing latency.
		if !l.expiry.After(now) {
			expired = append(expired, l)
		}
	}
	return expired
}
// get gets the lease with given id.
// get is a helper function for testing, at least for now.
func (le *lessor) get(id LeaseID) *Lease {
	le.mu.Lock()
	defer le.mu.Unlock()

	return le.leaseMap[id]
}

// initAndRecover creates the lease bucket (if needed) and loads every
// persisted lease into leaseMap. Recovered leases start with an empty
// itemSet (items are re-attached while replaying the KV) and a "forever"
// expiry that is reset when this lessor is promoted.
func (le *lessor) initAndRecover() {
	tx := le.b.BatchTx()
	tx.Lock()

	tx.UnsafeCreateBucket(leaseBucketName)
	_, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0)
	// TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue.
	for i := range vs {
		var lpb leasepb.Lease
		err := lpb.Unmarshal(vs[i])
		if err != nil {
			tx.Unlock()
			panic("failed to unmarshal lease proto item")
		}
		ID := LeaseID(lpb.ID)
		le.leaseMap[ID] = &Lease{
			ID: ID,
			TTL: lpb.TTL,

			// itemSet will be filled in when recover key-value pairs
			// set expiry to forever, refresh when promoted
			itemSet: make(map[LeaseItem]struct{}),
			expiry: forever,
		}
	}
	tx.Unlock()

	le.b.ForceCommit()
}
// Lease is the in-memory state of a single lease.
type Lease struct {
	ID LeaseID
	TTL int64 // time to live in seconds

	// itemSet is the set of keys attached to this lease.
	itemSet map[LeaseItem]struct{}
	// expiry time in unixnano
	expiry time.Time
}

// persistTo writes this lease's record (ID and TTL only) into the lease
// bucket of b.
func (l Lease) persistTo(b backend.Backend) {
	key := int64ToBytes(int64(l.ID))

	lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.TTL)}
	val, err := lpb.Marshal()
	if err != nil {
		panic("failed to marshal lease proto item")
	}

	b.BatchTx().Lock()
	b.BatchTx().UnsafePut(leaseBucketName, key, val)
	b.BatchTx().Unlock()
}

// removeFrom deletes this lease's record from the lease bucket of b.
func (l Lease) removeFrom(b backend.Backend) {
	key := int64ToBytes(int64(l.ID))

	b.BatchTx().Lock()
	b.BatchTx().UnsafeDelete(leaseBucketName, key)
	b.BatchTx().Unlock()
}

// refresh refreshes the expiry of the lease. It extends the expiry at least
// minLeaseTTL second.
// Note: it also clamps l.TTL up to minLeaseTTL as a side effect.
func (l *Lease) refresh(extend time.Duration) {
	if l.TTL < minLeaseTTL {
		l.TTL = minLeaseTTL
	}
	l.expiry = time.Now().Add(extend + time.Second*time.Duration(l.TTL))
}

// forever sets the expiry of lease to be forever.
// It clamps l.TTL up to minLeaseTTL, matching refresh.
func (l *Lease) forever() {
	if l.TTL < minLeaseTTL {
		l.TTL = minLeaseTTL
	}
	l.expiry = forever
}

// LeaseItem identifies a key attached to a lease.
type LeaseItem struct {
	Key string
}
// int64ToBytes encodes n as an 8-byte big-endian value (two's complement
// for negatives), suitable for use as an ordered backend key.
func int64ToBytes(n int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(n))
	return buf
}
// FakeLessor is a fake implementation of Lessor interface.
// Used for testing only. Every method is a stub: Grant and Lookup report
// no lease, Renew always reports a TTL of 10.
type FakeLessor struct{}

func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}

func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil }
func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }

func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }

func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }

func (fl *FakeLessor) Promote(extend time.Duration) {}

func (fl *FakeLessor) Demote() {}

func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }

// Receiver renamed from `le` to `fl` for consistency with the rest of the
// FakeLessor methods.
func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil }

func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil }

func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {}

func (fl *FakeLessor) Stop() {}

View File

@@ -0,0 +1,526 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adt
import (
"math"
)
// Comparable is an interface for trichotomic comparisons.
type Comparable interface {
	// Compare gives the result of a 3-way comparison
	// a.Compare(b) = 1 => a > b
	// a.Compare(b) = 0 => a == b
	// a.Compare(b) = -1 => a < b
	Compare(c Comparable) int
}

// rbcolor is the red/black node color used for tree rebalancing.
type rbcolor bool

const black = true
const red = false

// Interval implements a Comparable interval [begin, end)
// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
type Interval struct {
	Begin Comparable
	End Comparable
}

// Compare on an interval gives == if the interval overlaps.
// Disjoint intervals order by position: -1 when ivl lies entirely to the
// left of c, 1 when entirely to the right.
func (ivl *Interval) Compare(c Comparable) int {
	ivl2 := c.(*Interval)
	ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin)
	ivbCmpEnd := ivl.Begin.Compare(ivl2.End)
	iveCmpBegin := ivl.End.Compare(ivl2.Begin)

	// ivl is left of ivl2
	if ivbCmpBegin < 0 && iveCmpBegin <= 0 {
		return -1
	}

	// iv is right of iv2
	if ivbCmpEnd >= 0 {
		return 1
	}

	return 0
}

// intervalNode is a node of the red-black interval tree.
type intervalNode struct {
	// iv is the interval-value pair entry.
	iv IntervalValue
	// max endpoint of all descendent nodes.
	max Comparable
	// left and right are sorted by low endpoint of key interval
	left, right *intervalNode
	// parent is the direct ancestor of the node
	parent *intervalNode
	c rbcolor
}
// color reports the node's color; nil nodes count as black leaves, per the
// standard red-black convention.
func (x *intervalNode) color() rbcolor {
	if x == nil {
		return black
	}
	return x.c
}

// height returns the number of levels at or below n; a nil subtree is 0.
func (n *intervalNode) height() int {
	if n == nil {
		return 0
	}
	ld := n.left.height()
	rd := n.right.height()
	if ld < rd {
		return rd + 1
	}
	return ld + 1
}

// min returns the leftmost (smallest-Begin) node of x's subtree.
func (x *intervalNode) min() *intervalNode {
	for x.left != nil {
		x = x.left
	}
	return x
}

// successor is the next in-order node in the tree
func (x *intervalNode) successor() *intervalNode {
	if x.right != nil {
		return x.right.min()
	}
	y := x.parent
	for y != nil && x == y.right {
		x = y
		y = y.parent
	}
	return y
}

// updateMax updates the maximum values for a node and its ancestors
// It walks upward and stops as soon as an ancestor's max is already correct.
func (x *intervalNode) updateMax() {
	for x != nil {
		oldmax := x.max
		max := x.iv.Ivl.End
		if x.left != nil && x.left.max.Compare(max) > 0 {
			max = x.left.max
		}
		if x.right != nil && x.right.max.Compare(max) > 0 {
			max = x.right.max
		}
		if oldmax.Compare(max) == 0 {
			break
		}
		x.max = max
		x = x.parent
	}
}
// nodeVisitor is the traversal callback; returning false stops the walk early.
type nodeVisitor func(n *intervalNode) bool

// visit will call a node visitor on each node that overlaps the given interval.
// Traversal stops as soon as the visitor returns false. (The original code
// discarded the visitor's return value, so the documented "return false to
// stop searching" contract of IntervalVisitor never actually stopped the walk.)
func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) {
	x.visitUntil(iv, nv)
}

// visitUntil performs the recursive walk and returns false once the visitor
// has asked to stop, so ancestors unwind without visiting further nodes.
func (x *intervalNode) visitUntil(iv *Interval, nv nodeVisitor) bool {
	if x == nil {
		return true
	}
	v := iv.Compare(&x.iv.Ivl)
	switch {
	case v < 0:
		// query is entirely left of this node: only the left subtree can match
		return x.left.visitUntil(iv, nv)
	case v > 0:
		// query is right of this node, but the left subtree may still reach it
		maxiv := Interval{x.iv.Ivl.Begin, x.max}
		if maxiv.Compare(iv) == 0 {
			return x.left.visitUntil(iv, nv) && x.right.visitUntil(iv, nv)
		}
	default:
		// overlap: report this node, then both subtrees (short-circuiting on stop)
		return nv(x) && x.left.visitUntil(iv, nv) && x.right.visitUntil(iv, nv)
	}
	return true
}
// IntervalValue is an interval paired with the value stored under it.
type IntervalValue struct {
	Ivl Interval
	Val interface{}
}

// IntervalTree represents a (mostly) textbook implementation of the
// "Introduction to Algorithms" (Cormen et al, 2nd ed.) chapter 13 red-black tree
// and chapter 14.3 interval tree with search supporting "stabbing queries".
type IntervalTree struct {
	root *intervalNode
	count int
}
// Delete removes the node with the given interval from the tree, returning
// true if a node is in fact removed.
func (ivt *IntervalTree) Delete(ivl Interval) bool {
	z := ivt.find(ivl)
	if z == nil {
		return false
	}

	y := z
	if z.left != nil && z.right != nil {
		// two children: splice out the in-order successor instead of z
		y = z.successor()
	}

	// x is the (at most one) child replacing the spliced-out node y
	x := y.left
	if x == nil {
		x = y.right
	}
	if x != nil {
		x.parent = y.parent
	}

	if y.parent == nil {
		ivt.root = x
	} else {
		if y == y.parent.left {
			y.parent.left = x
		} else {
			y.parent.right = x
		}
		y.parent.updateMax()
	}
	if y != z {
		// move the successor's payload into z and refresh max values
		z.iv = y.iv
		z.updateMax()
	}

	if y.color() == black && x != nil {
		// removing a black node can break the black-height invariant
		ivt.deleteFixup(x)
	}

	ivt.count--
	return true
}
// deleteFixup restores the red-black invariants after a black node was
// removed; x is the child that took the removed node's place. This follows
// CLRS RB-DELETE-FIXUP, with extra nil-sibling escapes.
func (ivt *IntervalTree) deleteFixup(x *intervalNode) {
	for x != ivt.root && x.color() == black && x.parent != nil {
		if x == x.parent.left {
			w := x.parent.right
			if w.color() == red {
				w.c = black
				x.parent.c = red
				ivt.rotateLeft(x.parent)
				w = x.parent.right
			}
			if w == nil {
				break
			}
			if w.left.color() == black && w.right.color() == black {
				w.c = red
				x = x.parent
			} else {
				if w.right.color() == black {
					w.left.c = black
					w.c = red
					ivt.rotateRight(w)
					w = x.parent.right
				}
				w.c = x.parent.color()
				x.parent.c = black
				w.right.c = black
				ivt.rotateLeft(x.parent)
				x = ivt.root
			}
		} else {
			// same as above but with left and right exchanged
			w := x.parent.left
			if w.color() == red {
				w.c = black
				x.parent.c = red
				ivt.rotateRight(x.parent)
				w = x.parent.left
			}
			if w == nil {
				break
			}
			if w.left.color() == black && w.right.color() == black {
				w.c = red
				x = x.parent
			} else {
				if w.left.color() == black {
					w.right.c = black
					w.c = red
					ivt.rotateLeft(w)
					w = x.parent.left
				}
				w.c = x.parent.color()
				x.parent.c = black
				w.left.c = black
				ivt.rotateRight(x.parent)
				x = ivt.root
			}
		}
	}
	if x != nil {
		x.c = black
	}
}
// Insert adds a node with the given interval into the tree.
// Nodes are ordered by interval Begin; duplicates are allowed and land in
// the right subtree.
func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) {
	var y *intervalNode
	z := &intervalNode{iv: IntervalValue{ivl, val}, max: ivl.End, c: red}
	x := ivt.root
	for x != nil {
		y = x
		if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
			x = x.left
		} else {
			x = x.right
		}
	}

	z.parent = y
	if y == nil {
		ivt.root = z
	} else {
		if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
			y.left = z
		} else {
			y.right = z
		}
		y.updateMax()
	}
	z.c = red
	ivt.insertFixup(z)
	ivt.count++
}

// insertFixup restores the red-black invariants after inserting the red
// node z (CLRS RB-INSERT-FIXUP).
func (ivt *IntervalTree) insertFixup(z *intervalNode) {
	for z.parent != nil && z.parent.parent != nil && z.parent.color() == red {
		if z.parent == z.parent.parent.left {
			y := z.parent.parent.right
			if y.color() == red {
				y.c = black
				z.parent.c = black
				z.parent.parent.c = red
				z = z.parent.parent
			} else {
				if z == z.parent.right {
					z = z.parent
					ivt.rotateLeft(z)
				}
				z.parent.c = black
				z.parent.parent.c = red
				ivt.rotateRight(z.parent.parent)
			}
		} else {
			// same as then with left/right exchanged
			y := z.parent.parent.left
			if y.color() == red {
				y.c = black
				z.parent.c = black
				z.parent.parent.c = red
				z = z.parent.parent
			} else {
				if z == z.parent.left {
					z = z.parent
					ivt.rotateRight(z)
				}
				z.parent.c = black
				z.parent.parent.c = red
				ivt.rotateLeft(z.parent.parent)
			}
		}
	}
	ivt.root.c = black
}
// rotateLeft moves x so it is left of its right child
func (ivt *IntervalTree) rotateLeft(x *intervalNode) {
	y := x.right
	x.right = y.left
	if y.left != nil {
		y.left.parent = x
	}
	x.updateMax()
	ivt.replaceParent(x, y)
	y.left = x
	y.updateMax()
}

// rotateRight moves x so it is right of its left child
// (comment fixed: it previously read "rotateLeft" due to a copy-paste).
func (ivt *IntervalTree) rotateRight(x *intervalNode) {
	if x == nil {
		return
	}
	y := x.left
	x.left = y.right
	if y.right != nil {
		y.right.parent = x
	}
	x.updateMax()
	ivt.replaceParent(x, y)
	y.right = x
	y.updateMax()
}

// replaceParent replaces x's parent with y
func (ivt *IntervalTree) replaceParent(x *intervalNode, y *intervalNode) {
	y.parent = x.parent
	if x.parent == nil {
		ivt.root = y
	} else {
		if x == x.parent.left {
			x.parent.left = y
		} else {
			x.parent.right = y
		}
		x.parent.updateMax()
	}
	x.parent = y
}
// Len gives the number of elements in the tree
func (ivt *IntervalTree) Len() int { return ivt.count }

// Height is the number of levels in the tree; one node has height 1.
func (ivt *IntervalTree) Height() int { return ivt.root.height() }

// MaxHeight is the expected maximum tree height given the number of nodes
func (ivt *IntervalTree) MaxHeight() int {
	// A red-black tree with n nodes has height at most 2*log2(n+1);
	// adding 0.5 rounds to the nearest integer.
	return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
}
// IntervalVisitor is used on tree searches; return false to stop searching.
type IntervalVisitor func(n *IntervalValue) bool

// Visit calls a visitor function on every tree node intersecting the given interval.
func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
	ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) })
}
// find locates the tree node whose interval is exactly ivl; nil if absent.
func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) {
	match := func(n *intervalNode) bool {
		if n.iv.Ivl == ivl {
			ret = n
			return false // exact node found; stop the traversal
		}
		return true
	}
	ivt.root.visit(&ivl, match)
	return ret
}
// Find gets the IntervalValue for the node matching the given interval,
// or nil when no node has exactly that interval.
func (ivt *IntervalTree) Find(ivl Interval) *IntervalValue {
	if node := ivt.find(ivl); node != nil {
		return &node.iv
	}
	return nil
}
// Contains returns true if there is some tree node intersecting the given interval.
func (ivt *IntervalTree) Contains(iv Interval) bool {
	// Standard interval-tree stab query: descend left while the left
	// subtree's cached max endpoint could still intersect iv, otherwise
	// go right; stop at the first intersecting node.
	x := ivt.root
	for x != nil && iv.Compare(&x.iv.Ivl) != 0 {
		if x.left != nil && x.left.max.Compare(iv.Begin) > 0 {
			x = x.left
		} else {
			x = x.right
		}
	}
	return x != nil
}
// Stab returns a slice with all elements in the tree intersecting the interval.
func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
	collect := func(v *IntervalValue) bool {
		ivs = append(ivs, v)
		return true
	}
	ivt.Visit(iv, collect)
	return ivs
}
// StringComparable wraps a string so it satisfies the Comparable interface.
type StringComparable string

// Compare orders StringComparables lexicographically.
func (s StringComparable) Compare(c Comparable) int {
	sc := c.(StringComparable)
	switch {
	case s < sc:
		return -1
	case s > sc:
		return 1
	default:
		return 0
	}
}
// NewStringInterval returns the interval [begin, end) over plain strings.
func NewStringInterval(begin, end string) Interval {
	return Interval{StringComparable(begin), StringComparable(end)}
}

// NewStringPoint returns the smallest interval containing only s;
// s+"\x00" is the immediate successor of s in lexicographic order.
func NewStringPoint(s string) Interval {
	return Interval{StringComparable(s), StringComparable(s + "\x00")}
}
// StringAffineComparable treats "" as > all other strings
type StringAffineComparable string

// Compare orders affine strings: the empty string sorts after every other
// string; non-empty strings compare lexicographically.
func (s StringAffineComparable) Compare(c Comparable) int {
	sc := c.(StringAffineComparable)
	switch {
	case len(s) == 0 && len(sc) == 0:
		return 0
	case len(s) == 0:
		return 1
	case len(sc) == 0:
		return -1
	case s < sc:
		return -1
	case s > sc:
		return 1
	}
	return 0
}
// NewStringAffineInterval returns the interval [begin, end) over affine
// strings, where "" sorts after all other strings (useful for open-ended ranges).
func NewStringAffineInterval(begin, end string) Interval {
	return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
}

// NewStringAffinePoint returns the smallest affine interval containing only s.
func NewStringAffinePoint(s string) Interval {
	return NewStringAffineInterval(s, s+"\x00")
}

// NewInt64Interval returns the interval [a, b) over int64s.
func NewInt64Interval(a int64, b int64) Interval {
	return Interval{Int64Comparable(a), Int64Comparable(b)}
}

// NewInt64Point returns the interval containing exactly a.
func NewInt64Point(a int64) Interval {
	return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
}
// Int64Comparable wraps an int64 so it satisfies the Comparable interface.
type Int64Comparable int64

// Compare orders Int64Comparables numerically.
//
// The values are compared directly rather than via subtraction: the previous
// `v - vc` form overflows int64 for operands of opposite sign and large
// magnitude (e.g. v = MaxInt64, vc = -1), flipping the sign of the result
// and reporting the wrong ordering.
func (v Int64Comparable) Compare(c Comparable) int {
	vc := c.(Int64Comparable)
	if v < vc {
		return -1
	}
	if v > vc {
		return 1
	}
	return 0
}

View File

@@ -0,0 +1,69 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package contention
import (
"sync"
"time"
)
// TimeoutDetector detects routine starvations by
// observing the actual time duration to finish an action
// or between two events that should happen in a fixed
// interval. If the observed duration is longer than
// the expectation, the detector will report the result.
type TimeoutDetector struct {
	mu          sync.Mutex // protects all
	maxDuration time.Duration
	// records maps an event id to the last time that event was observed.
	records map[uint64]time.Time
}

// NewTimeoutDetector creates a TimeoutDetector that flags consecutive
// observations of the same event spaced further apart than maxDuration.
func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
	td := &TimeoutDetector{
		records: make(map[uint64]time.Time),
	}
	td.maxDuration = maxDuration
	return td
}

// Reset forgets every previously observed event.
func (td *TimeoutDetector) Reset() {
	td.mu.Lock()
	td.records = make(map[uint64]time.Time)
	td.mu.Unlock()
}

// Observe records an event for the given id. It returns false together with
// the exceeded duration when the gap since the previous observation of the
// same id is longer than maxDuration.
func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) {
	td.mu.Lock()
	defer td.mu.Unlock()

	now := time.Now()
	prev, seen := td.records[which]
	td.records[which] = now
	if !seen {
		// First observation of this id: nothing to compare against.
		return true, 0
	}
	exceed := now.Sub(prev) - td.maxDuration
	return exceed <= 0, exceed
}

View File

@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package crc provides utility function for cyclic redundancy check
// algorithms.
package crc
import (

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileutil implements utility functions related to files and paths.
package fileutil
import (
@@ -58,7 +59,7 @@ func ReadDir(dirpath string) ([]string, error) {
return names, nil
}
// TouchDirAll is simliar to os.MkdirAll. It creates directories with 0700 permission if any directory
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
// does not exists. TouchDirAll also ensures the given directory is writable.
func TouchDirAll(dir string) error {
err := os.MkdirAll(dir, privateDirMode)
@@ -67,3 +68,8 @@ func TouchDirAll(dir string) error {
}
return IsDirWriteable(dir)
}
// Exist reports whether the named file or directory can be stat'd.
// Any Stat error (including "does not exist") is reported as false.
func Exist(name string) bool {
	if _, err := os.Stat(name); err != nil {
		return false
	}
	return true
}

View File

@@ -0,0 +1,29 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
// Lock is a file-based lock providing exclusive access;
// platform-specific files implement it (flock, fcntl, chmod DMEXCL, ...).
type Lock interface {
	// Name returns the name of the file.
	Name() string
	// TryLock acquires exclusivity on the lock without blocking.
	TryLock() error
	// Lock acquires exclusivity on the lock.
	Lock() error
	// Unlock unlocks the lock.
	Unlock() error
	// Destroy should be called after Unlock to clean up
	// the resources.
	Destroy() error
}

View File

@@ -25,14 +25,6 @@ var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fname string
file *os.File
@@ -42,7 +34,6 @@ func (l *lock) Name() string {
return l.fname
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
@@ -58,7 +49,6 @@ func (l *lock) TryLock() error {
return nil
}
// Lock acquires exclusivity on the lock with blocking
func (l *lock) Lock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
@@ -75,7 +65,6 @@ func (l *lock) Lock() error {
}
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return l.file.Close()
}

View File

@@ -26,14 +26,6 @@ var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
@@ -43,7 +35,6 @@ func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
var lock syscall.Flock_t
lock.Start = 0
@@ -59,7 +50,6 @@ func (l *lock) TryLock() error {
return err
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
var lock syscall.Flock_t
lock.Start = 0
@@ -70,7 +60,6 @@ func (l *lock) Lock() error {
return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
var lock syscall.Flock_t
lock.Start = 0

View File

@@ -26,14 +26,6 @@ var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
@@ -43,7 +35,6 @@ func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil && err == syscall.EWOULDBLOCK {
@@ -52,12 +43,10 @@ func (l *lock) TryLock() error {
return err
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
return syscall.Flock(l.fd, syscall.LOCK_EX)
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return syscall.Flock(l.fd, syscall.LOCK_UN)
}

View File

@@ -25,14 +25,6 @@ var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
@@ -42,17 +34,14 @@ func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
return nil
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
return nil
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return nil
}

View File

@@ -0,0 +1,26 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package fileutil
import "os"
// Fdatasync is similar to fsync(), but does not flush modified metadata
// unless that metadata is needed in order to allow a subsequent data retrieval
// to be correctly handled.
//
// On platforms without a native fdatasync syscall this falls back to a full
// File.Sync (fsync), which is strictly stronger.
func Fdatasync(f *os.File) error {
	return f.Sync()
}

View File

@@ -0,0 +1,29 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
// Fdatasync is similar to fsync(), but does not flush modified metadata
// unless that metadata is needed in order to allow a subsequent data retrieval
// to be correctly handled.
func Fdatasync(f *os.File) error {
	// Linux exposes fdatasync(2) directly via syscall.
	return syscall.Fdatasync(int(f.Fd()))
}

View File

@@ -6,6 +6,7 @@
// +build go1.5
// Package httputil provides HTTP utility functions.
package httputil
import "net/http"

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package idutil implements utility functions for generating unique,
// randomized ids.
package idutil
import (
@@ -22,15 +24,18 @@ import (
const (
tsLen = 5 * 8
cntLen = 2 * 8
cntLen = 8
suffixLen = tsLen + cntLen
)
// Generator generates unique identifiers based on counters, timestamps, and
// a node member ID.
//
// The initial id is in this format:
// High order byte is memberID, next 5 bytes are from timestamp,
// and low order 2 bytes are 0s.
// | prefix | suffix |
// | 1 byte | 5 bytes | 2 bytes |
// | 2 bytes | 5 bytes | 1 byte |
// | memberID | timestamp | cnt |
//
// The timestamp 5 bytes is different when the machine is restart
@@ -40,16 +45,16 @@ const (
// The count field may overflow to timestamp field, which is intentional.
// It helps to extend the event window to 2^56. This doesn't break that
// id generated after restart is unique because etcd throughput is <<
// 65536req/ms.
// 256req/ms(250k reqs/second).
type Generator struct {
mu sync.Mutex
// high order byte
// high order 2 bytes
prefix uint64
// low order 7 bytes
// low order 6 bytes
suffix uint64
}
func NewGenerator(memberID uint8, now time.Time) *Generator {
func NewGenerator(memberID uint16, now time.Time) *Generator {
prefix := uint64(memberID) << suffixLen
unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
suffix := lowbit(unixMilli, tsLen) << cntLen

View File

@@ -12,16 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package timeutil
package ioutil
import "time"
import "io"
// UnixNanoToTime returns the local time corresponding to the given Unix time in nanoseconds.
// If the given Unix time is zero, an uninitialized zero time is returned.
func UnixNanoToTime(ns int64) time.Time {
var t time.Time
if ns != 0 {
t = time.Unix(0, ns)
}
return t
// ReaderAndCloser implements io.ReadCloser interface by combining
// reader and closer together. The embedded Reader supplies Read and
// the embedded Closer supplies Close.
type ReaderAndCloser struct {
	io.Reader
	io.Closer
}

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ioutil implements I/O utility functions.
package ioutil
import "io"

View File

@@ -19,7 +19,7 @@ import (
"os"
)
// WriteAndSyncFile behaviors just like ioutil.WriteFile in standard library
// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library,
// but calls Sync before closing the file. WriteAndSyncFile guarantees the data
// is synced if there is no error returned.
func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {

View File

@@ -0,0 +1,195 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package logutil includes utilities to facilitate logging.
package logutil
import (
"fmt"
"sync"
"time"
"github.com/coreos/pkg/capnslog"
)
var (
defaultMergePeriod = time.Second
defaultTimeOutputScale = 10 * time.Millisecond
outputInterval = time.Second
)
// line represents a log line that can be printed out
// through capnslog.PackageLogger.
type line struct {
	level capnslog.LogLevel
	str   string
}

// append returns a copy of l with s appended to its message text,
// separated by a single space. The receiver is not modified.
func (l line) append(s string) line {
	return line{
		level: l.level,
		str:   l.str + " " + s,
	}
}
// status represents the merge status of a line.
type status struct {
	period time.Duration
	start  time.Time // start time of latest merge period
	count  int       // number of merged lines from starting
}

// isInMergePeriod reports whether now still falls inside the current merge
// window; a zero period means the window never ends.
func (s *status) isInMergePeriod(now time.Time) bool {
	if s.period == 0 {
		return true
	}
	return s.start.Add(s.period).After(now)
}

// isEmpty reports whether no lines were merged in the current window.
func (s *status) isEmpty() bool {
	return s.count == 0
}

// summary renders how many lines were merged and over how long, with times
// rounded for readable output.
func (s *status) summary(now time.Time) string {
	begin := s.start.Round(defaultTimeOutputScale)
	took := now.Round(defaultTimeOutputScale).Sub(begin)
	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
}

// reset starts a fresh merge window at now.
func (s *status) reset(now time.Time) {
	s.start = now
	s.count = 0
}
// MergeLogger supports merge logging, which merges repeated log lines
// and prints summary log lines instead.
//
// For merge logging, MergeLogger prints out the line when the line appears
// at the first time. MergeLogger holds the same log line printed within
// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod.
// It stops merging when the line doesn't appear within the
// defaultMergePeriod.
type MergeLogger struct {
	*capnslog.PackageLogger
	mu      sync.Mutex // protect statusm
	statusm map[line]*status
}

// NewMergeLogger wraps logger with merge support and starts the background
// goroutine that periodically flushes merge summaries.
func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
	l := &MergeLogger{
		PackageLogger: logger,
		statusm:       make(map[line]*status),
	}
	go l.outputLoop()
	return l
}
// MergeInfo merge-logs its arguments at INFO level.
func (l *MergeLogger) MergeInfo(entries ...interface{}) {
	l.merge(line{
		level: capnslog.INFO,
		str:   fmt.Sprint(entries...),
	})
}

// MergeInfof merge-logs a formatted message at INFO level.
func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.INFO,
		str:   fmt.Sprintf(format, args...),
	})
}

// MergeNotice merge-logs its arguments at NOTICE level.
func (l *MergeLogger) MergeNotice(entries ...interface{}) {
	l.merge(line{
		level: capnslog.NOTICE,
		str:   fmt.Sprint(entries...),
	})
}

// MergeNoticef merge-logs a formatted message at NOTICE level.
func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.NOTICE,
		str:   fmt.Sprintf(format, args...),
	})
}

// MergeWarning merge-logs its arguments at WARNING level.
func (l *MergeLogger) MergeWarning(entries ...interface{}) {
	l.merge(line{
		level: capnslog.WARNING,
		str:   fmt.Sprint(entries...),
	})
}

// MergeWarningf merge-logs a formatted message at WARNING level.
func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.WARNING,
		str:   fmt.Sprintf(format, args...),
	})
}

// MergeError merge-logs its arguments at ERROR level.
func (l *MergeLogger) MergeError(entries ...interface{}) {
	l.merge(line{
		level: capnslog.ERROR,
		str:   fmt.Sprint(entries...),
	})
}

// MergeErrorf merge-logs a formatted message at ERROR level.
func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.ERROR,
		str:   fmt.Sprintf(format, args...),
	})
}
// merge either counts ln toward an in-progress merge window or, when ln is
// new, starts tracking it and prints it immediately.
func (l *MergeLogger) merge(ln line) {
	l.mu.Lock()
	// increase count if the logger is merging the line
	if status, ok := l.statusm[ln]; ok {
		status.count++
		l.mu.Unlock()
		return
	}
	// initialize status of the line
	l.statusm[ln] = &status{
		period: defaultMergePeriod,
		start:  time.Now(),
	}
	// release the lock before IO operation
	l.mu.Unlock()
	// print out the line at its first time
	l.PackageLogger.Logf(ln.level, ln.str)
}
// outputLoop runs forever (one goroutine per MergeLogger), waking every
// outputInterval to emit summary lines for merge windows that have expired
// and to drop windows that saw no repeated lines.
func (l *MergeLogger) outputLoop() {
	for now := range time.Tick(outputInterval) {
		var outputs []line
		l.mu.Lock()
		for ln, status := range l.statusm {
			if status.isInMergePeriod(now) {
				continue
			}
			if status.isEmpty() {
				// Window expired with no repeats: stop tracking the line.
				delete(l.statusm, ln)
				continue
			}
			outputs = append(outputs, ln.append(status.summary(now)))
			status.reset(now)
		}
		l.mu.Unlock()
		// Log outside the mutex to avoid holding it during IO.
		for _, o := range outputs {
			l.PackageLogger.Logf(o.level, o.str)
		}
	}
}

View File

@@ -40,3 +40,27 @@ func RecoverPort(port int) error {
_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
return err
}
// SetLatency adds latency in millisecond scale with random variations.
func SetLatency(ms, rv int) error {
	if rv > ms {
		rv = 1
	}
	// First try to install a fresh netem qdisc on eth0.
	addCmd := fmt.Sprintf("sudo tc qdisc add dev eth0 root netem delay %dms %dms distribution normal", ms, rv)
	if _, err := exec.Command("/bin/sh", "-c", addCmd).Output(); err == nil {
		return nil
	}
	// the rule has already been added. Overwrite it.
	changeCmd := fmt.Sprintf("sudo tc qdisc change dev eth0 root netem delay %dms %dms distribution normal", ms, rv)
	if _, err := exec.Command("/bin/sh", "-c", changeCmd).Output(); err != nil {
		return err
	}
	return nil
}
// RemoveLatency resets latency configurations.
func RemoveLatency() error {
	const cmdStr = "sudo tc qdisc del dev eth0 root netem"
	_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
	return err
}

View File

@@ -19,3 +19,7 @@ package netutil
// DropPort is a no-op stub for platforms without the real implementation.
func DropPort(port int) error { return nil }

// RecoverPort is a no-op stub for platforms without the real implementation.
func RecoverPort(port int) error { return nil }

// SetLatency is a no-op stub for platforms without tc/netem support.
func SetLatency(ms, rv int) error { return nil }

// RemoveLatency is a no-op stub for platforms without tc/netem support.
func RemoveLatency() error { return nil }

View File

@@ -12,16 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package netutil implements network-related utility functions.
package netutil
import (
"encoding/base64"
"net"
"net/http"
"net/url"
"reflect"
"sort"
"strings"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/pkg/capnslog"
@@ -118,36 +116,3 @@ func URLStringsEqual(a []string, b []string) bool {
return urlsEqual(urlsA, urlsB)
}
// BasicAuth returns the username and password provided in the request's
// Authorization header, if the request uses HTTP Basic Authentication.
// See RFC 2617, Section 2.
// Based on the BasicAuth method from the Golang standard lib.
// TODO: use the standard lib BasicAuth method when we move to Go 1.4.
func BasicAuth(r *http.Request) (username, password string, ok bool) {
	header := r.Header.Get("Authorization")
	if header == "" {
		return "", "", false
	}
	return parseBasicAuth(header)
}
// parseBasicAuth parses an HTTP Basic Authentication string.
// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
// Taken from the Golang standard lib.
// TODO: use the standard lib BasicAuth method when we move to Go 1.4.
func parseBasicAuth(auth string) (username, password string, ok bool) {
	const prefix = "Basic "
	if !strings.HasPrefix(auth, prefix) {
		return "", "", false
	}
	decoded, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
	if err != nil {
		return "", "", false
	}
	creds := string(decoded)
	colon := strings.IndexByte(creds, ':')
	if colon < 0 {
		return "", "", false
	}
	return creds[:colon], creds[colon+1:], true
}

View File

@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pathutil implements utility functions for handling slash-separated
// paths.
package pathutil
import "path"

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil defines interfaces for handling Protocol Buffer objects.
package pbutil
import "github.com/coreos/pkg/capnslog"

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package runtime implements utility functions for runtime systems.
package runtime
import (

View File

@@ -0,0 +1,168 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schedule
import (
"sync"
"golang.org/x/net/context"
)
// Job is a unit of work that honors cancellation via its context argument.
type Job func(context.Context)

// Scheduler can schedule jobs.
type Scheduler interface {
	// Schedule asks the scheduler to schedule a job defined by the given func.
	// Schedule to a stopped scheduler might panic.
	Schedule(j Job)
	// Pending returns number of pending jobs
	Pending() int
	// Scheduled returns the number of scheduled jobs (excluding pending jobs)
	Scheduled() int
	// Finished returns the number of finished jobs
	Finished() int
	// WaitFinish waits until at least n jobs are finished and all pending jobs are finished.
	WaitFinish(n int)
	// Stop stops the scheduler.
	Stop()
}
// fifo is a Scheduler that runs jobs one at a time in submission order
// on a single background goroutine (see run).
type fifo struct {
	mu sync.Mutex
	// resume signals the run loop that new work arrived (capacity 1).
	resume    chan struct{}
	scheduled int
	finished  int
	pendings  []Job
	ctx    context.Context
	cancel context.CancelFunc
	// finishCond shares mu; it wakes WaitFinish when a job completes.
	finishCond *sync.Cond
	donec      chan struct{}
}
// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
// order sequentially
func NewFIFOScheduler() Scheduler {
	f := &fifo{
		resume: make(chan struct{}, 1),
		donec:  make(chan struct{}, 1),
	}
	// finishCond shares the scheduler mutex so counter updates and
	// WaitFinish stay consistent.
	f.finishCond = sync.NewCond(&f.mu)
	f.ctx, f.cancel = context.WithCancel(context.Background())
	go f.run()
	return f
}
// Schedule schedules a job that will be ran in FIFO order sequentially.
// Scheduling on a stopped scheduler (after Stop) panics.
func (f *fifo) Schedule(j Job) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Stop nils out cancel to mark the scheduler stopped.
	if f.cancel == nil {
		panic("schedule: schedule to stopped scheduler")
	}

	// If the run loop may be parked waiting for work, wake it; resume has
	// capacity 1, so a non-blocking send is sufficient.
	if len(f.pendings) == 0 {
		select {
		case f.resume <- struct{}{}:
		default:
		}
	}
	f.pendings = append(f.pendings, j)
	// NOTE: the redundant bare `return` at the end of this void function
	// was removed (staticcheck S1023).
}
// Pending returns the number of jobs queued but not yet started.
func (f *fifo) Pending() int {
	f.mu.Lock()
	defer f.mu.Unlock()
	return len(f.pendings)
}

// Scheduled returns how many jobs the run loop has picked up so far.
func (f *fifo) Scheduled() int {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.scheduled
}

// Finished returns how many jobs have run to completion.
func (f *fifo) Finished() int {
	f.finishCond.L.Lock()
	defer f.finishCond.L.Unlock()
	return f.finished
}

// WaitFinish blocks until at least n jobs have finished and the pending
// queue is empty.
func (f *fifo) WaitFinish(n int) {
	f.finishCond.L.Lock()
	for f.finished < n || len(f.pendings) != 0 {
		f.finishCond.Wait()
	}
	f.finishCond.L.Unlock()
}
// Stop stops the scheduler and cancels all pending jobs.
func (f *fifo) Stop() {
	f.mu.Lock()
	// Cancel the run loop, then nil out cancel to mark the scheduler
	// stopped; Schedule panics when cancel is nil.
	f.cancel()
	f.cancel = nil
	f.mu.Unlock()
	// Wait for the run loop to drain pending jobs and exit.
	<-f.donec
}
// run is the scheduler's single worker goroutine: it executes pending jobs
// one at a time in FIFO order until Stop cancels the context, then runs the
// remaining pending jobs with the canceled context and exits.
func (f *fifo) run() {
	// TODO: recover from job panic?
	defer func() {
		close(f.donec)
		close(f.resume)
	}()
	for {
		var todo Job
		f.mu.Lock()
		if len(f.pendings) != 0 {
			f.scheduled++
			todo = f.pendings[0]
		}
		f.mu.Unlock()
		if todo == nil {
			// No work: park until Schedule signals resume or Stop cancels.
			select {
			case <-f.resume:
			case <-f.ctx.Done():
				f.mu.Lock()
				pendings := f.pendings
				f.pendings = nil
				f.mu.Unlock()
				// clean up pending jobs
				for _, todo := range pendings {
					todo(f.ctx)
				}
				return
			}
		} else {
			// Run the job outside the mutex, then publish completion
			// under finishCond so WaitFinish wakes up.
			todo(f.ctx)
			f.finishCond.L.Lock()
			f.finished++
			f.pendings = f.pendings[1:]
			f.finishCond.Broadcast()
			f.finishCond.L.Unlock()
		}
	}
}

View File

@@ -0,0 +1,136 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testutil
import (
"fmt"
"net/http"
"os"
"regexp"
"runtime"
"sort"
"strings"
"testing"
"time"
)
/*
CheckLeakedGoroutine verifies tests do not leave any leaky
goroutines. It returns true when there are goroutines still
running(leaking) after all tests.

	import "github.com/coreos/etcd/pkg/testutil"

	func TestMain(m *testing.M) {
		v := m.Run()
		if v == 0 && testutil.CheckLeakedGoroutine() {
			os.Exit(1)
		}
		os.Exit(v)
	}

	func TestSample(t *testing.T) {
		defer testutil.AfterTest(t)
		...
	}
*/
func CheckLeakedGoroutine() bool {
	if testing.Short() {
		// not counting goroutines for leakage in -short mode
		return false
	}
	gs := interestingGoroutines()
	if len(gs) == 0 {
		return false
	}
	// Collapse goroutines that differ only in pointer-valued arguments so
	// each distinct leak site is reported once with a count.
	stackCount := make(map[string]int)
	re := regexp.MustCompile("\\(0[0-9a-fx, ]*\\)")
	for _, g := range gs {
		// strip out pointer arguments in first function of stack dump
		normalized := string(re.ReplaceAll([]byte(g), []byte("(...)")))
		stackCount[normalized]++
	}
	fmt.Fprintf(os.Stderr, "Too many goroutines running after all test(s).\n")
	for stack, count := range stackCount {
		fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack)
	}
	return true
}
// AfterTest is meant to be deferred at the start of a test. It waits briefly
// for known-leaky goroutine kinds (HTTP transports, test servers, dialers)
// to shut down, retrying for up to ~300ms, and fails the test if any persist.
func AfterTest(t *testing.T) {
	http.DefaultTransport.(*http.Transport).CloseIdleConnections()
	if testing.Short() {
		return
	}
	var bad string
	// Map of stack-trace substring -> human-readable description of the
	// goroutine kind it indicates.
	badSubstring := map[string]string{
		").writeLoop(": "a Transport",
		"created by net/http/httptest.(*Server).Start": "an httptest.Server",
		"timeoutHandler":        "a TimeoutHandler",
		"net.(*netFD).connect(": "a timing out dial",
		").noteClientGone(":     "a closenotifier sender",
	}
	// readLoop was buggy before go1.5:
	// https://github.com/golang/go/issues/10457
	if getAtLeastGo15() {
		badSubstring[").readLoop("] = "a Transport"
	}
	var stacks string
	for i := 0; i < 6; i++ {
		bad = ""
		stacks = strings.Join(interestingGoroutines(), "\n\n")
		for substr, what := range badSubstring {
			if strings.Contains(stacks, substr) {
				bad = what
			}
		}
		if bad == "" {
			return
		}
		// Bad stuff found, but goroutines might just still be
		// shutting down, so give it some time.
		time.Sleep(50 * time.Millisecond)
	}
	t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks)
}
// interestingGoroutines returns sorted stack dumps of the live goroutines,
// excluding well-known benign ones (the test runner itself, GC workers,
// background log flushers, and this function's own goroutine).
func interestingGoroutines() (gs []string) {
	buf := make([]byte, 2<<20)
	buf = buf[:runtime.Stack(buf, true)]
	for _, g := range strings.Split(string(buf), "\n\n") {
		// The first line is the goroutine header; the remainder is the stack.
		sl := strings.SplitN(g, "\n", 2)
		if len(sl) != 2 {
			continue
		}
		stack := strings.TrimSpace(sl[1])
		if stack == "" ||
			strings.Contains(stack, "created by testing.RunTests") ||
			strings.Contains(stack, "testing.Main(") ||
			strings.Contains(stack, "runtime.goexit") ||
			strings.Contains(stack, "github.com/coreos/etcd/pkg/testutil.interestingGoroutines") ||
			strings.Contains(stack, "github.com/coreos/etcd/pkg/logutil.(*MergeLogger).outputLoop") ||
			strings.Contains(stack, "github.com/golang/glog.(*loggingT).flushDaemon") ||
			strings.Contains(stack, "created by runtime.gc") ||
			strings.Contains(stack, "runtime.MHeap_Scavenger") {
			continue
		}
		gs = append(gs, stack)
	}
	sort.Strings(gs)
	return
}
// getAtLeastGo15 reports whether the runtime version is go1.5 or newer,
// parsed from runtime.Version() strings of the form "goMAJOR.MINORrest".
func getAtLeastGo15() bool {
	var major, minor int
	var rest string
	n, err := fmt.Sscanf(runtime.Version(), "go%d.%d%s", &major, &minor, &rest)
	if err != nil || n != 3 {
		return false
	}
	if major != 1 {
		return major > 1
	}
	return minor >= 5
}

View File

@@ -0,0 +1,57 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testutil
import (
"net/http"
"sync"
)
// PauseableHandler wraps an http.Handler so request serving can be suspended.
// While paused, incoming requests have their connections hijacked and closed.
type PauseableHandler struct {
	Next   http.Handler
	mu     sync.Mutex
	paused bool
}

// ServeHTTP forwards to Next unless the handler is paused, in which case the
// underlying connection is hijacked and dropped without a response.
func (ph *PauseableHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ph.mu.Lock()
	isPaused := ph.paused
	ph.mu.Unlock()
	if isPaused {
		hj, ok := w.(http.Hijacker)
		if !ok {
			panic("webserver doesn't support hijacking")
		}
		conn, _, err := hj.Hijack()
		if err != nil {
			panic(err.Error())
		}
		conn.Close()
		return
	}
	ph.Next.ServeHTTP(w, r)
}

// Pause makes subsequent requests get their connections dropped.
func (ph *PauseableHandler) Pause() {
	ph.mu.Lock()
	ph.paused = true
	ph.mu.Unlock()
}

// Resume restores normal request handling.
func (ph *PauseableHandler) Resume() {
	ph.mu.Lock()
	ph.paused = false
	ph.mu.Unlock()
}

View File

@@ -0,0 +1,132 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testutil
import (
"errors"
"fmt"
"sync"
"time"
)
// Action describes one recorded invocation: a name plus its arguments.
type Action struct {
	Name   string
	Params []interface{}
}

// Recorder is implemented by test doubles that log the Actions performed
// on them so tests can assert on call sequences.
type Recorder interface {
	// Record publishes an Action (e.g., function call) which will
	// be reflected by Wait() or Chan()
	Record(a Action)
	// Wait waits until at least n Actions are available or returns with error
	Wait(n int) ([]Action, error)
	// Action returns immediately available Actions
	Action() []Action
	// Chan returns the channel for actions published by Record
	Chan() <-chan Action
}
// RecorderBuffered appends all Actions to a slice
type RecorderBuffered struct {
sync.Mutex
actions []Action
}
func (r *RecorderBuffered) Record(a Action) {
r.Lock()
r.actions = append(r.actions, a)
r.Unlock()
}
func (r *RecorderBuffered) Action() []Action {
r.Lock()
cpy := make([]Action, len(r.actions))
copy(cpy, r.actions)
r.Unlock()
return cpy
}
// Wait yields briefly, then returns whatever Actions have been
// recorded. If fewer than n are available it also returns an error
// describing the shortfall.
func (r *RecorderBuffered) Wait(n int) ([]Action, error) {
	// legacy racey behavior
	WaitSchedule()
	recorded := r.Action()
	var err error
	if len(recorded) < n {
		err = newLenErr(n, len(recorded))
	}
	return recorded, err
}
// Chan returns a channel that yields a snapshot of the currently
// recorded Actions and is closed once all of them have been sent.
func (r *RecorderBuffered) Chan() <-chan Action {
	ch := make(chan Action)
	go func() {
		defer close(ch)
		for _, act := range r.Action() {
			ch <- act
		}
	}()
	return ch
}
// recorderStream writes all Actions to an unbuffered channel:
// Record blocks until another goroutine consumes the value via
// Chan, Action, or Wait.
type recorderStream struct {
	ch chan Action
}
// NewRecorderStream returns a Recorder that hands each recorded
// Action directly to a consumer over an unbuffered channel.
func NewRecorderStream() Recorder {
	r := &recorderStream{
		ch: make(chan Action),
	}
	return r
}
// Record publishes a on the stream. It blocks until a receiver
// (via Chan, Action, or Wait) takes the value from the unbuffered
// channel.
func (r *recorderStream) Record(a Action) {
	r.ch <- a
}
// Action drains every Action currently pending on the channel
// without blocking and returns them in arrival order.
func (r *recorderStream) Action() []Action {
	var drained []Action
	for {
		select {
		case a := <-r.ch:
			drained = append(drained, a)
		default:
			return drained
		}
	}
}
// Chan exposes the underlying unbuffered channel on which Record
// publishes its Actions.
func (r *recorderStream) Chan() <-chan Action {
	return r.ch
}
// Wait receives up to n Actions from the stream, giving the whole
// operation a 5-second budget. On timeout it returns the Actions
// received so far plus an error describing the shortfall. After the
// n-th Action it lingers 10ms to pick up one extra stray Action.
func (r *recorderStream) Wait(n int) ([]Action, error) {
	received := make([]Action, n)
	deadline := time.After(5 * time.Second)
	for i := 0; i < n; i++ {
		select {
		case received[i] = <-r.ch:
		case <-deadline:
			return received[:i], newLenErr(n, i)
		}
	}
	// extra wait to catch any Action spew
	select {
	case extra := <-r.ch:
		received = append(received, extra)
	case <-time.After(10 * time.Millisecond):
	}
	return received, nil
}
// newLenErr returns an error reporting that only actual Actions were
// observed when at least expected were required.
func newLenErr(expected int, actual int) error {
	// fmt.Errorf replaces the former errors.New(fmt.Sprintf(...)) pair;
	// the message text is unchanged.
	return fmt.Errorf("len(actions) = %d, expected >= %d", actual, expected)
}

View File

@@ -0,0 +1,56 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testutil provides test utility functions.
package testutil
import (
"net/url"
"runtime"
"testing"
"time"
)
// TODO: improve this when we are able to know the schedule or status of target go-routine.
// WaitSchedule briefly sleeps to give other goroutines a chance to run.
// It is a best-effort yield, not a synchronization primitive.
func WaitSchedule() {
	time.Sleep(10 * time.Millisecond)
}
// MustNewURLs parses each string in urls into a url.URL, failing the
// test via t on the first string that does not parse. A nil input
// yields a nil result (callers distinguish nil from empty).
func MustNewURLs(t *testing.T, urls []string) []url.URL {
	if urls == nil {
		return nil
	}
	var us []url.URL
	// Loop variable renamed from "url" to "s": the original shadowed
	// the imported net/url package inside the loop body.
	for _, s := range urls {
		u := MustNewURL(t, s)
		us = append(us, *u)
	}
	return us
}

// MustNewURL parses s into a *url.URL, failing the test via t if s is
// not a valid URL.
func MustNewURL(t *testing.T, s string) *url.URL {
	u, err := url.Parse(s)
	if err != nil {
		t.Fatalf("parse %v error: %v", s, err)
	}
	return u
}
// FatalStack helps to fatal the test and print out the stacks of all running goroutines.
// The stack dump is emitted via t.Error first so it appears in the log
// even though the subsequent Fatal stops the test.
func FatalStack(t *testing.T, s string) {
	// NOTE(review): an 8KiB buffer may truncate very deep stack dumps;
	// acceptable for a test helper.
	stackTrace := make([]byte, 8*1024)
	n := runtime.Stack(stackTrace, true)
	t.Error(string(stackTrace[:n]))
	// t.Fatal, not t.Fatalf(s): s is not a format string, and passing a
	// caller-supplied string as a format fails `go vet` and mangles any
	// '%' it contains.
	t.Fatal(s)
}

View File

@@ -0,0 +1,17 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport implements various HTTP transport utilities based on Go
// net package.
package transport

View File

@@ -26,7 +26,13 @@ import (
)
func NewListener(addr string, scheme string, info TLSInfo) (net.Listener, error) {
l, err := net.Listen("tcp", addr)
nettype := "tcp"
if scheme == "unix" {
// unix sockets via unix://laddr
nettype = scheme
}
l, err := net.Listen(nettype, addr)
if err != nil {
return nil, err
}
@@ -46,18 +52,19 @@ func NewListener(addr string, scheme string, info TLSInfo) (net.Listener, error)
return l, nil
}
func NewTransport(info TLSInfo) (*http.Transport, error) {
func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
cfg, err := info.ClientConfig()
if err != nil {
return nil, err
}
t := &http.Transport{
// timeouts taken from http.DefaultTransport
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
Timeout: dialtimeoutd,
// value taken from http.DefaultTransport
KeepAlive: 30 * time.Second,
}).Dial,
// value taken from http.DefaultTransport
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: cfg,
}

View File

@@ -23,14 +23,19 @@ import (
// NewTimeoutTransport returns a transport created using the given TLS info.
// If read/write on the created connection blocks longer than its time limit,
// it will return timeout error.
// If read/write timeout is set, transport will not be able to reuse connection.
func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
tr, err := NewTransport(info)
tr, err := NewTransport(info, dialtimeoutd)
if err != nil {
return nil, err
}
// the timeouted connection will tiemout soon after it is idle.
if rdtimeoutd != 0 || wtimeoutd != 0 {
// the timed out connection will timeout soon after it is idle.
// it should not be put back to http transport as an idle connection for future usage.
tr.MaxIdleConnsPerHost = -1
}
tr.Dial = (&rwTimeoutDialer{
Dialer: net.Dialer{
Timeout: dialtimeoutd,

Some files were not shown because too many files have changed in this diff Show More