- /*
- Copyright 2015 The Kubernetes Authors.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */
- package e2e
- import (
- "fmt"
- "reflect"
- "strings"
- "time"
- "k8s.io/kubernetes/pkg/api"
- apierrs "k8s.io/kubernetes/pkg/api/errors"
- "k8s.io/kubernetes/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/apimachinery/registered"
- "k8s.io/kubernetes/pkg/apis/extensions"
- client "k8s.io/kubernetes/pkg/client/unversioned"
- "k8s.io/kubernetes/pkg/kubectl"
- "k8s.io/kubernetes/pkg/labels"
- "k8s.io/kubernetes/pkg/runtime"
- "k8s.io/kubernetes/pkg/util/wait"
- "k8s.io/kubernetes/test/e2e/framework"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- )
- const (
- // dsRetryPeriod should not be a multiple of 5, because the node status is updated
- // every 5 seconds. See https://github.com/kubernetes/kubernetes/pull/14915.
- dsRetryPeriod = 2 * time.Second
- dsRetryTimeout = 5 * time.Minute
- daemonsetLabelPrefix = "daemonset-"
- daemonsetNameLabel = daemonsetLabelPrefix + "name"
- daemonsetColorLabel = daemonsetLabelPrefix + "color"
- )
- // This test must be run in serial because it assumes the DaemonSet pods will
- // always get scheduled. If we run other tests in parallel, this may not
- // happen. In the future, running in parallel may work if we have an eviction
- // model which lets the DS controller kick out other pods to make room.
- // See http://issues.k8s.io/21767 for more details.
- var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
- var f *framework.Framework
- AfterEach(func() {
- if daemonsets, err := f.Client.DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil {
- framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets))
- } else {
- framework.Logf("unable to dump daemonsets: %v", err)
- }
- if pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil {
- framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods))
- } else {
- framework.Logf("unable to dump pods: %v", err)
- }
- err := clearDaemonSetNodeLabels(f.Client)
- Expect(err).NotTo(HaveOccurred())
- })
- f = framework.NewDefaultFramework("daemonsets")
- image := "gcr.io/google_containers/serve_hostname:v1.4"
- dsName := "daemon-set"
- var ns string
- var c *client.Client
- BeforeEach(func() {
- ns = f.Namespace.Name
- c = f.Client
- err := clearDaemonSetNodeLabels(c)
- Expect(err).NotTo(HaveOccurred())
- })
- It("should run and stop simple daemon", func() {
- label := map[string]string{daemonsetNameLabel: dsName}
- framework.Logf("Creating simple daemon set %s", dsName)
- _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
- ObjectMeta: api.ObjectMeta{
- Name: dsName,
- },
- Spec: extensions.DaemonSetSpec{
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
- Labels: label,
- },
- Spec: api.PodSpec{
- Containers: []api.Container{
- {
- Name: dsName,
- Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
- },
- },
- },
- },
- },
- })
- Expect(err).NotTo(HaveOccurred())
- defer func() {
- framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
- dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c)
- Expect(err).NotTo(HaveOccurred())
- err = dsReaper.Stop(ns, dsName, 0, nil)
- Expect(err).NotTo(HaveOccurred())
- err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, label))
- Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
- }()
- By("Check that daemon pods launch on every node of the cluster.")
- Expect(err).NotTo(HaveOccurred())
- err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
- Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
- By("Stop a daemon pod, check that the daemon pod is revived.")
- podClient := c.Pods(ns)
- selector := labels.Set(label).AsSelector()
- options := api.ListOptions{LabelSelector: selector}
- podList, err := podClient.List(options)
- Expect(err).NotTo(HaveOccurred())
- Expect(len(podList.Items)).To(BeNumerically(">", 0))
- pod := podList.Items[0]
- err = podClient.Delete(pod.Name, nil)
- Expect(err).NotTo(HaveOccurred())
- err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
- Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
- })
- It("should run and stop complex daemon", func() {
- complexLabel := map[string]string{daemonsetNameLabel: dsName}
- nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
- framework.Logf("Creating daemon with a node selector %s", dsName)
- _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
- ObjectMeta: api.ObjectMeta{
- Name: dsName,
- },
- Spec: extensions.DaemonSetSpec{
- Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
- Labels: complexLabel,
- },
- Spec: api.PodSpec{
- NodeSelector: nodeSelector,
- Containers: []api.Container{
- {
- Name: dsName,
- Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
- },
- },
- },
- },
- },
- })
- Expect(err).NotTo(HaveOccurred())
- By("Initially, daemon pods should not be running on any nodes.")
- err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
- Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
- By("Change label of node, check that daemon pod is launched.")
- nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
- Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
- newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
- Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
- daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
- Expect(len(daemonSetLabels)).To(Equal(1))
- err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
- Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
- By("remove the node selector and wait for daemons to be unscheduled")
- _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
- Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
- Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
- NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
- By("We should now be able to delete the daemon set.")
- Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred())
- })
- It("should run and stop complex daemon with node affinity", func() {
- complexLabel := map[string]string{daemonsetNameLabel: dsName}
- nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
- framework.Logf("Creating daemon with a node affinity %s", dsName)
- affinity := map[string]string{
- api.AffinityAnnotationKey: fmt.Sprintf(`
- {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
- "nodeSelectorTerms": [{
- "matchExpressions": [{
- "key": "%s",
- "operator": "In",
- "values": ["%s"]
- }]
- }]
- }}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
- }
- _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
- ObjectMeta: api.ObjectMeta{
- Name: dsName,
- },
- Spec: extensions.DaemonSetSpec{
- Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
- Labels: complexLabel,
- Annotations: affinity,
- },
- Spec: api.PodSpec{
- Containers: []api.Container{
- {
- Name: dsName,
- Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
- },
- },
- },
- },
- },
- })
- Expect(err).NotTo(HaveOccurred())
- By("Initially, daemon pods should not be running on any nodes.")
- err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
- Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
- By("Change label of node, check that daemon pod is launched.")
- nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
- Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
- newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
- Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
- daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
- Expect(len(daemonSetLabels)).To(Equal(1))
- err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
- Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
- By("remove the node selector and wait for daemons to be unscheduled")
- _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
- Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
- Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
- NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
- By("We should now be able to delete the daemon set.")
- Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred())
- })
- })
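- // separateDaemonSetNodeLabels splits a node's labels into those owned by this test
- // (keys prefixed with daemonsetLabelPrefix) and all remaining labels.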
- func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, map[string]string) {
- daemonSetLabels := map[string]string{}
- otherLabels := map[string]string{}
- for k, v := range labels {
- if strings.HasPrefix(k, daemonsetLabelPrefix) {
- daemonSetLabels[k] = v
- } else {
- otherLabels[k] = v
- }
- }
- return daemonSetLabels, otherLabels
- }
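- // clearDaemonSetNodeLabels removes the daemonset-test labels from every ready,
- // schedulable node in the cluster.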
- func clearDaemonSetNodeLabels(c *client.Client) error {
- nodeList := framework.GetReadySchedulableNodesOrDie(c)
- for _, node := range nodeList.Items {
- _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
- if err != nil {
- return err
- }
- }
- return nil
- }
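- // setDaemonSetNodeLabels replaces the daemonset-test labels on the named node with
- // the given set, retrying on resource version conflicts until dsRetryTimeout.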
- func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string]string) (*api.Node, error) {
- nodeClient := c.Nodes()
- var newNode *api.Node
- var newLabels map[string]string
- err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
- node, err := nodeClient.Get(nodeName)
- if err != nil {
- return false, err
- }
- // Separate out the labels owned by this test; if they already match the desired
- // set there is nothing to update, otherwise drop them and apply the desired set.
- daemonSetLabels, otherLabels := separateDaemonSetNodeLabels(node.Labels)
- if reflect.DeepEqual(daemonSetLabels, labels) {
- newNode = node
- return true, nil
- }
- node.Labels = otherLabels
- for k, v := range labels {
- node.Labels[k] = v
- }
- newNode, err = nodeClient.Update(node)
- if err == nil {
- newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
- return true, err
- }
- if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
- framework.Logf("failed to update node due to resource version conflict")
- return false, nil
- }
- return false, err
- })
- if err != nil {
- return nil, err
- } else if len(newLabels) != len(labels) {
- return nil, fmt.Errorf("could not set daemon set test labels as expected")
- }
- return newNode, nil
- }
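- // checkDaemonPodOnNodes returns a wait.Poll condition that succeeds when exactly one
- // daemon pod matching selector exists on each node in nodeNames and on no other node.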
- func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
- return func() (bool, error) {
- selector := labels.Set(selector).AsSelector()
- options := api.ListOptions{LabelSelector: selector}
- podList, err := f.Client.Pods(f.Namespace.Name).List(options)
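- // Tolerate transient errors from the List call; returning (false, nil) lets wait.Poll retry.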
- if err != nil {
- return false, nil
- }
- pods := podList.Items
- nodesToPodCount := make(map[string]int)
- for _, pod := range pods {
- nodesToPodCount[pod.Spec.NodeName] += 1
- }
- framework.Logf("nodesToPodCount: %#v", nodesToPodCount)
- // Ensure that exactly one daemon pod is running on each node in nodeNames.
- for _, nodeName := range nodeNames {
- if nodesToPodCount[nodeName] != 1 {
- return false, nil
- }
- }
- // Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
- // nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
- // other nodes.
- return len(nodesToPodCount) == len(nodeNames), nil
- }
- }
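- // checkRunningOnAllNodes returns a wait.Poll condition that succeeds when a daemon pod
- // matching selector is running on every node in the cluster.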
- func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
- return func() (bool, error) {
- nodeList, err := f.Client.Nodes().List(api.ListOptions{})
- framework.ExpectNoError(err)
- nodeNames := make([]string, 0)
- for _, node := range nodeList.Items {
- nodeNames = append(nodeNames, node.Name)
- }
- return checkDaemonPodOnNodes(f, selector, nodeNames)()
- }
- }
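- // checkRunningOnNoNodes returns a wait.Poll condition that succeeds when no daemon pods
- // matching selector exist on any node.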
- func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
- return checkDaemonPodOnNodes(f, selector, make([]string, 0))
- }