daemon_set.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"reflect"
	"strings"
	"time"

	"k8s.io/kubernetes/pkg/api"
	apierrs "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	"k8s.io/kubernetes/pkg/apis/extensions"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)
const (
	// this should not be a multiple of 5, because node status updates
	// every 5 seconds. See https://github.com/kubernetes/kubernetes/pull/14915.
	dsRetryPeriod  = 2 * time.Second
	dsRetryTimeout = 5 * time.Minute

	daemonsetLabelPrefix = "daemonset-"
	daemonsetNameLabel   = daemonsetLabelPrefix + "name"
	daemonsetColorLabel  = daemonsetLabelPrefix + "color"
)
// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
// model which lets the DS controller kick out other pods to make room.
// See http://issues.k8s.io/21767 for more details
var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
	var f *framework.Framework

	AfterEach(func() {
		if daemonsets, err := f.Client.DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil {
			framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets))
		} else {
			framework.Logf("unable to dump daemonsets: %v", err)
		}
		if pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil {
			framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods))
		} else {
			framework.Logf("unable to dump pods: %v", err)
		}
		err := clearDaemonSetNodeLabels(f.Client)
		Expect(err).NotTo(HaveOccurred())
	})

	f = framework.NewDefaultFramework("daemonsets")

	image := "gcr.io/google_containers/serve_hostname:v1.4"
	dsName := "daemon-set"

	var ns string
	var c *client.Client

	BeforeEach(func() {
		ns = f.Namespace.Name
		c = f.Client
		err := clearDaemonSetNodeLabels(c)
		Expect(err).NotTo(HaveOccurred())
	})
  74. It("should run and stop simple daemon", func() {
  75. label := map[string]string{daemonsetNameLabel: dsName}
  76. framework.Logf("Creating simple daemon set %s", dsName)
  77. _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
  78. ObjectMeta: api.ObjectMeta{
  79. Name: dsName,
  80. },
  81. Spec: extensions.DaemonSetSpec{
  82. Template: api.PodTemplateSpec{
  83. ObjectMeta: api.ObjectMeta{
  84. Labels: label,
  85. },
  86. Spec: api.PodSpec{
  87. Containers: []api.Container{
  88. {
  89. Name: dsName,
  90. Image: image,
  91. Ports: []api.ContainerPort{{ContainerPort: 9376}},
  92. },
  93. },
  94. },
  95. },
  96. },
  97. })
  98. Expect(err).NotTo(HaveOccurred())
  99. defer func() {
  100. framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
  101. dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c)
  102. Expect(err).NotTo(HaveOccurred())
  103. err = dsReaper.Stop(ns, dsName, 0, nil)
  104. Expect(err).NotTo(HaveOccurred())
  105. err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, label))
  106. Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
  107. }()
  108. By("Check that daemon pods launch on every node of the cluster.")
  109. Expect(err).NotTo(HaveOccurred())
  110. err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
  111. Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
  112. By("Stop a daemon pod, check that the daemon pod is revived.")
  113. podClient := c.Pods(ns)
  114. selector := labels.Set(label).AsSelector()
  115. options := api.ListOptions{LabelSelector: selector}
  116. podList, err := podClient.List(options)
  117. Expect(err).NotTo(HaveOccurred())
  118. Expect(len(podList.Items)).To(BeNumerically(">", 0))
  119. pod := podList.Items[0]
  120. err = podClient.Delete(pod.Name, nil)
  121. Expect(err).NotTo(HaveOccurred())
  122. err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
  123. Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
  124. })
	It("should run and stop complex daemon", func() {
		complexLabel := map[string]string{daemonsetNameLabel: dsName}
		nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
		framework.Logf("Creating daemon with a node selector %s", dsName)
		_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
			ObjectMeta: api.ObjectMeta{
				Name: dsName,
			},
			Spec: extensions.DaemonSetSpec{
				Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
				Template: api.PodTemplateSpec{
					ObjectMeta: api.ObjectMeta{
						Labels: complexLabel,
					},
					Spec: api.PodSpec{
						NodeSelector: nodeSelector,
						Containers: []api.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())

		By("Initially, daemon pods should not be running on any nodes.")
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

		By("Change label of node, check that daemon pod is launched.")
		nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
		Expect(len(daemonSetLabels)).To(Equal(1))
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

		By("remove the node selector and wait for daemons to be unscheduled")
		_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
		Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
		Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
			NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

		By("We should now be able to delete the daemon set.")
		Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred())
	})
	It("should run and stop complex daemon with node affinity", func() {
		complexLabel := map[string]string{daemonsetNameLabel: dsName}
		nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
		framework.Logf("Creating daemon with a node affinity %s", dsName)
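		// Instead of a NodeSelector, this variant expresses the scheduling constraint as a
		// node-affinity annotation (api.AffinityAnnotationKey) on the pod template: a
		// requiredDuringSchedulingIgnoredDuringExecution rule that only matches nodes
		// labeled daemonset-color=blue.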
		affinity := map[string]string{
			api.AffinityAnnotationKey: fmt.Sprintf(`
				{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
					"nodeSelectorTerms": [{
						"matchExpressions": [{
							"key": "%s",
							"operator": "In",
							"values": ["%s"]
					}]
				}]
			}}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
		}
		_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
			ObjectMeta: api.ObjectMeta{
				Name: dsName,
			},
			Spec: extensions.DaemonSetSpec{
				Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
				Template: api.PodTemplateSpec{
					ObjectMeta: api.ObjectMeta{
						Labels:      complexLabel,
						Annotations: affinity,
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())

		By("Initially, daemon pods should not be running on any nodes.")
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

		By("Change label of node, check that daemon pod is launched.")
		nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
		Expect(len(daemonSetLabels)).To(Equal(1))
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

		By("remove the node selector and wait for daemons to be unscheduled")
		_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
		Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
		Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
			NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

		By("We should now be able to delete the daemon set.")
		Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred())
	})
})
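
// separateDaemonSetNodeLabels splits a node's labels into those owned by this
// test (keys prefixed with daemonsetLabelPrefix) and all other labels.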
func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, map[string]string) {
	daemonSetLabels := map[string]string{}
	otherLabels := map[string]string{}
	for k, v := range labels {
		if strings.HasPrefix(k, daemonsetLabelPrefix) {
			daemonSetLabels[k] = v
		} else {
			otherLabels[k] = v
		}
	}
	return daemonSetLabels, otherLabels
}
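
// clearDaemonSetNodeLabels removes the labels created by this test from every
// ready, schedulable node in the cluster.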
func clearDaemonSetNodeLabels(c *client.Client) error {
	nodeList := framework.GetReadySchedulableNodesOrDie(c)
	for _, node := range nodeList.Items {
		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
		if err != nil {
			return err
		}
	}
	return nil
}
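
// setDaemonSetNodeLabels replaces the test-owned labels on the named node with
// the given set, leaving unrelated labels untouched. The node update is retried
// on resource version conflicts until dsRetryTimeout elapses.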
func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string]string) (*api.Node, error) {
	nodeClient := c.Nodes()
	var newNode *api.Node
	var newLabels map[string]string
	err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
		node, err := nodeClient.Get(nodeName)
		if err != nil {
			return false, err
		}

		// remove all labels this test is creating
		daemonSetLabels, otherLabels := separateDaemonSetNodeLabels(node.Labels)
		if reflect.DeepEqual(daemonSetLabels, labels) {
			newNode = node
			return true, nil
		}
		node.Labels = otherLabels
		for k, v := range labels {
			node.Labels[k] = v
		}
		newNode, err = nodeClient.Update(node)
		if err == nil {
			newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
			return true, err
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			framework.Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	if err != nil {
		return nil, err
	} else if len(newLabels) != len(labels) {
		return nil, fmt.Errorf("Could not set daemon set test labels as expected.")
	}

	return newNode, nil
}
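
// checkDaemonPodOnNodes returns a poll condition that is true when exactly one
// pod matching the selector is scheduled on each node in nodeNames and no
// matching pod is scheduled on any other node.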
func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
	return func() (bool, error) {
		selector := labels.Set(selector).AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		podList, err := f.Client.Pods(f.Namespace.Name).List(options)
		if err != nil {
			return false, nil
		}
		pods := podList.Items

		nodesToPodCount := make(map[string]int)
		for _, pod := range pods {
			nodesToPodCount[pod.Spec.NodeName] += 1
		}
		framework.Logf("nodesToPodCount: %#v", nodesToPodCount)

		// Ensure that exactly 1 pod is running on all nodes in nodeNames.
		for _, nodeName := range nodeNames {
			if nodesToPodCount[nodeName] != 1 {
				return false, nil
			}
		}

		// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
		// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
		// other nodes.
		return len(nodesToPodCount) == len(nodeNames), nil
	}
}
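
// checkRunningOnAllNodes returns a poll condition that is true when exactly one
// daemon pod matching the selector is running on every node in the cluster.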
func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
	return func() (bool, error) {
		nodeList, err := f.Client.Nodes().List(api.ListOptions{})
		framework.ExpectNoError(err)
		nodeNames := make([]string, 0)
		for _, node := range nodeList.Items {
			nodeNames = append(nodeNames, node.Name)
		}
		return checkDaemonPodOnNodes(f, selector, nodeNames)()
	}
}
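
// checkRunningOnNoNodes returns a poll condition that is true when no daemon
// pods matching the selector are scheduled on any node.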
func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
	return checkDaemonPodOnNodes(f, selector, make([]string, 0))
}