restart.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
	f := framework.NewDefaultFramework("restart")
	var ps *framework.PodStore

	BeforeEach(func() {
		// This test requires the ability to restart all nodes, so the provider
		// check here must match the providers supported by restartNodes below.
		framework.SkipUnlessProviderIs("gce", "gke")
		ps = framework.NewPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything())
	})

	AfterEach(func() {
		if ps != nil {
			ps.Stop()
		}
	})
	It("should restart all nodes and ensure all nodes and pods recover", func() {
		nn := framework.TestContext.CloudConfig.NumNodes

		By("ensuring all nodes are ready")
		nodeNamesBefore, err := framework.CheckNodesReady(f.Client, framework.NodeReadyInitialTimeout, nn)
		Expect(err).NotTo(HaveOccurred())
		framework.Logf("Got the following nodes before restart: %v", nodeNamesBefore)

		By("ensuring all pods are running and ready")
		pods := ps.List()
		podNamesBefore := make([]string, len(pods))
		for i, p := range pods {
			podNamesBefore[i] = p.ObjectMeta.Name
		}
		ns := api.NamespaceSystem
		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
		}

		By("restarting all of the nodes")
		err = restartNodes(framework.TestContext.Provider, framework.RestartPerNodeTimeout)
		Expect(err).NotTo(HaveOccurred())

		By("ensuring all nodes are ready after the restart")
		nodeNamesAfter, err := framework.CheckNodesReady(f.Client, framework.RestartNodeReadyAgainTimeout, nn)
		Expect(err).NotTo(HaveOccurred())
		framework.Logf("Got the following nodes after restart: %v", nodeNamesAfter)

		// Make sure that we have the same number of nodes. We're not checking
		// that the names match because that's implementation specific.
		By("ensuring the same number of nodes exist after the restart")
		if len(nodeNamesBefore) != len(nodeNamesAfter) {
			framework.Failf("Had %d nodes before nodes were restarted, but now only have %d",
				len(nodeNamesBefore), len(nodeNamesAfter))
		}

		// Make sure that we have the same number of pods. We're not checking
		// that the names match because they are recreated with different names
		// across node restarts.
		By("ensuring the same number of pods are running and ready after restart")
		podCheckStart := time.Now()
		podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), framework.RestartPodReadyAgainTimeout)
		Expect(err).NotTo(HaveOccurred())
		remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesAfter, remaining) {
			framework.Failf("At least one pod wasn't running and ready after the restart.")
		}
	})
})
// waitForNPods tries to list pods in the pod store ps until it finds expect of
// them, returning their names if it can do so before timeout.
func waitForNPods(ps *framework.PodStore, expect int, timeout time.Duration) ([]string, error) {
	// Loop until we find expect pods or timeout is passed.
	var pods []*api.Pod
	var errLast error
	found := wait.Poll(framework.Poll, timeout, func() (bool, error) {
		pods = ps.List()
		if len(pods) != expect {
			errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
			framework.Logf("Error getting pods: %v", errLast)
			return false, nil
		}
		return true, nil
	}) == nil

	// Extract the names of all found pods.
	podNames := make([]string, len(pods))
	for i, p := range pods {
		podNames[i] = p.ObjectMeta.Name
	}
	if !found {
		return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v",
			expect, timeout, errLast)
	}
	return podNames, nil
}
// restartNodes uses provider to do a restart of all nodes in the cluster,
// allowing up to nt per node.
func restartNodes(provider string, nt time.Duration) error {
	switch provider {
	case "gce", "gke":
		return migRollingUpdateSelf(nt)
	default:
		return fmt.Errorf("restartNodes(...) not implemented for %s", provider)
	}
}
// TODO(marekbiskup): Switch this to MIG recreate-instances. This can be done
// with the following bash, but needs to be written in Go:
//
//	# Step 1: Get instance names.
//	list=$(gcloud compute instance-groups --project=${PROJECT} --zone=${ZONE} instances --group=${GROUP} list)
//	i=""
//	for l in $list; do
//	  i="${l##*/},${i}"
//	done
//
//	# Step 2: Start the recreate.
//	output=$(gcloud compute instance-groups managed --project=${PROJECT} --zone=${ZONE} recreate-instances ${GROUP} --instance="${i}")
//	op=${output##*:}
//
//	# Step 3: Wait until it's complete.
//	status=""
//	while [[ "${status}" != "DONE" ]]; do
//	  output=$(gcloud compute instance-groups managed --zone="${ZONE}" get-operation ${op} | grep status)
//	  status=${output##*:}
//	done
func migRollingUpdateSelf(nt time.Duration) error {
	By("getting the name of the template for the managed instance group")
	tmpl, err := framework.MigTemplate()
	if err != nil {
		return fmt.Errorf("couldn't get MIG template name: %v", err)
	}
	return framework.MigRollingUpdate(tmpl, nt)
}
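
// The commented-out sketch below is one hypothetical way the TODO above could be
// written in Go, by shelling out to gcloud via os/exec with the same commands as
// the bash script. The function name migRecreateInstances, its structure, and the
// extra imports it would need ("os/exec", "strings") are illustrative assumptions,
// not part of the e2e framework; the test above still uses migRollingUpdateSelf.
//
//	func migRecreateInstances(project, zone, group string) error {
//		// Step 1: List the instance URLs in the group, keeping only the name
//		// (the last path segment) of each.
//		out, err := exec.Command("gcloud", "compute", "instance-groups",
//			"--project="+project, "--zone="+zone,
//			"instances", "--group="+group, "list").CombinedOutput()
//		if err != nil {
//			return fmt.Errorf("couldn't list instances in %s: %v", group, err)
//		}
//		var names []string
//		for _, l := range strings.Fields(string(out)) {
//			parts := strings.Split(l, "/")
//			names = append(names, parts[len(parts)-1])
//		}
//
//		// Step 2: Start the recreate for all instances at once.
//		out, err = exec.Command("gcloud", "compute", "instance-groups", "managed",
//			"--project="+project, "--zone="+zone,
//			"recreate-instances", group,
//			"--instance="+strings.Join(names, ",")).CombinedOutput()
//		if err != nil {
//			return fmt.Errorf("couldn't recreate instances in %s: %v, output: %s", group, err, out)
//		}
//		// The operation name is whatever follows the last colon in the output,
//		// mirroring op=${output##*:} in the bash above.
//		outStr := strings.TrimSpace(string(out))
//		op := outStr[strings.LastIndex(outStr, ":")+1:]
//
//		// Step 3: Poll the operation until it reports DONE.
//		return wait.Poll(framework.Poll, framework.RestartPerNodeTimeout, func() (bool, error) {
//			status, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
//				"--zone="+zone, "get-operation", op).CombinedOutput()
//			if err != nil {
//				return false, nil
//			}
//			return strings.Contains(string(status), "DONE"), nil
//		})
//	}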