/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/apis/batch"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	// How long to wait for a job to finish.
	jobTimeout = 15 * time.Minute

	// Job selector name
	jobSelectorKey = "job"
)

var _ = framework.KubeDescribe("Job", func() {
	f := framework.NewDefaultFramework("job")
	parallelism := int32(2)
	completions := int32(4)
	lotsOfFailures := int32(5) // more than completions

	// Simplest case: all pods succeed promptly
	It("should run a job to completion when tasks succeed", func() {
		By("Creating a job")
		job := newTestJob("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	// Pods sometimes fail, but eventually succeed.
	It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
		By("Creating a job")
		// One failure, then a success, local restarts.
		// We can't use the random failure approach used by the
		// non-local test below, because kubelet will throttle
		// frequently failing containers in a given pod, ramping
		// up to 5 minutes between restarts, making test timeouts
		// due to successive failures too likely with a reasonable
		// test timeout.
		job := newTestJob("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	// Pods sometimes fail, but eventually succeed, after pod restarts.
	It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
		By("Creating a job")
		// 50% chance of container success, no local restarts.
		// Can't use the failOnce approach because that relies
		// on an emptyDir, which is not preserved across new pods.
		// Worst case analysis: 15 failures, each taking 1 minute to
		// run due to some slowness, 1 in 2^15 chance of happening,
		// causing test flake. Should be very rare.
		job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})
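
	// With RestartPolicyNever, a failed container is not restarted in place;
	// the job controller replaces the pod, so Status.Failed keeps growing.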
	It("should keep restarting failed pods", func() {
		By("Creating a job")
		job := newTestJob("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job shows many failures")
		err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
			curr, err := f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
			if err != nil {
				return false, err
			}
			return curr.Status.Failed > lotsOfFailures, nil
		})
		Expect(err).NotTo(HaveOccurred())
	})
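
	// Scaling a Job adjusts spec.parallelism; "notTerminate" pods sleep
	// indefinitely, so the count of running pods should track the new value.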
	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job up")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job down")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})
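
	// The kubectl reaper scales the job to zero and deletes its pods before
	// removing the job object, so a subsequent Get should return NotFound.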
	It("should delete a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("delete a job")
		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})
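
	// Setting ActiveDeadlineSeconds well below the pods' runtime should cause
	// the job controller to mark the job failed once the deadline is exceeded.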
	It("should fail a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		activeDeadlineSeconds := int64(10)
		job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was failed")
		err = waitForJobFail(f.Client, f.Namespace.Name, job.Name)
		Expect(err).NotTo(HaveOccurred())
	})
})

// newTestJob returns a job which does one of several testing behaviors.
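// Supported behaviors are "notTerminate", "fail", "succeed",
// "randomlySucceedOrFail", and "failOnce"; see the switch below.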
func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job {
	job := &batch.Job{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: batch.JobSpec{
			Parallelism:    &parallelism,
			Completions:    &completions,
			ManualSelector: newBool(true),
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{jobSelectorKey: name},
				},
				Spec: api.PodSpec{
					RestartPolicy: rPol,
					Volumes: []api.Volume{
						{
							Name: "data",
							VolumeSource: api.VolumeSource{
								EmptyDir: &api.EmptyDirVolumeSource{},
							},
						},
					},
					Containers: []api.Container{
						{
							Name:    "c",
							Image:   "gcr.io/google_containers/busybox:1.24",
							Command: []string{},
							VolumeMounts: []api.VolumeMount{
								{
									MountPath: "/data",
									Name:      "data",
								},
							},
						},
					},
				},
			},
		},
	}

	switch behavior {
	case "notTerminate":
		job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
	case "fail":
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
	case "succeed":
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
	case "randomlySucceedOrFail":
		// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
		// Dividing by 16384 gives roughly 50/50 chance of success.
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
	case "failOnce":
		// Fail the first time the container of the pod is run, and
		// succeed the second time. Checks for a file on the emptyDir.
		// If present, succeed. If not, create it but fail.
		// Note that this cannot be used with RestartPolicyNever because
		// it always fails the first time a pod runs it.
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
	}
	return job
}
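
// createJob creates the given Job in the given namespace through the
// extensions API group of the unversioned client.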
func createJob(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) {
	return c.Extensions().Jobs(ns).Create(job)
}
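
// deleteJob deletes the named Job in the given namespace.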
func deleteJob(c *client.Client, ns, name string) error {
	return c.Extensions().Jobs(ns).Delete(name, nil)
}

// waitForAllPodsRunning waits for all of the job's pods to become Running.
// Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int32) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		options := api.ListOptions{LabelSelector: label}
		pods, err := c.Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		count := int32(0)
		for _, p := range pods.Items {
			if p.Status.Phase == api.PodRunning {
				count++
			}
		}
		return count == parallelism, nil
	})
}

// waitForJobFinish waits for the job to reach the given number of completions.
func waitForJobFinish(c *client.Client, ns, jobName string, completions int32) error {
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		curr, err := c.Extensions().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		return curr.Status.Succeeded == completions, nil
	})
}

// waitForJobFail waits for the job to be marked failed, i.e. for its
// JobFailed condition to become true.
func waitForJobFail(c *client.Client, ns, jobName string) error {
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		curr, err := c.Extensions().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		for _, c := range curr.Status.Conditions {
			if c.Type == batch.JobFailed && c.Status == api.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}
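
// newBool returns a pointer to a bool with the given value, for use with
// pointer-typed spec fields such as ManualSelector.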
func newBool(val bool) *bool {
	p := new(bool)
	*p = val
	return p
}