// disruption_test.go
  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package disruption
  14. import (
  15. "fmt"
  16. "reflect"
  17. "testing"
  18. "k8s.io/kubernetes/pkg/api"
  19. "k8s.io/kubernetes/pkg/api/testapi"
  20. "k8s.io/kubernetes/pkg/api/unversioned"
  21. "k8s.io/kubernetes/pkg/apis/extensions"
  22. "k8s.io/kubernetes/pkg/apis/policy"
  23. "k8s.io/kubernetes/pkg/client/cache"
  24. "k8s.io/kubernetes/pkg/client/record"
  25. "k8s.io/kubernetes/pkg/controller"
  26. "k8s.io/kubernetes/pkg/util/intstr"
  27. "k8s.io/kubernetes/pkg/util/uuid"
  28. )
// pdbStates records the most recent PodDisruptionBudget handed to the fake
// status updater, keyed by the controller cache key (namespace/name).
type pdbStates map[string]policy.PodDisruptionBudget
  30. func (ps *pdbStates) Set(pdb *policy.PodDisruptionBudget) error {
  31. key, err := controller.KeyFunc(pdb)
  32. if err != nil {
  33. return err
  34. }
  35. obj, err := api.Scheme.DeepCopy(*pdb)
  36. if err != nil {
  37. return err
  38. }
  39. (*ps)[key] = obj.(policy.PodDisruptionBudget)
  40. return nil
  41. }
  42. func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
  43. return (*ps)[key]
  44. }
  45. func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionAllowed bool, currentHealthy, desiredHealthy, expectedPods int32) {
  46. expectedStatus := policy.PodDisruptionBudgetStatus{
  47. PodDisruptionAllowed: disruptionAllowed,
  48. CurrentHealthy: currentHealthy,
  49. DesiredHealthy: desiredHealthy,
  50. ExpectedPods: expectedPods,
  51. }
  52. actualStatus := ps.Get(key).Status
  53. if !reflect.DeepEqual(actualStatus, expectedStatus) {
  54. t.Fatalf("PDB %q status mismatch. Expected %+v but got %+v.", key, expectedStatus, actualStatus)
  55. }
  56. }
  57. func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionAllowed bool) {
  58. pdb := ps.Get(key)
  59. if pdb.Status.PodDisruptionAllowed != disruptionAllowed {
  60. t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionAllowed, pdb.Status.PodDisruptionAllowed)
  61. }
  62. }
// newFakeDisruptionController builds a DisruptionController backed by
// in-memory listers and a pdbStates capture for status updates, so tests can
// populate stores, call dc.sync directly, and inspect the resulting statuses.
func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
	ps := &pdbStates{}
	dc := &DisruptionController{
		// Plain in-memory stores/indexers; tests fill them via add()/update().
		pdbLister: cache.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
		podLister: cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
		rcLister:  cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
		rsLister:  cache.StoreToReplicaSetLister{Store: cache.NewStore(controller.KeyFunc)},
		dLister:   cache.StoreToDeploymentLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
		// Route status writes into ps instead of an API server.
		getUpdater:  func() updater { return ps.Set },
		broadcaster: record.NewBroadcaster(),
	}
	dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "disruption_test"})
	return dc, ps
}
  77. func fooBar() map[string]string {
  78. return map[string]string{"foo": "bar"}
  79. }
  80. func newSel(labels map[string]string) *unversioned.LabelSelector {
  81. return &unversioned.LabelSelector{MatchLabels: labels}
  82. }
  83. func newSelFooBar() *unversioned.LabelSelector {
  84. return newSel(map[string]string{"foo": "bar"})
  85. }
// newPodDisruptionBudget returns a PDB named "foobar" in the default
// namespace that selects foo=bar pods with the given minAvailable, along with
// its controller cache key.
func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
	pdb := &policy.PodDisruptionBudget{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: policy.PodDisruptionBudgetSpec{
			MinAvailable: minAvailable,
			Selector:     newSelFooBar(),
		},
	}
	pdbName, err := controller.KeyFunc(pdb)
	if err != nil {
		t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
	}
	return pdb, pdbName
}
// newPod returns a pod with the given name, labeled foo=bar in the default
// namespace and marked Ready, along with its controller cache key.
func newPod(t *testing.T, name string) (*api.Pod, string) {
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			UID:             uuid.NewUUID(),
			Annotations:     make(map[string]string),
			Name:            name,
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: api.PodSpec{},
		Status: api.PodStatus{
			// PodReady=True makes the controller count this pod as healthy;
			// tests clear Conditions to simulate an unavailable pod.
			Conditions: []api.PodCondition{
				{Type: api.PodReady, Status: api.ConditionTrue},
			},
		},
	}
	podName, err := controller.KeyFunc(pod)
	if err != nil {
		t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
	}
	return pod, podName
}
  130. func newReplicationController(t *testing.T, size int32) (*api.ReplicationController, string) {
  131. rc := &api.ReplicationController{
  132. TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
  133. ObjectMeta: api.ObjectMeta{
  134. UID: uuid.NewUUID(),
  135. Name: "foobar",
  136. Namespace: api.NamespaceDefault,
  137. ResourceVersion: "18",
  138. Labels: fooBar(),
  139. },
  140. Spec: api.ReplicationControllerSpec{
  141. Replicas: size,
  142. Selector: fooBar(),
  143. },
  144. }
  145. rcName, err := controller.KeyFunc(rc)
  146. if err != nil {
  147. t.Fatalf("Unexpected error naming RC %q", rc.Name)
  148. }
  149. return rc, rcName
  150. }
// newDeployment returns a Deployment named "foobar" with the given replica
// count that selects foo=bar pods, along with its cache key.
func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
	d := &extensions.Deployment{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: extensions.DeploymentSpec{
			Replicas: size,
			Selector: newSelFooBar(),
		},
	}
	dName, err := controller.KeyFunc(d)
	if err != nil {
		t.Fatalf("Unexpected error naming Deployment %q: %v", d.Name, err)
	}
	return d, dName
}
// newReplicaSet returns a ReplicaSet named "foobar" with the given replica
// count that selects foo=bar pods, along with its cache key.
func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
	rs := &extensions.ReplicaSet{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: extensions.ReplicaSetSpec{
			Replicas: size,
			Selector: newSelFooBar(),
		},
	}
	rsName, err := controller.KeyFunc(rs)
	if err != nil {
		t.Fatalf("Unexpected error naming ReplicaSet %q: %v", rs.Name, err)
	}
	return rs, rsName
}
  193. func update(t *testing.T, store cache.Store, obj interface{}) {
  194. if err := store.Update(obj); err != nil {
  195. t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
  196. }
  197. }
  198. func add(t *testing.T, store cache.Store, obj interface{}) {
  199. if err := store.Add(obj); err != nil {
  200. t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
  201. }
  202. }
// Create one with no selector. Verify it matches 0 pods.
func TestNoSelector(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromInt(3))
	// Empty selector: the PDB should never match any pod.
	pdb.Spec.Selector = &unversioned.LabelSelector{}
	pod, _ := newPod(t, "yo-yo-yo")
	add(t, dc.pdbLister.Store, pdb)
	dc.sync(pdbName)
	// No pods matched, minAvailable=3 => 0 healthy, no disruption allowed.
	ps.VerifyPdbStatus(t, pdbName, false, 0, 3, 0)
	add(t, dc.podLister.Indexer, pod)
	dc.sync(pdbName)
	// Adding a pod changes nothing because the selector matches nothing.
	ps.VerifyPdbStatus(t, pdbName, false, 0, 3, 0)
}
// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromInt(3))
	add(t, dc.pdbLister.Store, pdb)
	dc.sync(pdbName)
	// Add three pods, verifying that the counts go up at each step.
	pods := []*api.Pod{}
	for i := int32(0); i < 3; i++ {
		// Before adding pod i: i healthy pods seen so far, still below
		// minAvailable=3, so no disruption allowed yet.
		ps.VerifyPdbStatus(t, pdbName, false, i, 3, i)
		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
		pods = append(pods, pod)
		add(t, dc.podLister.Indexer, pod)
		dc.sync(pdbName)
	}
	// All 3 pods healthy => disruption allowed.
	ps.VerifyPdbStatus(t, pdbName, true, 3, 3, 3)
	// Now set one pod as unavailable
	pods[0].Status.Conditions = []api.PodCondition{}
	update(t, dc.podLister.Indexer, pods[0])
	dc.sync(pdbName)
	// Verify expected update
	ps.VerifyPdbStatus(t, pdbName, false, 2, 3, 3)
}
// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbLister.Store, pdb)
	dc.sync(pdbName)
	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
	ps.VerifyDisruptionAllowed(t, pdbName, false)
	pod, _ := newPod(t, "naked")
	add(t, dc.podLister.Indexer, pod)
	dc.sync(pdbName)
	// A percentage PDB needs a controller to resolve the expected pod count;
	// a controller-less ("naked") pod must keep disruptions disallowed.
	ps.VerifyDisruptionAllowed(t, pdbName, false)
}
// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("20%"))
	add(t, dc.pdbLister.Store, pdb)
	rs, _ := newReplicaSet(t, 10)
	add(t, dc.rsLister.Store, rs)
	pod, _ := newPod(t, "pod")
	add(t, dc.podLister.Indexer, pod)
	dc.sync(pdbName)
	// 20% of the RS's 10 replicas rounds to 2 desired-healthy; only 1 pod is
	// healthy, so no disruption allowed.
	ps.VerifyPdbStatus(t, pdbName, false, 1, 2, 10)
}
// Verify that multiple controllers doesn't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
	const rcCount = 2
	const podCount = 2
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("1%"))
	add(t, dc.pdbLister.Store, pdb)
	for i := 0; i < podCount; i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
		add(t, dc.podLister.Indexer, pod)
	}
	dc.sync(pdbName)
	// No controllers yet => no disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, false)
	rc, _ := newReplicationController(t, 1)
	rc.Name = "rc 1"
	add(t, dc.rcLister.Indexer, rc)
	dc.sync(pdbName)
	// One RC and 200%>1% healthy => disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, true)
	rc, _ = newReplicationController(t, 1)
	rc.Name = "rc 2"
	add(t, dc.rcLister.Indexer, rc)
	dc.sync(pdbName)
	// 100%>1% healthy BUT two RCs => no disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, false)
}
func TestReplicationController(t *testing.T) {
	// The budget in this test matches foo=bar, but the RC and its pods match
	// {foo=bar, baz=quux}. Later, when we add a rogue pod with only a foo=bar
	// label, it will match the budget but have no controllers, which should
	// trigger the controller to set PodDisruptionAllowed to false.
	labels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}
	dc, ps := newFakeDisruptionController()
	// 67% should round up to 3
	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("67%"))
	add(t, dc.pdbLister.Store, pdb)
	rc, _ := newReplicationController(t, 3)
	rc.Spec.Selector = labels
	add(t, dc.rcLister.Indexer, rc)
	dc.sync(pdbName)
	// It starts out at 0 expected because, with no pods, the PDB doesn't know
	// about the RC. This is a known bug. TODO(mml): file issue
	ps.VerifyPdbStatus(t, pdbName, false, 0, 0, 0)
	pods := []*api.Pod{}
	for i := int32(0); i < 3; i++ {
		pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
		pods = append(pods, pod)
		pod.Labels = labels
		add(t, dc.podLister.Indexer, pod)
		dc.sync(pdbName)
		if i < 2 {
			// Fewer than 3 healthy pods => disruption still disallowed.
			ps.VerifyPdbStatus(t, pdbName, false, i+1, 3, 3)
		} else {
			// All 3 of the RC's pods healthy => disruption allowed.
			ps.VerifyPdbStatus(t, pdbName, true, 3, 3, 3)
		}
	}
	// The rogue pod matches the budget (foo=bar) but no controller, which
	// must flip the budget back to disallowing disruptions.
	rogue, _ := newPod(t, "rogue")
	add(t, dc.podLister.Indexer, rogue)
	dc.sync(pdbName)
	ps.VerifyDisruptionAllowed(t, pdbName, false)
}
func TestTwoControllers(t *testing.T) {
	// Most of this test is in verifying intermediate cases as we define the
	// three controllers and create the pods.
	rcLabels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}
	dLabels := map[string]string{
		"foo": "bar",
		"baz": "quuux",
	}
	dc, ps := newFakeDisruptionController()
	// 28% of 11 RC replicas => 4 desired-healthy (per the expectations below).
	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbLister.Store, pdb)
	rc, _ := newReplicationController(t, 11)
	rc.Spec.Selector = rcLabels
	add(t, dc.rcLister.Indexer, rc)
	dc.sync(pdbName)
	// With no pods yet, the PDB can't discover the RC (same known bug as in
	// TestReplicationController), so everything reads 0.
	ps.VerifyPdbStatus(t, pdbName, false, 0, 0, 0)
	pods := []*api.Pod{}
	for i := int32(0); i < 11; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
		pods = append(pods, pod)
		pod.Labels = rcLabels
		// The first 7 RC pods are created unavailable (no Ready condition).
		if i < 7 {
			pod.Status.Conditions = []api.PodCondition{}
		}
		add(t, dc.podLister.Indexer, pod)
		dc.sync(pdbName)
		if i < 7 {
			// Only unavailable pods so far: 0 healthy of 4 desired.
			ps.VerifyPdbStatus(t, pdbName, false, 0, 4, 11)
		} else if i < 10 {
			// Healthy pods accumulate: pods 7..9 give 1..3 healthy.
			ps.VerifyPdbStatus(t, pdbName, false, i-6, 4, 11)
		} else {
			// Pod 10 brings us to 4 healthy == desired => disruption allowed.
			ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
		}
	}
	// A Deployment with a disjoint selector (quuux) and no pods yet should not
	// change the status.
	d, _ := newDeployment(t, 11)
	d.Spec.Selector = newSel(dLabels)
	add(t, dc.dLister.Indexer, d)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
	// Likewise for the Deployment's ReplicaSet before its pods exist.
	rs, _ := newReplicaSet(t, 11)
	rs.Spec.Selector = newSel(dLabels)
	rs.Labels = dLabels
	add(t, dc.rsLister.Store, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
	// Add the deployment's 11 pods; expected pods doubles to 22 and
	// desired-healthy becomes 7.
	for i := int32(0); i < 11; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
		pods = append(pods, pod)
		pod.Labels = dLabels
		// Again the first 7 are created unavailable.
		if i < 7 {
			pod.Status.Conditions = []api.PodCondition{}
		}
		add(t, dc.podLister.Indexer, pod)
		dc.sync(pdbName)
		if i < 7 {
			ps.VerifyPdbStatus(t, pdbName, false, 4, 7, 22)
		} else if i < 9 {
			ps.VerifyPdbStatus(t, pdbName, false, 4+i-6, 7, 22)
		} else {
			ps.VerifyPdbStatus(t, pdbName, true, 4+i-6, 7, 22)
		}
	}
	// Now we verify we can bring down 1 pod and a disruption is still permitted,
	// but if we bring down two, it's not. Then we make the pod ready again and
	// verify that a disruption is permitted again.
	ps.VerifyPdbStatus(t, pdbName, true, 8, 7, 22)
	pods[10].Status.Conditions = []api.PodCondition{}
	update(t, dc.podLister.Indexer, pods[10])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, true, 7, 7, 22)
	pods[9].Status.Conditions = []api.PodCondition{}
	update(t, dc.podLister.Indexer, pods[9])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, false, 6, 7, 22)
	pods[10].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
	update(t, dc.podLister.Indexer, pods[10])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, true, 7, 7, 22)
}