/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemon

import (
	"fmt"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/securitycontext"
)

var (
	simpleDaemonSetLabel  = map[string]string{"name": "simple-daemon", "type": "production"}
	simpleDaemonSetLabel2 = map[string]string{"name": "simple-daemon", "type": "test"}
	simpleNodeLabel       = map[string]string{"color": "blue", "speed": "fast"}
	simpleNodeLabel2      = map[string]string{"color": "red", "speed": "fast"}
	alwaysReady           = func() bool { return true }
)

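// getKey returns the workqueue key for ds, failing the test if the key cannot be derived.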
func getKey(ds *extensions.DaemonSet, t *testing.T) string {
	if key, err := controller.KeyFunc(ds); err != nil {
		t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err)
		return ""
	} else {
		return key
	}
}

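// newDaemonSet returns a DaemonSet fixture in the default namespace whose selector and pod
// template use simpleDaemonSetLabel.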
func newDaemonSet(name string) *extensions.DaemonSet {
	return &extensions.DaemonSet{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Namespace: api.NamespaceDefault,
		},
		Spec: extensions.DaemonSetSpec{
			Selector: &unversioned.LabelSelector{MatchLabels: simpleDaemonSetLabel},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: simpleDaemonSetLabel,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Image: "foo/bar",
							TerminationMessagePath: api.TerminationMessagePathDefault,
							ImagePullPolicy:        api.PullIfNotPresent,
							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
						},
					},
					DNSPolicy: api.DNSDefault,
				},
			},
		},
	}
}

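// newNode returns a Ready node fixture with the given name and labels and room for 100 pods.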
func newNode(name string, label map[string]string) *api.Node {
	return &api.Node{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Labels:    label,
			Namespace: api.NamespaceDefault,
		},
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
			Allocatable: api.ResourceList{
				api.ResourcePods: resource.MustParse("100"),
			},
		},
	}
}

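// addNodes adds numNodes nodes named node-<i>, starting at startIndex, all carrying the given label.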
func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]string) {
	for i := startIndex; i < startIndex+numNodes; i++ {
		nodeStore.Add(newNode(fmt.Sprintf("node-%d", i), label))
	}
}

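// newPod returns a pod fixture bound to nodeName, labeled with label, and named via
// GenerateName from the podName prefix.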
func newPod(podName string, nodeName string, label map[string]string) *api.Pod {
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			GenerateName: podName,
			Labels:       label,
			Namespace:    api.NamespaceDefault,
		},
		Spec: api.PodSpec{
			NodeName: nodeName,
			Containers: []api.Container{
				{
					Image: "foo/bar",
					TerminationMessagePath: api.TerminationMessagePathDefault,
					ImagePullPolicy:        api.PullIfNotPresent,
					SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
				},
			},
			DNSPolicy: api.DNSDefault,
		},
	}
	api.GenerateName(api.SimpleNameGenerator, &pod.ObjectMeta)
	return pod
}

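// addPods adds number pods bound to nodeName, labeled with label, to the pod store.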
func addPods(podStore cache.Store, nodeName string, label map[string]string, number int) {
	for i := 0; i < number; i++ {
		podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label))
	}
}

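// newTestController returns a DaemonSetsController backed by a fake clientset and a
// FakePodControl, with the pod store reported as always synced.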
func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewDaemonSetsControllerFromClient(clientset, controller.NoResyncPeriodFunc, 0)
	manager.podStoreSynced = alwaysReady
	podControl := &controller.FakePodControl{}
	manager.podControl = podControl
	return manager, podControl
}

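// validateSyncDaemonSets asserts that fakePodControl recorded the expected numbers of pod
// creations and deletions.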
func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
	if len(fakePodControl.Templates) != expectedCreates {
		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != expectedDeletes {
		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName))
	}
}

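// syncAndValidateDaemonSets runs one sync pass for ds and then checks the create/delete counts.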
func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
	key, err := controller.KeyFunc(ds)
	if err != nil {
		t.Errorf("Could not get key for daemon.")
	}
	manager.syncHandler(key)
	validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes)
}

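// Deleting a DaemonSet via a DeletedFinalStateUnknown tombstone should still enqueue its key.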
func TestDeleteFinalStateUnknown(t *testing.T) {
	manager, _ := newTestController()
	addNodes(manager.nodeStore.Store, 0, 1, nil)
	ds := newDaemonSet("foo")
	// DeletedFinalStateUnknown should queue the embedded DS if found.
	manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
	enqueuedKey, _ := manager.queue.Get()
	if enqueuedKey.(string) != "default/foo" {
		t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey)
	}
}

// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 5, nil)
	ds := newDaemonSet("foo")
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
}

// DaemonSets should do nothing if there aren't any nodes.
func TestNoNodesDoesNothing(t *testing.T) {
	manager, podControl := newTestController()
	ds := newDaemonSet("foo")
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

// DaemonSets without node selectors should launch on a single node in a
// single node cluster.
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
	manager, podControl := newTestController()
	manager.nodeStore.Add(newNode("only-node", nil))
	ds := newDaemonSet("foo")
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// DaemonSets should place onto NotReady nodes.
func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
	manager, podControl := newTestController()
	node := newNode("not-ready", nil)
	node.Status.Conditions = []api.NodeCondition{
		{Type: api.NodeReady, Status: api.ConditionFalse},
	}
	manager.nodeStore.Add(node)
	ds := newDaemonSet("foo")
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// DaemonSets should not place onto OutOfDisk nodes.
func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
	manager, podControl := newTestController()
	node := newNode("not-enough-disk", nil)
	node.Status.Conditions = []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}
	manager.nodeStore.Add(node)
	ds := newDaemonSet("foo")
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

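// resourcePodSpec returns a pod spec bound to nodeName that requests the given memory and CPU.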
func resourcePodSpec(nodeName, memory, cpu string) api.PodSpec {
	return api.PodSpec{
		NodeName: nodeName,
		Containers: []api.Container{{
			Resources: api.ResourceRequirements{
				Requests: allocatableResources(memory, cpu),
			},
		}},
	}
}

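// allocatableResources returns a resource list with the given memory and CPU plus room for 100 pods.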
func allocatableResources(memory, cpu string) api.ResourceList {
	return api.ResourceList{
		api.ResourceMemory: resource.MustParse(memory),
		api.ResourceCPU:    resource.MustParse(cpu),
		api.ResourcePods:   resource.MustParse("100"),
	}
}

// DaemonSets should not place onto nodes with insufficient free resources.
func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
	podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
	manager, podControl := newTestController()
	node := newNode("too-much-mem", nil)
	node.Status.Allocatable = allocatableResources("100M", "200m")
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		Spec: podSpec,
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

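// Terminated pods should not count against a node's capacity, so the daemon pod still fits.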
func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
	podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
	manager, podControl := newTestController()
	node := newNode("too-much-mem", nil)
	node.Status.Allocatable = allocatableResources("100M", "200m")
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		Spec:   podSpec,
		Status: api.PodStatus{Phase: api.PodSucceeded},
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// DaemonSets should place onto nodes with sufficient free resources.
func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
	podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
	manager, podControl := newTestController()
	node := newNode("not-too-much-mem", nil)
	node.Status.Allocatable = allocatableResources("200M", "200m")
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		Spec: podSpec,
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// DaemonSets should not take any actions when being deleted.
func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
	podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
	manager, podControl := newTestController()
	node := newNode("not-too-much-mem", nil)
	node.Status.Allocatable = allocatableResources("200M", "200m")
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		Spec: podSpec,
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec
	now := unversioned.Now()
	ds.DeletionTimestamp = &now
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

// DaemonSets should not place onto nodes that would cause port conflicts.
func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
	podSpec := api.PodSpec{
		NodeName: "port-conflict",
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 666,
			}},
		}},
	}
	manager, podControl := newTestController()
	node := newNode("port-conflict", nil)
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		Spec: podSpec,
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

// Test that if the node is already scheduled with a pod using a host port
// but belonging to the same daemonset, we don't delete that pod.
//
// Issue: https://github.com/kubernetes/kubernetes/issues/22309
func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
	podSpec := api.PodSpec{
		NodeName: "port-conflict",
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 666,
			}},
		}},
	}
	manager, podControl := newTestController()
	node := newNode("port-conflict", nil)
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels:    simpleDaemonSetLabel,
			Namespace: api.NamespaceDefault,
		},
		Spec: podSpec,
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

// DaemonSets should place onto nodes that would not cause port conflicts.
func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
	podSpec1 := api.PodSpec{
		NodeName: "no-port-conflict",
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 6661,
			}},
		}},
	}
	podSpec2 := api.PodSpec{
		NodeName: "no-port-conflict",
		Containers: []api.Container{{
			Ports: []api.ContainerPort{{
				HostPort: 6662,
			}},
		}},
	}
	manager, podControl := newTestController()
	node := newNode("no-port-conflict", nil)
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		Spec: podSpec1,
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec2
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// DaemonSetController should not sync DaemonSets with empty pod selectors.
//
// issue https://github.com/kubernetes/kubernetes/pull/23223
func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
	manager, podControl := newTestController()
	manager.nodeStore.Store.Add(newNode("node1", nil))
	// Create pod not controlled by a daemonset.
	manager.podStore.Add(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels:    map[string]string{"bang": "boom"},
			Namespace: api.NamespaceDefault,
		},
		Spec: api.PodSpec{
			NodeName: "node1",
		},
	})

	// Create a misconfigured DaemonSet. An empty pod selector is invalid but could happen
	// if we upgrade and make a backwards-incompatible change.
	//
	// The node selector matches no nodes, which mimics the behavior of kubectl delete.
	//
	// The DaemonSet should not schedule pods and should not delete scheduled pods in
	// this case even though its empty pod selector matches all pods. The DaemonSetController
	// should detect this misconfiguration and choose not to sync the DaemonSet. We should
	// not observe a deletion of the pod on node1.
	ds := newDaemonSet("foo")
	ls := unversioned.LabelSelector{}
	ds.Spec.Selector = &ls
	ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"}
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

// Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
func TestDealsWithExistingPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 5, nil)
	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
	addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 2)
	addPods(manager.podStore.Indexer, "node-3", simpleDaemonSetLabel, 5)
	addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel2, 2)
	ds := newDaemonSet("foo")
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
}

// Daemon with node selector should launch pods on nodes matching selector.
func TestSelectorDaemonLaunchesPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 4, nil)
	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
	daemon := newDaemonSet("foo")
	daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
	manager.dsStore.Add(daemon)
	syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
}

// Daemon with node selector should delete pods from nodes that do not satisfy selector.
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 5, nil)
	addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
	addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel2, 2)
	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 1)
	addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel, 1)
	daemon := newDaemonSet("foo")
	daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
	manager.dsStore.Add(daemon)
	syncAndValidateDaemonSets(t, manager, daemon, podControl, 5, 4)
}

// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 5, nil)
	addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
	addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 2)
	addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 4)
	addPods(manager.podStore.Indexer, "node-6", simpleDaemonSetLabel, 13)
	addPods(manager.podStore.Indexer, "node-7", simpleDaemonSetLabel2, 4)
	addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel, 1)
	addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel2, 1)
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20)
}

// DaemonSet with node selector which does not match any node labels should not launch pods.
func TestBadSelectorDaemonDoesNothing(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 4, nil)
	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

// DaemonSet with node name should launch pod on node with corresponding name.
func TestNameDaemonSetLaunchesPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 5, nil)
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec.NodeName = "node-0"
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// DaemonSet with node name that does not exist should not launch pods.
func TestBadNameDaemonSetDoesNothing(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 5, nil)
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec.NodeName = "node-10"
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 4, nil)
	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
	ds.Spec.Template.Spec.NodeName = "node-6"
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 4, nil)
	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
	ds.Spec.Template.Spec.NodeName = "node-0"
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

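// The controller should not create pods until the pod store has synced; the DaemonSet is
// requeued instead and handled once the store reports ready.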
func TestDSManagerNotReady(t *testing.T) {
	manager, podControl := newTestController()
	manager.podStoreSynced = func() bool { return false }
	addNodes(manager.nodeStore.Store, 0, 1, nil)

	// Simulates the ds reflector running before the pod reflector. We don't
	// want to end up creating daemon pods in this case until the pod reflector
	// has synced, so the ds manager should just requeue the ds.
	ds := newDaemonSet("foo")
	manager.dsStore.Add(ds)

	dsKey := getKey(ds, t)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)

	queueDS, _ := manager.queue.Get()
	if queueDS != dsKey {
		t.Fatalf("Expected to find key %v in queue, found %v", dsKey, queueDS)
	}

	manager.podStoreSynced = alwaysReady
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}

// Daemon with node affinity should launch pods on nodes matching affinity.
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
	manager, podControl := newTestController()
	addNodes(manager.nodeStore.Store, 0, 4, nil)
	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
	daemon := newDaemonSet("foo")
	affinity := map[string]string{
		api.AffinityAnnotationKey: fmt.Sprintf(`
			{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
				"nodeSelectorTerms": [{
					"matchExpressions": [{
						"key": "color",
						"operator": "In",
						"values": ["%s"]
					}]
				}]
			}}}`, simpleNodeLabel["color"]),
	}
	daemon.Spec.Template.ObjectMeta.Annotations = affinity
	manager.dsStore.Add(daemon)
	syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
}

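// A tainted node should not get a daemon pod whose template carries no matching toleration.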
func TestNodeTaintDaemonDoesntLaunchIntolerantPods(t *testing.T) {
	manager, podControl := newTestController()
	node := newNode("", nil)
	node.ObjectMeta.Annotations = map[string]string{
		api.TaintsAnnotationKey: `[{"key":"dedicated","value":"master","effect":"NoSchedule"}]`,
	}
	manager.nodeStore.Store.Add(node)
	daemon := newDaemonSet("foo")
	manager.dsStore.Add(daemon)
	syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
}

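// A tainted node should get a daemon pod when the pod template tolerates the taint.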
func TestNodeTaintDaemonLaunchesTolerantPods(t *testing.T) {
	manager, podControl := newTestController()
	node := newNode("", nil)
	node.ObjectMeta.Annotations = map[string]string{
		api.TaintsAnnotationKey: `[{"key":"dedicated","value":"master","effect":"NoSchedule"}]`,
	}
	manager.nodeStore.Store.Add(node)
	daemon := newDaemonSet("foo")
	daemon.Spec.Template.ObjectMeta.Annotations = map[string]string{
		api.TolerationsAnnotationKey: `[{"key":"dedicated","operator":"Equal","value":"master"}]`,
	}
	manager.dsStore.Add(daemon)
	syncAndValidateDaemonSets(t, manager, daemon, podControl, 1, 0)
}