12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141 |
- /*
- Copyright 2016 The Kubernetes Authors.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */
- package eviction
- import (
- "testing"
- "time"
- "k8s.io/kubernetes/pkg/api"
- "k8s.io/kubernetes/pkg/api/resource"
- "k8s.io/kubernetes/pkg/client/record"
- statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
- "k8s.io/kubernetes/pkg/kubelet/lifecycle"
- "k8s.io/kubernetes/pkg/types"
- "k8s.io/kubernetes/pkg/util/clock"
- )
// mockPodKiller records the arguments of the most recent kill request so
// tests can assert which pod was chosen and with what grace period.
type mockPodKiller struct {
	pod                 *api.Pod   // last pod passed to killPodNow (nil if none)
	status              api.PodStatus
	gracePeriodOverride *int64     // grace period override supplied with the kill
}

// killPodNow records the pod that was killed; it matches the manager's
// killPodFunc signature and always reports success.
func (m *mockPodKiller) killPodNow(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error {
	m.pod = pod
	m.status = status
	m.gracePeriodOverride = gracePeriodOverride
	return nil
}
// mockDiskInfoProvider fakes disk topology information for tests.
type mockDiskInfoProvider struct {
	dedicatedImageFs bool // value reported by HasDedicatedImageFs
}

// HasDedicatedImageFs reports the configured value and never fails.
func (p *mockDiskInfoProvider) HasDedicatedImageFs() (bool, error) {
	dedicated := p.dedicatedImageFs
	return dedicated, nil
}
// mockImageGC simulates invoking image garbage collection: it records that
// it ran and returns preconfigured results.
type mockImageGC struct {
	err     error // error to return from DeleteUnusedImages
	freed   int64 // bytes reported as freed
	invoked bool  // set once DeleteUnusedImages has been called
}

// DeleteUnusedImages marks the collector as invoked and returns the
// configured freed-bytes value and error.
func (g *mockImageGC) DeleteUnusedImages() (int64, error) {
	g.invoked = true
	freed, err := g.freed, g.err
	return freed, err
}
// TestMemoryPressure drives the eviction manager through a full memory
// pressure lifecycle: no pressure initially, soft-threshold pressure gated
// by a grace period, hard-threshold eviction with a zero grace period,
// pod admission behavior while under pressure, and clearing of the
// condition only after the pressure transition period elapses.
func TestMemoryPressure(t *testing.T) {
	// podMaker builds a single-container pod plus stats reporting the given
	// memory working set.
	podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
		pod := newPod(name, []api.Container{
			newContainer(name, requests, limits),
		}, nil)
		podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
		return pod, podStats
	}
	// summaryStatsMaker fabricates a node summary with the given available
	// memory and the supplied per-pod stats.
	summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
		val := resource.MustParse(nodeAvailableBytes)
		availableBytes := uint64(val.Value())
		WorkingSetBytes := uint64(val.Value())
		result := &statsapi.Summary{
			Node: statsapi.NodeStats{
				Memory: &statsapi.MemoryStats{
					AvailableBytes:  &availableBytes,
					WorkingSetBytes: &WorkingSetBytes,
				},
			},
			Pods: []statsapi.PodStats{},
		}
		for _, podStat := range podStats {
			result.Pods = append(result.Pods, podStat)
		}
		return result
	}
	// pods spanning each QoS class with varying memory usage.
	podsToMake := []struct {
		name             string
		requests         api.ResourceList
		limits           api.ResourceList
		memoryWorkingSet string
	}{
		{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"},
		{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"},
		{name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "800Mi"},
		{name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"},
		{name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"},
		{name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"},
	}
	pods := []*api.Pod{}
	podStats := map[*api.Pod]statsapi.PodStats{}
	for _, podToMake := range podsToMake {
		pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
		pods = append(pods, pod)
		podStats[pod] = podStat
	}
	activePodsFunc := func() []*api.Pod {
		return pods
	}
	fakeClock := clock.NewFakeClock(time.Now())
	podKiller := &mockPodKiller{}
	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
	imageGC := &mockImageGC{freed: int64(0), err: nil}
	nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
	// two memory thresholds: a hard one at 1Gi (no grace period) and a soft
	// one at 2Gi with a 2 minute grace period.
	config := Config{
		MaxPodGracePeriodSeconds: 5,
		PressureTransitionPeriod: time.Minute * 5,
		Thresholds: []Threshold{
			{
				Signal:   SignalMemoryAvailable,
				Operator: OpLessThan,
				Value: ThresholdValue{
					Quantity: quantityMustParse("1Gi"),
				},
			},
			{
				Signal:   SignalMemoryAvailable,
				Operator: OpLessThan,
				Value: ThresholdValue{
					Quantity: quantityMustParse("2Gi"),
				},
				GracePeriod: time.Minute * 2,
			},
		},
	}
	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
	manager := &managerImpl{
		clock:                        fakeClock,
		killPodFunc:                  podKiller.killPodNow,
		imageGC:                      imageGC,
		config:                       config,
		recorder:                     &record.FakeRecorder{},
		summaryProvider:              summaryProvider,
		nodeRef:                      nodeRef,
		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
		thresholdsFirstObservedAt:    thresholdsObservedAt{},
	}
	// create a best effort and a burstable pod to test admission behavior
	bestEffortPodToAdmit, _ := podMaker("best-admit", newResourceList("", ""), newResourceList("", ""), "0Gi")
	burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi")
	// synchronize
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have memory pressure
	if manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should not report memory pressure")
	}
	// try to admit our pods (they should succeed)
	expected := []bool{true, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}
	// induce soft threshold (1500Mi < 2Gi soft threshold, above 1Gi hard)
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have memory pressure
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure since soft threshold was met")
	}
	// verify no pod was yet killed because there has not yet been enough time passed.
	if podKiller.pod != nil {
		t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod)
	}
	// step forward in time past the 2 minute grace period
	fakeClock.Step(3 * time.Minute)
	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have memory pressure
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure since soft threshold was met")
	}
	// verify the right pod was killed with the right grace period.
	if podKiller.pod != pods[0] {
		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
	}
	if podKiller.gracePeriodOverride == nil {
		t.Errorf("Manager chose to kill pod but should have had a grace period override.")
	}
	observedGracePeriod := *podKiller.gracePeriodOverride
	// soft eviction uses the manager's max grace period
	if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds {
		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod)
	}
	// reset state
	podKiller.pod = nil
	podKiller.gracePeriodOverride = nil
	// remove memory pressure
	fakeClock.Step(20 * time.Minute)
	summaryProvider.result = summaryStatsMaker("3Gi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have memory pressure
	if manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should not report memory pressure")
	}
	// induce memory pressure below the hard threshold!
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("500Mi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have memory pressure
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure")
	}
	// check the right pod was killed
	if podKiller.pod != pods[0] {
		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
	}
	observedGracePeriod = *podKiller.gracePeriodOverride
	// hard eviction kills immediately (zero grace period)
	if observedGracePeriod != int64(0) {
		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
	}
	// the best-effort pod should not admit, burstable should
	expected = []bool{false, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}
	// reduce memory pressure
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have memory pressure (because transition period not yet met)
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure")
	}
	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}
	// the best-effort pod should not admit, burstable should
	expected = []bool{false, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}
	// move the clock past transition period to ensure that we stop reporting pressure
	fakeClock.Step(5 * time.Minute)
	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have memory pressure (because transition period met)
	if manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should not report memory pressure")
	}
	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}
	// all pods should admit now
	expected = []bool{true, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}
}
- // parseQuantity parses the specified value (if provided) otherwise returns 0 value
- func parseQuantity(value string) resource.Quantity {
- if len(value) == 0 {
- return resource.MustParse("0")
- }
- return resource.MustParse(value)
- }
// TestDiskPressureNodeFs drives the eviction manager through a node-fs disk
// pressure lifecycle: no pressure initially, soft-threshold pressure gated
// by a grace period, hard-threshold eviction with a zero grace period,
// admission rejection under pressure, and clearing of the condition only
// after the pressure transition period elapses.
func TestDiskPressureNodeFs(t *testing.T) {
	// podMaker builds a single-container pod plus stats reporting the given
	// rootfs, logs, and per-local-volume disk usage.
	podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) {
		pod := newPod(name, []api.Container{
			newContainer(name, requests, limits),
		}, nil)
		podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
		return pod, podStats
	}
	// summaryStatsMaker fabricates a node summary with the given rootfs and
	// imagefs availability (capacity is fixed at twice the available bytes).
	summaryStatsMaker := func(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
		rootFsVal := resource.MustParse(rootFsAvailableBytes)
		rootFsBytes := uint64(rootFsVal.Value())
		rootFsCapacityBytes := uint64(rootFsVal.Value() * 2)
		imageFsVal := resource.MustParse(imageFsAvailableBytes)
		imageFsBytes := uint64(imageFsVal.Value())
		imageFsCapacityBytes := uint64(imageFsVal.Value() * 2)
		result := &statsapi.Summary{
			Node: statsapi.NodeStats{
				Fs: &statsapi.FsStats{
					AvailableBytes: &rootFsBytes,
					CapacityBytes:  &rootFsCapacityBytes,
				},
				Runtime: &statsapi.RuntimeStats{
					ImageFs: &statsapi.FsStats{
						AvailableBytes: &imageFsBytes,
						CapacityBytes:  &imageFsCapacityBytes,
					},
				},
			},
			Pods: []statsapi.PodStats{},
		}
		for _, podStat := range podStats {
			result.Pods = append(result.Pods, podStat)
		}
		return result
	}
	// pods spanning each QoS class with varying disk usage.
	podsToMake := []struct {
		name               string
		requests           api.ResourceList
		limits             api.ResourceList
		rootFsUsed         string
		logsFsUsed         string
		perLocalVolumeUsed string
	}{
		{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "500Mi"},
		{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), perLocalVolumeUsed: "300Mi"},
		{name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), rootFsUsed: "800Mi"},
		{name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), logsFsUsed: "300Mi"},
		{name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "800Mi"},
		{name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "200Mi"},
	}
	pods := []*api.Pod{}
	podStats := map[*api.Pod]statsapi.PodStats{}
	for _, podToMake := range podsToMake {
		pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
		pods = append(pods, pod)
		podStats[pod] = podStat
	}
	activePodsFunc := func() []*api.Pod {
		return pods
	}
	fakeClock := clock.NewFakeClock(time.Now())
	podKiller := &mockPodKiller{}
	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
	imageGC := &mockImageGC{freed: int64(0), err: nil}
	nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
	// two nodefs thresholds: a hard one at 1Gi (no grace period) and a soft
	// one at 2Gi with a 2 minute grace period.
	config := Config{
		MaxPodGracePeriodSeconds: 5,
		PressureTransitionPeriod: time.Minute * 5,
		Thresholds: []Threshold{
			{
				Signal:   SignalNodeFsAvailable,
				Operator: OpLessThan,
				Value: ThresholdValue{
					Quantity: quantityMustParse("1Gi"),
				},
			},
			{
				Signal:   SignalNodeFsAvailable,
				Operator: OpLessThan,
				Value: ThresholdValue{
					Quantity: quantityMustParse("2Gi"),
				},
				GracePeriod: time.Minute * 2,
			},
		},
	}
	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
	manager := &managerImpl{
		clock:                        fakeClock,
		killPodFunc:                  podKiller.killPodNow,
		imageGC:                      imageGC,
		config:                       config,
		recorder:                     &record.FakeRecorder{},
		summaryProvider:              summaryProvider,
		nodeRef:                      nodeRef,
		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
		thresholdsFirstObservedAt:    thresholdsObservedAt{},
	}
	// create a best effort pod to test admission
	podToAdmit, _ := podMaker("pod-to-admit", newResourceList("", ""), newResourceList("", ""), "0Gi", "0Gi", "0Gi")
	// synchronize
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have disk pressure
	if manager.IsUnderDiskPressure() {
		t.Errorf("Manager should not report disk pressure")
	}
	// try to admit our pod (should succeed)
	if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
		t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
	}
	// induce soft threshold (1.5Gi < 2Gi soft threshold, above 1Gi hard)
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("1.5Gi", "200Gi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have disk pressure
	if !manager.IsUnderDiskPressure() {
		t.Errorf("Manager should report disk pressure since soft threshold was met")
	}
	// verify no pod was yet killed because there has not yet been enough time passed.
	if podKiller.pod != nil {
		t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod)
	}
	// step forward in time past the 2 minute grace period
	fakeClock.Step(3 * time.Minute)
	summaryProvider.result = summaryStatsMaker("1.5Gi", "200Gi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have disk pressure
	if !manager.IsUnderDiskPressure() {
		t.Errorf("Manager should report disk pressure since soft threshold was met")
	}
	// verify the right pod was killed with the right grace period.
	if podKiller.pod != pods[0] {
		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
	}
	if podKiller.gracePeriodOverride == nil {
		t.Errorf("Manager chose to kill pod but should have had a grace period override.")
	}
	observedGracePeriod := *podKiller.gracePeriodOverride
	// soft eviction uses the manager's max grace period
	if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds {
		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod)
	}
	// reset state
	podKiller.pod = nil
	podKiller.gracePeriodOverride = nil
	// remove disk pressure
	fakeClock.Step(20 * time.Minute)
	summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have disk pressure
	if manager.IsUnderDiskPressure() {
		t.Errorf("Manager should not report disk pressure")
	}
	// induce disk pressure below the hard threshold!
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("500Mi", "200Gi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have disk pressure
	if !manager.IsUnderDiskPressure() {
		t.Errorf("Manager should report disk pressure")
	}
	// check the right pod was killed
	if podKiller.pod != pods[0] {
		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
	}
	observedGracePeriod = *podKiller.gracePeriodOverride
	// hard eviction kills immediately (zero grace period)
	if observedGracePeriod != int64(0) {
		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
	}
	// try to admit our pod (should fail)
	if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
		t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
	}
	// reduce disk pressure
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have disk pressure (because transition period not yet met)
	if !manager.IsUnderDiskPressure() {
		t.Errorf("Manager should report disk pressure")
	}
	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}
	// try to admit our pod (should fail)
	if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
		t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
	}
	// move the clock past transition period to ensure that we stop reporting pressure
	fakeClock.Step(5 * time.Minute)
	summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have disk pressure (because transition period met)
	if manager.IsUnderDiskPressure() {
		t.Errorf("Manager should not report disk pressure")
	}
	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}
	// try to admit our pod (should succeed)
	if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
		t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
	}
}
// TestMinReclaim verifies that min-reclaim works as desired: once a memory
// threshold is crossed, the manager keeps evicting until available memory
// exceeds the threshold PLUS the configured min-reclaim amount, and the
// pressure condition clears only after the transition period.
func TestMinReclaim(t *testing.T) {
	// podMaker builds a single-container pod plus stats reporting the given
	// memory working set.
	podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
		pod := newPod(name, []api.Container{
			newContainer(name, requests, limits),
		}, nil)
		podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
		return pod, podStats
	}
	// summaryStatsMaker fabricates a node summary with the given available
	// memory and the supplied per-pod stats.
	summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
		val := resource.MustParse(nodeAvailableBytes)
		availableBytes := uint64(val.Value())
		WorkingSetBytes := uint64(val.Value())
		result := &statsapi.Summary{
			Node: statsapi.NodeStats{
				Memory: &statsapi.MemoryStats{
					AvailableBytes:  &availableBytes,
					WorkingSetBytes: &WorkingSetBytes,
				},
			},
			Pods: []statsapi.PodStats{},
		}
		for _, podStat := range podStats {
			result.Pods = append(result.Pods, podStat)
		}
		return result
	}
	// pods spanning each QoS class with varying memory usage.
	podsToMake := []struct {
		name             string
		requests         api.ResourceList
		limits           api.ResourceList
		memoryWorkingSet string
	}{
		{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"},
		{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"},
		{name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "800Mi"},
		{name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"},
		{name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"},
		{name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"},
	}
	pods := []*api.Pod{}
	podStats := map[*api.Pod]statsapi.PodStats{}
	for _, podToMake := range podsToMake {
		pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
		pods = append(pods, pod)
		podStats[pod] = podStat
	}
	activePodsFunc := func() []*api.Pod {
		return pods
	}
	fakeClock := clock.NewFakeClock(time.Now())
	podKiller := &mockPodKiller{}
	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
	imageGC := &mockImageGC{freed: int64(0), err: nil}
	nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
	// single hard memory threshold at 1Gi with 500Mi min-reclaim, so eviction
	// should continue until at least 1.5Gi is available.
	config := Config{
		MaxPodGracePeriodSeconds: 5,
		PressureTransitionPeriod: time.Minute * 5,
		Thresholds: []Threshold{
			{
				Signal:   SignalMemoryAvailable,
				Operator: OpLessThan,
				Value: ThresholdValue{
					Quantity: quantityMustParse("1Gi"),
				},
				MinReclaim: quantityMustParse("500Mi"),
			},
		},
	}
	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
	manager := &managerImpl{
		clock:                        fakeClock,
		killPodFunc:                  podKiller.killPodNow,
		imageGC:                      imageGC,
		config:                       config,
		recorder:                     &record.FakeRecorder{},
		summaryProvider:              summaryProvider,
		nodeRef:                      nodeRef,
		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
		thresholdsFirstObservedAt:    thresholdsObservedAt{},
	}
	// synchronize
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have memory pressure
	if manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should not report memory pressure")
	}
	// induce memory pressure!
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("500Mi", podStats)
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have memory pressure
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure")
	}
	// check the right pod was killed
	if podKiller.pod != pods[0] {
		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
	}
	observedGracePeriod := *podKiller.gracePeriodOverride
	if observedGracePeriod != int64(0) {
		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
	}
	// reduce memory pressure, but not below the min-reclaim amount
	// (1.2Gi is above the 1Gi threshold but below threshold + 500Mi)
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("1.2Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have memory pressure (because transition period not yet met)
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure")
	}
	// check the right pod was killed: min-reclaim not yet satisfied, so
	// eviction continues even though we are back above the threshold.
	if podKiller.pod != pods[0] {
		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
	}
	observedGracePeriod = *podKiller.gracePeriodOverride
	if observedGracePeriod != int64(0) {
		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
	}
	// reduce memory pressure and ensure the min-reclaim amount
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should have memory pressure (because transition period not yet met)
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure")
	}
	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}
	// move the clock past transition period to ensure that we stop reporting pressure
	fakeClock.Step(5 * time.Minute)
	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(diskInfoProvider, activePodsFunc)
	// we should not have memory pressure (because transition period met)
	if manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should not report memory pressure")
	}
	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}
}
- func TestNodeReclaimFuncs(t *testing.T) {
- podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) {
- pod := newPod(name, []api.Container{
- newContainer(name, requests, limits),
- }, nil)
- podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
- return pod, podStats
- }
- summaryStatsMaker := func(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
- rootFsVal := resource.MustParse(rootFsAvailableBytes)
- rootFsBytes := uint64(rootFsVal.Value())
- rootFsCapacityBytes := uint64(rootFsVal.Value() * 2)
- imageFsVal := resource.MustParse(imageFsAvailableBytes)
- imageFsBytes := uint64(imageFsVal.Value())
- imageFsCapacityBytes := uint64(imageFsVal.Value() * 2)
- result := &statsapi.Summary{
- Node: statsapi.NodeStats{
- Fs: &statsapi.FsStats{
- AvailableBytes: &rootFsBytes,
- CapacityBytes: &rootFsCapacityBytes,
- },
- Runtime: &statsapi.RuntimeStats{
- ImageFs: &statsapi.FsStats{
- AvailableBytes: &imageFsBytes,
- CapacityBytes: &imageFsCapacityBytes,
- },
- },
- },
- Pods: []statsapi.PodStats{},
- }
- for _, podStat := range podStats {
- result.Pods = append(result.Pods, podStat)
- }
- return result
- }
- podsToMake := []struct {
- name string
- requests api.ResourceList
- limits api.ResourceList
- rootFsUsed string
- logsFsUsed string
- perLocalVolumeUsed string
- }{
- {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "500Mi"},
- {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), perLocalVolumeUsed: "300Mi"},
- {name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), rootFsUsed: "800Mi"},
- {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), logsFsUsed: "300Mi"},
- {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "800Mi"},
- {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "200Mi"},
- }
- pods := []*api.Pod{}
- podStats := map[*api.Pod]statsapi.PodStats{}
- for _, podToMake := range podsToMake {
- pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
- pods = append(pods, pod)
- podStats[pod] = podStat
- }
- activePodsFunc := func() []*api.Pod {
- return pods
- }
- fakeClock := clock.NewFakeClock(time.Now())
- podKiller := &mockPodKiller{}
- diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
- imageGcFree := resource.MustParse("700Mi")
- imageGC := &mockImageGC{freed: imageGcFree.Value(), err: nil}
- nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
- config := Config{
- MaxPodGracePeriodSeconds: 5,
- PressureTransitionPeriod: time.Minute * 5,
- Thresholds: []Threshold{
- {
- Signal: SignalNodeFsAvailable,
- Operator: OpLessThan,
- Value: ThresholdValue{
- Quantity: quantityMustParse("1Gi"),
- },
- MinReclaim: quantityMustParse("500Mi"),
- },
- },
- }
- summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)}
- manager := &managerImpl{
- clock: fakeClock,
- killPodFunc: podKiller.killPodNow,
- imageGC: imageGC,
- config: config,
- recorder: &record.FakeRecorder{},
- summaryProvider: summaryProvider,
- nodeRef: nodeRef,
- nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
- thresholdsFirstObservedAt: thresholdsObservedAt{},
- }
- // synchronize
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should not have disk pressure
- if manager.IsUnderDiskPressure() {
- t.Errorf("Manager should not report disk pressure")
- }
- // induce hard threshold
- fakeClock.Step(1 * time.Minute)
- summaryProvider.result = summaryStatsMaker(".9Gi", "200Gi", podStats)
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should have disk pressure
- if !manager.IsUnderDiskPressure() {
- t.Errorf("Manager should report disk pressure since soft threshold was met")
- }
- // verify image gc was invoked
- if !imageGC.invoked {
- t.Errorf("Manager should have invoked image gc")
- }
- // verify no pod was killed because image gc was sufficient
- if podKiller.pod != nil {
- t.Errorf("Manager should not have killed a pod, but killed: %v", podKiller.pod)
- }
- // reset state
- imageGC.invoked = false
- // remove disk pressure
- fakeClock.Step(20 * time.Minute)
- summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should not have disk pressure
- if manager.IsUnderDiskPressure() {
- t.Errorf("Manager should not report disk pressure")
- }
- // induce disk pressure!
- fakeClock.Step(1 * time.Minute)
- summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats)
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should have disk pressure
- if !manager.IsUnderDiskPressure() {
- t.Errorf("Manager should report disk pressure")
- }
- // ensure image gc was invoked
- if !imageGC.invoked {
- t.Errorf("Manager should have invoked image gc")
- }
- // check the right pod was killed
- if podKiller.pod != pods[0] {
- t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
- }
- observedGracePeriod := *podKiller.gracePeriodOverride
- if observedGracePeriod != int64(0) {
- t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
- }
- // reduce disk pressure
- fakeClock.Step(1 * time.Minute)
- summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
- imageGC.invoked = false // reset state
- podKiller.pod = nil // reset state
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should have disk pressure (because transition period not yet met)
- if !manager.IsUnderDiskPressure() {
- t.Errorf("Manager should report disk pressure")
- }
- // no image gc should have occurred
- if imageGC.invoked {
- t.Errorf("Manager chose to perform image gc when it was not neeed")
- }
- // no pod should have been killed
- if podKiller.pod != nil {
- t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
- }
- // move the clock past transition period to ensure that we stop reporting pressure
- fakeClock.Step(5 * time.Minute)
- summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
- imageGC.invoked = false // reset state
- podKiller.pod = nil // reset state
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should not have disk pressure (because transition period met)
- if manager.IsUnderDiskPressure() {
- t.Errorf("Manager should not report disk pressure")
- }
- // no image gc should have occurred
- if imageGC.invoked {
- t.Errorf("Manager chose to perform image gc when it was not neeed")
- }
- // no pod should have been killed
- if podKiller.pod != nil {
- t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
- }
- }
- func TestDiskPressureNodeFsInodes(t *testing.T) {
- // TODO: we need to know inodes used when cadvisor supports per container stats
- podMaker := func(name string, requests api.ResourceList, limits api.ResourceList) (*api.Pod, statsapi.PodStats) {
- pod := newPod(name, []api.Container{
- newContainer(name, requests, limits),
- }, nil)
- podStats := newPodInodeStats(pod)
- return pod, podStats
- }
- summaryStatsMaker := func(rootFsInodesFree, rootFsInodes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
- rootFsInodesFreeVal := resource.MustParse(rootFsInodesFree)
- internalRootFsInodesFree := uint64(rootFsInodesFreeVal.Value())
- rootFsInodesVal := resource.MustParse(rootFsInodes)
- internalRootFsInodes := uint64(rootFsInodesVal.Value())
- result := &statsapi.Summary{
- Node: statsapi.NodeStats{
- Fs: &statsapi.FsStats{
- InodesFree: &internalRootFsInodesFree,
- Inodes: &internalRootFsInodes,
- },
- },
- Pods: []statsapi.PodStats{},
- }
- for _, podStat := range podStats {
- result.Pods = append(result.Pods, podStat)
- }
- return result
- }
- // TODO: pass inodes used in future when supported by cadvisor.
- podsToMake := []struct {
- name string
- requests api.ResourceList
- limits api.ResourceList
- }{
- {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", "")},
- {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", "")},
- {name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi")},
- {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi")},
- {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi")},
- {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi")},
- }
- pods := []*api.Pod{}
- podStats := map[*api.Pod]statsapi.PodStats{}
- for _, podToMake := range podsToMake {
- pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits)
- pods = append(pods, pod)
- podStats[pod] = podStat
- }
- activePodsFunc := func() []*api.Pod {
- return pods
- }
- fakeClock := clock.NewFakeClock(time.Now())
- podKiller := &mockPodKiller{}
- diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
- imageGC := &mockImageGC{freed: int64(0), err: nil}
- nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
- config := Config{
- MaxPodGracePeriodSeconds: 5,
- PressureTransitionPeriod: time.Minute * 5,
- Thresholds: []Threshold{
- {
- Signal: SignalNodeFsInodesFree,
- Operator: OpLessThan,
- Value: ThresholdValue{
- Quantity: quantityMustParse("1Mi"),
- },
- },
- {
- Signal: SignalNodeFsInodesFree,
- Operator: OpLessThan,
- Value: ThresholdValue{
- Quantity: quantityMustParse("2Mi"),
- },
- GracePeriod: time.Minute * 2,
- },
- },
- }
- summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("3Mi", "4Mi", podStats)}
- manager := &managerImpl{
- clock: fakeClock,
- killPodFunc: podKiller.killPodNow,
- imageGC: imageGC,
- config: config,
- recorder: &record.FakeRecorder{},
- summaryProvider: summaryProvider,
- nodeRef: nodeRef,
- nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
- thresholdsFirstObservedAt: thresholdsObservedAt{},
- }
- // create a best effort pod to test admission
- podToAdmit, _ := podMaker("pod-to-admit", newResourceList("", ""), newResourceList("", ""))
- // synchronize
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should not have disk pressure
- if manager.IsUnderDiskPressure() {
- t.Errorf("Manager should not report disk pressure")
- }
- // try to admit our pod (should succeed)
- if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
- t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
- }
- // induce soft threshold
- fakeClock.Step(1 * time.Minute)
- summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should have disk pressure
- if !manager.IsUnderDiskPressure() {
- t.Errorf("Manager should report disk pressure since soft threshold was met")
- }
- // verify no pod was yet killed because there has not yet been enough time passed.
- if podKiller.pod != nil {
- t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod)
- }
- // step forward in time pass the grace period
- fakeClock.Step(3 * time.Minute)
- summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should have disk pressure
- if !manager.IsUnderDiskPressure() {
- t.Errorf("Manager should report disk pressure since soft threshold was met")
- }
- // verify the right pod was killed with the right grace period.
- if podKiller.pod != pods[0] {
- t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
- }
- if podKiller.gracePeriodOverride == nil {
- t.Errorf("Manager chose to kill pod but should have had a grace period override.")
- }
- observedGracePeriod := *podKiller.gracePeriodOverride
- if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds {
- t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod)
- }
- // reset state
- podKiller.pod = nil
- podKiller.gracePeriodOverride = nil
- // remove disk pressure
- fakeClock.Step(20 * time.Minute)
- summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should not have disk pressure
- if manager.IsUnderDiskPressure() {
- t.Errorf("Manager should not report disk pressure")
- }
- // induce disk pressure!
- fakeClock.Step(1 * time.Minute)
- summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats)
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should have disk pressure
- if !manager.IsUnderDiskPressure() {
- t.Errorf("Manager should report disk pressure")
- }
- // check the right pod was killed
- if podKiller.pod != pods[0] {
- t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
- }
- observedGracePeriod = *podKiller.gracePeriodOverride
- if observedGracePeriod != int64(0) {
- t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
- }
- // try to admit our pod (should fail)
- if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
- t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
- }
- // reduce disk pressure
- fakeClock.Step(1 * time.Minute)
- summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
- podKiller.pod = nil // reset state
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should have disk pressure (because transition period not yet met)
- if !manager.IsUnderDiskPressure() {
- t.Errorf("Manager should report disk pressure")
- }
- // no pod should have been killed
- if podKiller.pod != nil {
- t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
- }
- // try to admit our pod (should fail)
- if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
- t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
- }
- // move the clock past transition period to ensure that we stop reporting pressure
- fakeClock.Step(5 * time.Minute)
- summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
- podKiller.pod = nil // reset state
- manager.synchronize(diskInfoProvider, activePodsFunc)
- // we should not have disk pressure (because transition period met)
- if manager.IsUnderDiskPressure() {
- t.Errorf("Manager should not report disk pressure")
- }
- // no pod should have been killed
- if podKiller.pod != nil {
- t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
- }
- // try to admit our pod (should succeed)
- if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
- t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
- }
- }
|