replica_set.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// If you make changes to this file, you should also make the corresponding change in ReplicationController.

package replicaset

import (
	"fmt"
	"reflect"
	"sort"
	"sync"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/controller/framework/informers"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	utilerrors "k8s.io/kubernetes/pkg/util/errors"
	"k8s.io/kubernetes/pkg/util/metrics"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/util/workqueue"
	"k8s.io/kubernetes/pkg/watch"
)

const (
	// We'll attempt to recompute the required replicas of all ReplicaSets
	// that have fulfilled their expectations at least this often. This recomputation
	// happens based on contents in local pod storage.
	FullControllerResyncPeriod = 30 * time.Second

	// Realistic value of the burstReplica field for the replica set manager based off
	// performance requirements for kubernetes 1.0.
	BurstReplicas = 500

	// We must avoid counting pods until the pod store has synced. If it hasn't synced, to
	// avoid a hot loop, we'll wait this long between checks.
	PodStoreSyncedPollPeriod = 100 * time.Millisecond

	// The number of times we retry updating a ReplicaSet's status.
	statusUpdateRetries = 1
)

func getRSKind() unversioned.GroupVersionKind {
	return v1beta1.SchemeGroupVersion.WithKind("ReplicaSet")
}

// ReplicaSetController is responsible for synchronizing ReplicaSet objects stored
// in the system with actual running pods.
type ReplicaSetController struct {
	kubeClient clientset.Interface
	podControl controller.PodControlInterface

	// internalPodInformer is used to hold a personal informer. If we're using
	// a normal shared informer, then the informer will be started for us. If
	// we have a personal informer, we must start it ourselves. If you start
	// the controller using NewReplicaSetController (passing a SharedInformer),
	// this will be nil.
	internalPodInformer framework.SharedIndexInformer

	// A ReplicaSet is temporarily suspended after creating/deleting this many replicas.
	// It resumes normal action after observing the watch events for them.
	burstReplicas int
	// To allow injection of syncReplicaSet for testing.
	syncHandler func(rsKey string) error

	// A TTLCache of pod creates/deletes each ReplicaSet expects to see.
	expectations *controller.UIDTrackingControllerExpectations

	// A store of ReplicaSets, populated by the rsController
	rsStore cache.StoreToReplicaSetLister
	// Watches changes to all ReplicaSets
	rsController *framework.Controller
	// A store of pods, populated by the podController
	podStore cache.StoreToPodLister
	// Watches changes to all pods
	podController framework.ControllerInterface
	// podStoreSynced returns true if the pod store has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	podStoreSynced func() bool

	lookupCache *controller.MatchingCache

	// Controllers that need to be synced
	queue *workqueue.Type

	// garbageCollectorEnabled denotes if the garbage collector is enabled. The
	// ReplicaSet controller behaves differently if GC is enabled.
	garbageCollectorEnabled bool
}

// NewReplicaSetController creates a new ReplicaSetController.
func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	return newReplicaSetController(
		eventBroadcaster.NewRecorder(api.EventSource{Component: "replicaset-controller"}),
		podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
}

// newReplicaSetController configures a replica set controller with the specified event recorder
func newReplicaSetController(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rsc := &ReplicaSetController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventRecorder,
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.NewNamed("replicaset"),
		garbageCollectorEnabled: garbageCollectorEnabled,
	}

	rsc.rsStore.Store, rsc.rsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.ReplicaSet{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    rsc.enqueueReplicaSet,
			UpdateFunc: rsc.updateRS,
			// This will enter the sync loop and no-op, because the replica set has been deleted from the store.
			// Note that deleting a replica set immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the replica set.
			DeleteFunc: rsc.enqueueReplicaSet,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: rsc.addPod,
		// This invokes the ReplicaSet for every pod change, eg: host assignment. Though this might seem like
		// overkill, the most frequent pod update is status, and the associated ReplicaSet will only list from
		// local storage, so it should be ok.
		UpdateFunc: rsc.updatePod,
		DeleteFunc: rsc.deletePod,
	})

	rsc.podStore.Indexer = podInformer.GetIndexer()
	rsc.podController = podInformer.GetController()

	rsc.syncHandler = rsc.syncReplicaSet
	rsc.podStoreSynced = rsc.podController.HasSynced
	rsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rsc
}

// NewReplicaSetControllerFromClient creates a new ReplicaSetController that runs its own informer.
func NewReplicaSetControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
	podInformer := informers.NewPodInformer(kubeClient, resyncPeriod())
	garbageCollectorEnabled := false
	rsc := NewReplicaSetController(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
	rsc.internalPodInformer = podInformer
	return rsc
}

// SetEventRecorder replaces the event recorder used by the ReplicaSetController
// with the given recorder. Only used for testing.
func (rsc *ReplicaSetController) SetEventRecorder(recorder record.EventRecorder) {
	// TODO: Hack. We can't cleanly shutdown the event recorder, so benchmarks
	// need to pass in a fake.
	rsc.podControl = controller.RealPodControl{KubeClient: rsc.kubeClient, Recorder: recorder}
}

// Run begins watching and syncing.
func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rsc.rsController.Run(stopCh)
	go rsc.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(rsc.worker, time.Second, stopCh)
	}

	if rsc.internalPodInformer != nil {
		go rsc.internalPodInformer.Run(stopCh)
	}

	<-stopCh
	glog.Infof("Shutting down ReplicaSet Controller")
	rsc.queue.ShutDown()
}
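
// exampleRunReplicaSetController is a minimal usage sketch (illustrative only,
// not used by the controller itself): assuming a configured clientset, it shows
// how the controller is typically constructed from a client and started. The
// resync period, worker count and lookup-cache size below are illustrative
// values, not upstream defaults.
func exampleRunReplicaSetController(kubeClient clientset.Interface, stopCh <-chan struct{}) {
	// Recompute desired state from local caches every 5 minutes (illustrative).
	resync := func() time.Duration { return 5 * time.Minute }
	rsc := NewReplicaSetControllerFromClient(kubeClient, resync, BurstReplicas, 4096)
	// Run blocks until stopCh is closed; 5 is the number of sync workers.
	rsc.Run(5, stopCh)
}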

// getPodReplicaSet returns the replica set managing the given pod.
// TODO: Surface that we are ignoring multiple replica sets for a single pod.
// TODO: use ownerReference.Controller to determine if the rs controls the pod.
func (rsc *ReplicaSetController) getPodReplicaSet(pod *api.Pod) *extensions.ReplicaSet {
	// Look up in the cache; if the entry is present and still valid, just return the cached value.
	if obj, cached := rsc.lookupCache.GetMatchingObject(pod); cached {
		rs, ok := obj.(*extensions.ReplicaSet)
		if !ok {
			// This should not happen
			glog.Errorf("lookup cache does not return a ReplicaSet object")
			return nil
		}
		if cached && rsc.isCacheValid(pod, rs) {
			return rs
		}
	}

	// If not cached, or the cached value is invalid, search all ReplicaSets to find the matching one, and update the cache.
	rss, err := rsc.rsStore.GetPodReplicaSets(pod)
	if err != nil {
		glog.V(4).Infof("No ReplicaSets found for pod %v, ReplicaSet controller will avoid syncing", pod.Name)
		return nil
	}
	// In theory, overlapping ReplicaSets is user error. This sorting will not prevent
	// oscillation of replicas in all cases, eg:
	// rs1 (older rs): [(k1=v1)], replicas=1
	// rs2: [(k2=v2)], replicas=2
	// pod: [(k1:v1), (k2:v2)] will wake both rs1 and rs2, and we will sync rs1.
	// pod: [(k2:v2)] will wake rs2 which creates a new replica.
	if len(rss) > 1 {
		// More than one item in this list indicates user error. If ReplicaSets
		// overlap, sort by creation timestamp, subsort by name, then pick
		// the first.
		glog.Errorf("user error! more than one ReplicaSet is selecting pods with labels: %+v", pod.Labels)
		sort.Sort(overlappingReplicaSets(rss))
	}

	// update lookup cache
	rsc.lookupCache.Update(pod, &rss[0])

	return &rss[0]
}
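
// exampleOverlappingReplicaSets is an illustrative sketch of the ordering
// described above: older ReplicaSets first, ties broken by name. The actual
// overlappingReplicaSets helper used by getPodReplicaSet is defined elsewhere
// in this package; this sketch only assumes it behaves as the comment says.
type exampleOverlappingReplicaSets []extensions.ReplicaSet

func (o exampleOverlappingReplicaSets) Len() int      { return len(o) }
func (o exampleOverlappingReplicaSets) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o exampleOverlappingReplicaSets) Less(i, j int) bool {
	// Sort by creation timestamp, subsort by name, so the oldest ReplicaSet
	// deterministically wins when selectors overlap.
	if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}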

// callback when RS is updated
func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
	oldRS := old.(*extensions.ReplicaSet)
	curRS := cur.(*extensions.ReplicaSet)

	// We should invalidate the whole lookup cache if a RS's selector has been updated.
	//
	// Imagine that you have two RSs:
	// * old RS1
	// * new RS2
	// You also have a pod that is attached to RS2 (because it doesn't match RS1's selector).
	// Now imagine that you are changing RS1's selector so that it now matches that pod;
	// in such a case we must invalidate the whole cache so that the pod can be adopted by RS1.
	//
	// This makes the lookup cache less helpful, but selector updates don't happen often,
	// so it's not a big problem.
	if !reflect.DeepEqual(oldRS.Spec.Selector, curRS.Spec.Selector) {
		rsc.lookupCache.InvalidateAll()
	}

	// You might imagine that we only really need to enqueue the
	// replica set when Spec changes, but it is safer to sync any
	// time this function is triggered. That way a full informer
	// resync can requeue any replica sets that don't yet have pods
	// but whose last attempts at creating a pod have failed (since
	// we don't block on creation of pods) instead of those
	// replica sets stalling indefinitely. Enqueueing every time
	// does result in some spurious syncs (like when Status.Replicas
	// is updated and the watch notification from it retriggers
	// this function), but in general extra resyncs shouldn't be
	// that bad as ReplicaSets that haven't met expectations yet won't
	// sync, and all the listing is done using local stores.
	if oldRS.Status.Replicas != curRS.Status.Replicas {
		glog.V(4).Infof("Observed updated replica count for ReplicaSet: %v, %d->%d", curRS.Name, oldRS.Status.Replicas, curRS.Status.Replicas)
	}
	rsc.enqueueReplicaSet(cur)
}

// isCacheValid checks if the cache is valid
func (rsc *ReplicaSetController) isCacheValid(pod *api.Pod, cachedRS *extensions.ReplicaSet) bool {
	_, exists, err := rsc.rsStore.Get(cachedRS)
	// rs has been deleted or updated, cache is invalid
	if err != nil || !exists || !isReplicaSetMatch(pod, cachedRS) {
		return false
	}
	return true
}

// isReplicaSetMatch takes a Pod and a ReplicaSet and returns whether the Pod and the ReplicaSet match.
// TODO(mqliang): This logic is a copy from GetPodReplicaSets(), remove the duplication
func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool {
	if rs.Namespace != pod.Namespace {
		return false
	}
	selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		err = fmt.Errorf("invalid selector: %v", err)
		return false
	}

	// If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything.
	if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
		return false
	}
	return true
}

// When a pod is created, enqueue the replica set that manages it and update its expectations.
func (rsc *ReplicaSetController) addPod(obj interface{}) {
	pod := obj.(*api.Pod)
	glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)

	rs := rsc.getPodReplicaSet(pod)
	if rs == nil {
		return
	}
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		glog.Errorf("Couldn't get key for replica set %#v: %v", rs, err)
		return
	}

	if pod.DeletionTimestamp != nil {
		// On a restart of the controller manager, it's possible for a new pod to show up in a state that
		// is already pending deletion. Prevent the pod from being counted as a creation observation.
		rsc.deletePod(pod)
		return
	}
	rsc.expectations.CreationObserved(rsKey)
	rsc.enqueueReplicaSet(rs)
}

// When a pod is updated, figure out which replica set(s) manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new replica sets. old and cur must be *api.Pod types.
func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
	curPod := cur.(*api.Pod)
	oldPod := old.(*api.Pod)
	if curPod.ResourceVersion == oldPod.ResourceVersion {
		// Periodic resync will send update events for all known pods.
		// Two different versions of the same pod will always have different RVs.
		return
	}
	glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
	labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
	if curPod.DeletionTimestamp != nil {
		// When a pod is deleted gracefully, its deletion timestamp is first modified to reflect a grace period,
		// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
		// for modification of the deletion timestamp and expect an rs to create more replicas asap, not wait
		// until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
		// an rs never initiates a phase change, and so is never asleep waiting for the same.
		rsc.deletePod(curPod)
		if labelChanged {
			// we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
			rsc.deletePod(oldPod)
		}
		return
	}

	// Enqueue the old ReplicaSet before the current one, to give the current
	// ReplicaSet a chance to adopt the old pod.
	if labelChanged {
		// If the old and new ReplicaSets are the same, the first one that syncs
		// will set expectations, preventing any damage from the second.
		if oldRS := rsc.getPodReplicaSet(oldPod); oldRS != nil {
			rsc.enqueueReplicaSet(oldRS)
		}
	}
	if curRS := rsc.getPodReplicaSet(curPod); curRS != nil {
		rsc.enqueueReplicaSet(curRS)
	}
}

// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletedFinalStateUnknown marker item.
func (rsc *ReplicaSetController) deletePod(obj interface{}) {
	pod, ok := obj.(*api.Pod)

	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new ReplicaSet will not be woken up until the periodic resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("Couldn't get object from tombstone %+v", obj)
			return
		}
		pod, ok = tombstone.Obj.(*api.Pod)
		if !ok {
			glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
			return
		}
	}
	glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
	if rs := rsc.getPodReplicaSet(pod); rs != nil {
		rsKey, err := controller.KeyFunc(rs)
		if err != nil {
			glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)
			return
		}
		rsc.expectations.DeletionObserved(rsKey, controller.PodKey(pod))
		rsc.enqueueReplicaSet(rs)
	}
}

// obj could be an *extensions.ReplicaSet, or a DeletedFinalStateUnknown marker item.
func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}

	// TODO: Handle overlapping replica sets better. Either disallow them at admission time or
	// deterministically avoid syncing replica sets that fight over pods. Currently, we only
	// ensure that the same replica set is synced for a given pod. When we periodically relist
	// all replica sets there will still be some replica instability. One way to handle this is
	// by querying the store for all replica sets that this replica set overlaps, as well as all
	// replica sets that overlap this ReplicaSet, and sorting them.
	rsc.queue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (rsc *ReplicaSetController) worker() {
	for {
		func() {
			key, quit := rsc.queue.Get()
			if quit {
				return
			}
			defer rsc.queue.Done(key)
			err := rsc.syncHandler(key.(string))
			if err != nil {
				glog.Errorf("Error syncing ReplicaSet: %v", err)
			}
		}()
	}
}

// manageReplicas checks and updates replicas for the given ReplicaSet.
// Does NOT modify <filteredPods>.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) {
	diff := len(filteredPods) - int(rs.Spec.Replicas)
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)
		return
	}
	if diff < 0 {
		diff *= -1
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		// TODO: Track UIDs of creates just like deletes. The problem currently
		// is we'd need to wait on the result of a create to record the pod's
		// UID, which would require locking *across* the create, which will turn
		// into a performance bottleneck. We should generate a UID for the pod
		// beforehand and store it via ExpectCreations.
		rsc.expectations.ExpectCreations(rsKey, diff)
		var wg sync.WaitGroup
		wg.Add(diff)
		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wg.Done()
				var err error
				if rsc.garbageCollectorEnabled {
					var trueVar = true
					controllerRef := &api.OwnerReference{
						APIVersion: getRSKind().GroupVersion().String(),
						Kind:       getRSKind().Kind,
						Name:       rs.Name,
						UID:        rs.UID,
						Controller: &trueVar,
					}
					err = rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, controllerRef)
				} else {
					err = rsc.podControl.CreatePods(rs.Namespace, &rs.Spec.Template, rs)
				}
				if err != nil {
					// Decrement the expected number of creates because the informer won't observe this pod
					glog.V(2).Infof("Failed creation, decrementing expectations for replica set %q/%q", rs.Namespace, rs.Name)
					rsc.expectations.CreationObserved(rsKey)
					utilruntime.HandleError(err)
				}
			}()
		}
		wg.Wait()
	} else if diff > 0 {
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
		// No need to sort pods if we are about to delete all of them
		if rs.Spec.Replicas != 0 {
			// Sort the pods in the order such that not-ready < ready, unscheduled
			// < scheduled, and pending < running. This ensures that we delete pods
			// in the earlier stages whenever possible.
			sort.Sort(controller.ActivePods(filteredPods))
		}
		// Snapshot the UIDs (ns/name) of the pods we're expecting to see
		// deleted, so we know to record their expectations exactly once either
		// when we see it as an update of the deletion timestamp, or as a delete.
		// Note that if the labels on a pod/rs change in a way that the pod gets
		// orphaned, the rs will only wake up after the expectations have
		// expired even if other pods are deleted.
		deletedPodKeys := []string{}
		for i := 0; i < diff; i++ {
			deletedPodKeys = append(deletedPodKeys, controller.PodKey(filteredPods[i]))
		}
		rsc.expectations.ExpectDeletions(rsKey, deletedPodKeys)
		var wg sync.WaitGroup
		wg.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wg.Done()
				if err := rsc.podControl.DeletePod(rs.Namespace, filteredPods[ix].Name, rs); err != nil {
					// Decrement the expected number of deletes because the informer won't observe this deletion
					podKey := controller.PodKey(filteredPods[ix])
					glog.V(2).Infof("Failed to delete %v, decrementing expectations for controller %q/%q", podKey, rs.Namespace, rs.Name)
					rsc.expectations.DeletionObserved(rsKey, podKey)
					utilruntime.HandleError(err)
				}
			}(i)
		}
		wg.Wait()
	}
}
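
// examplePodDeletionOrder is a simplified, illustrative comparator for the
// ordering manageReplicas relies on above (not-ready < ready, unscheduled <
// scheduled, pending < running). The real ordering is implemented by
// controller.ActivePods elsewhere; this sketch only assumes it follows the
// comment's description.
func examplePodDeletionOrder(p1, p2 *api.Pod) bool {
	// Unscheduled pods are deleted before scheduled ones.
	if p1.Spec.NodeName == "" && p2.Spec.NodeName != "" {
		return true
	}
	// Pending pods are deleted before running ones.
	if p1.Status.Phase == api.PodPending && p2.Status.Phase == api.PodRunning {
		return true
	}
	// Not-ready pods are deleted before ready ones.
	if !api.IsPodReady(p1) && api.IsPodReady(p2) {
		return true
	}
	return false
}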

// syncReplicaSet will sync the ReplicaSet with the given key if it has had its expectations fulfilled,
// meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be
// invoked concurrently with the same key.
func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing replica set %q (%v)", key, time.Now().Sub(startTime))
	}()

	if !rsc.podStoreSynced() {
		// Sleep so we give the pod reflector goroutine a chance to run.
		time.Sleep(PodStoreSyncedPollPeriod)
		glog.Infof("Waiting for pods controller to sync, requeuing ReplicaSet %v", key)
		rsc.queue.Add(key)
		return nil
	}

	obj, exists, err := rsc.rsStore.Store.GetByKey(key)
	if !exists {
		glog.Infof("ReplicaSet has been deleted %v", key)
		rsc.expectations.DeleteExpectations(key)
		return nil
	}
	if err != nil {
		glog.Infof("Unable to retrieve ReplicaSet %v from store: %v", key, err)
		rsc.queue.Add(key)
		return err
	}
	rs := *obj.(*extensions.ReplicaSet)

	// Check the expectations of the ReplicaSet before counting active pods, otherwise a new pod can sneak
	// in and update the expectations after we've retrieved active pods from the store. If a new pod enters
	// the store after we've checked the expectation, the ReplicaSet sync is just deferred till the next
	// relist.
	rsKey, err := controller.KeyFunc(&rs)
	if err != nil {
		glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)
		return err
	}
	rsNeedsSync := rsc.expectations.SatisfiedExpectations(rsKey)
	selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		glog.Errorf("Error converting pod selector to selector: %v", err)
		return err
	}

	// NOTE: filteredPods are pointing to objects from cache - if you need to
	// modify them, you need to copy them first.
	// TODO: Do the List and Filter in a single pass, or use an index.
	var filteredPods []*api.Pod
	if rsc.garbageCollectorEnabled {
		// list all pods to include the pods that don't match the rs's selector
		// anymore but have a stale controller ref.
		pods, err := rsc.podStore.Pods(rs.Namespace).List(labels.Everything())
		if err != nil {
			glog.Errorf("Error getting pods for rs %q: %v", key, err)
			rsc.queue.Add(key)
			return err
		}
		cm := controller.NewPodControllerRefManager(rsc.podControl, rs.ObjectMeta, selector, getRSKind())
		matchesAndControlled, matchesNeedsController, controlledDoesNotMatch := cm.Classify(pods)
		for _, pod := range matchesNeedsController {
			err := cm.AdoptPod(pod)
			// continue to the next pod if adoption fails.
			if err != nil {
				// If the pod no longer exists, don't even log the error.
				if !errors.IsNotFound(err) {
					utilruntime.HandleError(err)
				}
			} else {
				matchesAndControlled = append(matchesAndControlled, pod)
			}
		}
		filteredPods = matchesAndControlled
		// remove the controllerRef for the pods that no longer have matching labels
		var errlist []error
		for _, pod := range controlledDoesNotMatch {
			err := cm.ReleasePod(pod)
			if err != nil {
				errlist = append(errlist, err)
			}
		}
		if len(errlist) != 0 {
			aggregate := utilerrors.NewAggregate(errlist)
			// push the RS into the work queue again. We need to try to free the
			// pods again, otherwise they will be stuck with the stale
			// controllerRef.
			rsc.queue.Add(key)
			return aggregate
		}
	} else {
		pods, err := rsc.podStore.Pods(rs.Namespace).List(selector)
		if err != nil {
			glog.Errorf("Error getting pods for rs %q: %v", key, err)
			rsc.queue.Add(key)
			return err
		}
		filteredPods = controller.FilterActivePods(pods)
	}

	if rsNeedsSync && rs.DeletionTimestamp == nil {
		rsc.manageReplicas(filteredPods, &rs)
	}

	// Count the number of pods that have labels matching the labels of the pod
	// template of the replica set; the matching pods may have more labels than
	// are in the template. Because the labels of the podTemplateSpec are a
	// superset of the ReplicaSet's selector, all such matching pods must be
	// part of filteredPods.
	fullyLabeledReplicasCount := 0
	readyReplicasCount := 0
	templateLabel := labels.Set(rs.Spec.Template.Labels).AsSelectorPreValidated()
	for _, pod := range filteredPods {
		if templateLabel.Matches(labels.Set(pod.Labels)) {
			fullyLabeledReplicasCount++
		}
		if api.IsPodReady(pod) {
			readyReplicasCount++
		}
	}

	// Always update the status as pods come up or die.
	if err := updateReplicaCount(rsc.kubeClient.Extensions().ReplicaSets(rs.Namespace), rs, len(filteredPods), fullyLabeledReplicasCount, readyReplicasCount); err != nil {
		// Multiple things could lead to this update failing. Requeuing the replica set ensures
		// we retry with some fairness.
		glog.V(2).Infof("Failed to update replica count for controller %v/%v; requeuing; error: %v", rs.Namespace, rs.Name, err)
		rsc.enqueueReplicaSet(&rs)
	}
	return nil
}
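
// exampleUpdateStatusWithRetries is an illustrative sketch of how the
// statusUpdateRetries constant above is typically consumed; the real status
// update logic (updateReplicaCount) lives elsewhere in this package. It simply
// bounds the number of attempts made by a status update closure.
func exampleUpdateStatusWithRetries(update func() error) error {
	var err error
	for i := 0; i <= statusUpdateRetries; i++ {
		if err = update(); err == nil {
			return nil
		}
	}
	return err
}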