- /*
- Copyright 2015 The Kubernetes Authors.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */
- package e2e
- import (
- "fmt"
- "math"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
- "k8s.io/kubernetes/pkg/api/unversioned"
- client "k8s.io/kubernetes/pkg/client/unversioned"
- "k8s.io/kubernetes/pkg/labels"
- "k8s.io/kubernetes/pkg/util/intstr"
- "k8s.io/kubernetes/pkg/util/sets"
- "k8s.io/kubernetes/pkg/util/uuid"
- "k8s.io/kubernetes/test/e2e/framework"
- )
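- // Tests that pods belonging to a service and to a replication controller are spread
- // roughly evenly across the zones of a multi-zone cluster.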
- var _ = framework.KubeDescribe("Multi-AZ Clusters", func() {
- f := framework.NewDefaultFramework("multi-az")
- var zoneCount int
- var err error
- image := "gcr.io/google_containers/serve_hostname:v1.4"
- BeforeEach(func() {
- framework.SkipUnlessProviderIs("gce", "gke", "aws")
- if zoneCount <= 0 {
- zoneCount, err = getZoneCount(f.Client)
- Expect(err).NotTo(HaveOccurred())
- }
- By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
- framework.SkipUnlessAtLeast(zoneCount, 2, fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount))
- // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
- })
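- // Each test creates (2*zoneCount)+1 pods: with more than twice as many pods as zones,
- // an even spread (per-zone counts differing by at most one) forces at least two pods into every zone.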
- It("should spread the pods of a service across zones", func() {
- SpreadServiceOrFail(f, (2*zoneCount)+1, image)
- })
- It("should spread the pods of a replication controller across zones", func() {
- SpreadRCOrFail(f, int32((2*zoneCount)+1), image)
- })
- })
- // Check that the pods comprising a service get spread evenly across available zones
- func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
- // First create the service
- serviceName := "test-service"
- serviceSpec := &api.Service{
- ObjectMeta: api.ObjectMeta{
- Name: serviceName,
- Namespace: f.Namespace.Name,
- },
- Spec: api.ServiceSpec{
- Selector: map[string]string{
- "service": serviceName,
- },
- Ports: []api.ServicePort{{
- Port: 80,
- TargetPort: intstr.FromInt(80),
- }},
- },
- }
- _, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec)
- Expect(err).NotTo(HaveOccurred())
- // Now create some pods behind the service
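- // Note: these pods run the pause image; the image parameter passed to this helper is not used here.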
- podSpec := &api.Pod{
- ObjectMeta: api.ObjectMeta{
- Name: serviceName,
- Labels: map[string]string{"service": serviceName},
- },
- Spec: api.PodSpec{
- Containers: []api.Container{
- {
- Name: "test",
- Image: framework.GetPauseImageName(f.Client),
- },
- },
- },
- }
- // Caution: StartPods requires at least one pod to replicate.
- // Based on the callers, replicaCount is always a positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
- // Thus, there is no need to test for it here. If the precondition ever changes to allow zero replicas,
- // check that replicaCount > 0 before calling StartPods; otherwise it panics.
- framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
- // Wait for all of them to be scheduled
- selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
- pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
- Expect(err).NotTo(HaveOccurred())
- // Now make sure they're spread across zones
- zoneNames, err := getZoneNames(f.Client)
- Expect(err).NotTo(HaveOccurred())
- Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
- }
- // Find the name of the zone in which a Node is running
- func getZoneNameForNode(node api.Node) (string, error) {
- if zone, ok := node.Labels[unversioned.LabelZoneFailureDomain]; ok {
- return zone, nil
- }
- return "", fmt.Errorf("Zone name for node %s not found. No label with key %s",
- node.Name, unversioned.LabelZoneFailureDomain)
- }
- // Find the names of all zones in which we have nodes in this cluster.
- func getZoneNames(c *client.Client) ([]string, error) {
- zoneNames := sets.NewString()
- nodes, err := c.Nodes().List(api.ListOptions{})
- if err != nil {
- return nil, err
- }
- for _, node := range nodes.Items {
- zoneName, err := getZoneNameForNode(node)
- Expect(err).NotTo(HaveOccurred())
- zoneNames.Insert(zoneName)
- }
- return zoneNames.List(), nil
- }
- // Return the number of zones in which we have nodes in this cluster.
- func getZoneCount(c *client.Client) (int, error) {
- zoneNames, err := getZoneNames(c)
- if err != nil {
- return -1, err
- }
- return len(zoneNames), nil
- }
- // Find the name of the zone in which the pod is scheduled
- func getZoneNameForPod(c *client.Client, pod api.Pod) (string, error) {
- By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
- node, err := c.Nodes().Get(pod.Spec.NodeName)
- Expect(err).NotTo(HaveOccurred())
- return getZoneNameForNode(*node)
- }
- // Determine whether a set of pods are approximately evenly spread
- // across a given set of zones
- func checkZoneSpreading(c *client.Client, pods *api.PodList, zoneNames []string) (bool, error) {
- podsPerZone := make(map[string]int)
- for _, zoneName := range zoneNames {
- podsPerZone[zoneName] = 0
- }
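- // Count the scheduled pods in each zone, ignoring pods that are already being deleted.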
- for _, pod := range pods.Items {
- if pod.DeletionTimestamp != nil {
- continue
- }
- zoneName, err := getZoneNameForPod(c, pod)
- Expect(err).NotTo(HaveOccurred())
- podsPerZone[zoneName] = podsPerZone[zoneName] + 1
- }
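- // Find the smallest and largest per-zone counts; the spread is considered even if they differ by at most one.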
- minPodsPerZone := math.MaxInt32
- maxPodsPerZone := 0
- for _, podCount := range podsPerZone {
- if podCount < minPodsPerZone {
- minPodsPerZone = podCount
- }
- if podCount > maxPodsPerZone {
- maxPodsPerZone = podCount
- }
- }
- Expect(minPodsPerZone).To(BeNumerically("~", maxPodsPerZone, 1),
- "Pods were not evenly spread across zones. %d in one zone and %d in another zone",
- minPodsPerZone, maxPodsPerZone)
- return true, nil
- }
- // Check that the pods comprising a replication controller get spread evenly across available zones
- func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
- name := "ubelite-spread-rc-" + string(uuid.NewUUID())
- By(fmt.Sprintf("Creating replication controller %s", name))
- controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
- Namespace: f.Namespace.Name,
- Name: name,
- },
- Spec: api.ReplicationControllerSpec{
- Replicas: replicaCount,
- Selector: map[string]string{
- "name": name,
- },
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
- Labels: map[string]string{"name": name},
- },
- Spec: api.PodSpec{
- Containers: []api.Container{
- {
- Name: name,
- Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
- },
- },
- },
- },
- },
- })
- Expect(err).NotTo(HaveOccurred())
- // Cleanup the replication controller when we are done.
- defer func() {
- // Resize the replication controller to zero to get rid of pods.
- if err := framework.DeleteRCAndPods(f.Client, f.Namespace.Name, controller.Name); err != nil {
- framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
- }
- }()
- // List the pods, making sure we observe all the replicas.
- selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
- pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicaCount)
- Expect(err).NotTo(HaveOccurred())
- // Wait for all of them to be scheduled
- By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
- pods, err = framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
- Expect(err).NotTo(HaveOccurred())
- // Now make sure they're spread across zones
- zoneNames, err := getZoneNames(f.Client)
- Expect(err).NotTo(HaveOccurred())
- Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
- }