service_latency.go 9.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343
  1. /*
  2. Copyright 2015 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package e2e
  14. import (
  15. "fmt"
  16. "sort"
  17. "strings"
  18. "time"
  19. "k8s.io/kubernetes/pkg/api"
  20. "k8s.io/kubernetes/pkg/client/cache"
  21. controllerframework "k8s.io/kubernetes/pkg/controller/framework"
  22. "k8s.io/kubernetes/pkg/runtime"
  23. "k8s.io/kubernetes/pkg/util/flowcontrol"
  24. "k8s.io/kubernetes/pkg/util/sets"
  25. "k8s.io/kubernetes/pkg/watch"
  26. "k8s.io/kubernetes/test/e2e/framework"
  27. . "github.com/onsi/ginkgo"
  28. )
  29. type durations []time.Duration
  30. func (d durations) Len() int { return len(d) }
  31. func (d durations) Less(i, j int) bool { return d[i] < d[j] }
  32. func (d durations) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
// This spec measures the latency between creating a Service and seeing
// ready endpoints for it, and fails if the median or tail latency
// exceeds (deliberately generous) limits.
var _ = framework.KubeDescribe("Service endpoints latency", func() {
	f := framework.NewDefaultFramework("svc-latency")

	It("should not be very high [Conformance]", func() {
		const (
			// These are very generous criteria. Ideally we will
			// get this much lower in the future. See issue
			// #10436.
			limitMedian = time.Second * 20
			limitTail   = time.Second * 50

			// Numbers chosen to make the test complete in a short amount
			// of time. This sample size is not actually large enough to
			// reliably measure tails (it may give false positives, but not
			// false negatives), but it should catch low hanging fruit.
			//
			// Note that these are fixed and do not depend on the
			// size of the cluster. Setting parallelTrials larger
			// distorts the measurements. Perhaps this wouldn't be
			// true on HA clusters.
			totalTrials    = 200
			parallelTrials = 15
			minSampleSize  = 100
		)

		// Turn off rate limiting--it interferes with our measurements.
		// The original limiter is restored when the spec finishes.
		oldThrottle := f.Client.RESTClient.Throttle
		f.Client.RESTClient.Throttle = flowcontrol.NewFakeAlwaysRateLimiter()
		defer func() { f.Client.RESTClient.Throttle = oldThrottle }()

		// failing accumulates every failure message so that all
		// problems are reported together in a single Fail at the end.
		failing := sets.NewString()
		d, err := runServiceLatencies(f, parallelTrials, totalTrials)
		if err != nil {
			failing.Insert(fmt.Sprintf("Not all RC/pod/service trials succeeded: %v", err))
		}
		dSorted := durations(d)
		sort.Sort(dSorted)
		n := len(dSorted)
		if n < minSampleSize {
			failing.Insert(fmt.Sprintf("Did not get a good sample size: %v", dSorted))
		}
		if n < 2 {
			// Percentiles are meaningless on fewer than two samples;
			// report what we have and abort immediately.
			failing.Insert("Less than two runs succeeded; aborting.")
			Fail(strings.Join(failing.List(), "\n"))
		}
		// percentile returns the pth percentile of the sorted sample by
		// index n*p/100, clamped to the last element.
		percentile := func(p int) time.Duration {
			est := n * p / 100
			if est >= n {
				return dSorted[n-1]
			}
			return dSorted[est]
		}
		framework.Logf("Latencies: %v", dSorted)
		p50 := percentile(50)
		p90 := percentile(90)
		p99 := percentile(99)
		framework.Logf("50 %%ile: %v", p50)
		framework.Logf("90 %%ile: %v", p90)
		framework.Logf("99 %%ile: %v", p99)
		framework.Logf("Total sample count: %v", len(dSorted))

		if p50 > limitMedian {
			failing.Insert("Median latency should be less than " + limitMedian.String())
		}
		if p99 > limitTail {
			failing.Insert("Tail (99 percentile) latency should be less than " + limitTail.String())
		}
		if failing.Len() > 0 {
			errList := strings.Join(failing.List(), "\n")
			helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
			Fail(errList + helpfulInfo)
		}
	})
})
  102. func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
  103. cfg := framework.RCConfig{
  104. Client: f.Client,
  105. Image: framework.GetPauseImageName(f.Client),
  106. Name: "svc-latency-rc",
  107. Namespace: f.Namespace.Name,
  108. Replicas: 1,
  109. PollInterval: time.Second,
  110. }
  111. if err := framework.RunRC(cfg); err != nil {
  112. return nil, err
  113. }
  114. defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, cfg.Name)
  115. // Run a single watcher, to reduce the number of API calls we have to
  116. // make; this is to minimize the timing error. It's how kube-proxy
  117. // consumes the endpoints data, so it seems like the right thing to
  118. // test.
  119. endpointQueries := newQuerier()
  120. startEndpointWatcher(f, endpointQueries)
  121. defer close(endpointQueries.stop)
  122. // run one test and throw it away-- this is to make sure that the pod's
  123. // ready status has propagated.
  124. singleServiceLatency(f, cfg.Name, endpointQueries)
  125. // These channels are never closed, and each attempt sends on exactly
  126. // one of these channels, so the sum of the things sent over them will
  127. // be exactly total.
  128. errs := make(chan error, total)
  129. durations := make(chan time.Duration, total)
  130. blocker := make(chan struct{}, inParallel)
  131. for i := 0; i < total; i++ {
  132. go func() {
  133. defer GinkgoRecover()
  134. blocker <- struct{}{}
  135. defer func() { <-blocker }()
  136. if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil {
  137. errs <- err
  138. } else {
  139. durations <- d
  140. }
  141. }()
  142. }
  143. errCount := 0
  144. for i := 0; i < total; i++ {
  145. select {
  146. case e := <-errs:
  147. framework.Logf("Got error: %v", e)
  148. errCount += 1
  149. case d := <-durations:
  150. output = append(output, d)
  151. }
  152. }
  153. if errCount != 0 {
  154. return output, fmt.Errorf("got %v errors", errCount)
  155. }
  156. return output, nil
  157. }
// endpointQuery is one pending (or pre-satisfied) request for the
// endpoints object with a given name.
type endpointQuery struct {
	endpointsName string          // name of the Endpoints object being awaited
	endpoints     *api.Endpoints  // the observed object; nil until it has been seen
	result        chan<- struct{} // closed to wake the requester; nil for results recorded before any request arrived
}

// endpointQueries matches requests for endpoints with watch events
// reporting them. All state is owned by the single join() goroutine,
// which serializes access via the channels, so no locking is needed.
type endpointQueries struct {
	requests map[string]*endpointQuery // keyed by endpoints name; holds pending requests and early results

	stop        chan struct{}       // closed to stop join() and the endpoint watcher
	requestChan chan *endpointQuery // carries new requests into join()
	seenChan    chan *api.Endpoints // carries observed endpoints into join()
}
  169. func newQuerier() *endpointQueries {
  170. eq := &endpointQueries{
  171. requests: map[string]*endpointQuery{},
  172. stop: make(chan struct{}, 100),
  173. requestChan: make(chan *endpointQuery),
  174. seenChan: make(chan *api.Endpoints, 100),
  175. }
  176. go eq.join()
  177. return eq
  178. }
// join merges the incoming streams of requests and added endpoints. It has
// nice properties like:
//   - remembering an endpoint if it happens to arrive before it is requested.
//   - closing all outstanding requests (returning nil) if it is stopped.
//
// join owns eq.requests exclusively; it is the only goroutine that
// reads or writes the map.
func (eq *endpointQueries) join() {
	defer func() {
		// Terminate all pending requests, so that no goroutine will
		// block indefinitely. Entries with a nil result are early
		// results with no waiter, so there is nothing to close.
		for _, req := range eq.requests {
			if req.result != nil {
				close(req.result)
			}
		}
	}()

	for {
		select {
		case <-eq.stop:
			return
		case req := <-eq.requestChan:
			if cur, ok := eq.requests[req.endpointsName]; ok && cur.endpoints != nil {
				// We've already gotten the result, so we can
				// immediately satisfy this request.
				delete(eq.requests, req.endpointsName)
				req.endpoints = cur.endpoints
				close(req.result)
			} else {
				// Save this request.
				eq.requests[req.endpointsName] = req
			}
		case got := <-eq.seenChan:
			if req, ok := eq.requests[got.Name]; ok {
				if req.result != nil {
					// Satisfy a request.
					delete(eq.requests, got.Name)
					req.endpoints = got
					close(req.result)
				} else {
					// We've already recorded a result, but
					// haven't gotten the request yet. Only
					// keep the first result.
				}
			} else {
				// We haven't gotten the corresponding request
				// yet, save this result. result stays nil to
				// mark it as an early result.
				eq.requests[got.Name] = &endpointQuery{
					endpoints: got,
				}
			}
		}
	}
}
  230. // request blocks until the requested endpoint is seen.
  231. func (eq *endpointQueries) request(endpointsName string) *api.Endpoints {
  232. result := make(chan struct{})
  233. req := &endpointQuery{
  234. endpointsName: endpointsName,
  235. result: result,
  236. }
  237. eq.requestChan <- req
  238. <-result
  239. return req.endpoints
  240. }
// added marks e as seen; does not block.
// NOTE(review): seenChan has a buffer of 100, so this is only
// non-blocking while the buffer has room — if join() ever falls
// behind by 100 events this would block the informer callback.
func (eq *endpointQueries) added(e *api.Endpoints) {
	eq.seenChan <- e
}
// startEndpointWatcher starts an informer on Endpoints objects in the
// test namespace, forwarding to q every endpoints object whose first
// subset has at least one ready address. It blocks until the informer
// has finished its initial sync so warm-up time is not measured; the
// watcher stops when q.stop is closed.
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
	_, controller := controllerframework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return f.Client.Endpoints(f.Namespace.Name).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return f.Client.Endpoints(f.Namespace.Name).Watch(options)
			},
		},
		&api.Endpoints{},
		0, // no periodic resync; we only care about live events
		controllerframework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if e, ok := obj.(*api.Endpoints); ok {
					// Only report endpoints that have an address;
					// empty endpoints mean the pod isn't ready yet.
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
			UpdateFunc: func(old, cur interface{}) {
				if e, ok := cur.(*api.Endpoints); ok {
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
		},
	)
	go controller.Run(q.stop)

	// Wait for the controller to sync, so that we don't count any warm-up time.
	for !controller.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
}
  281. func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) {
  282. // Make a service that points to that pod.
  283. svc := &api.Service{
  284. ObjectMeta: api.ObjectMeta{
  285. GenerateName: "latency-svc-",
  286. },
  287. Spec: api.ServiceSpec{
  288. Ports: []api.ServicePort{{Protocol: api.ProtocolTCP, Port: 80}},
  289. Selector: map[string]string{"name": name},
  290. Type: api.ServiceTypeClusterIP,
  291. SessionAffinity: api.ServiceAffinityNone,
  292. },
  293. }
  294. startTime := time.Now()
  295. gotSvc, err := f.Client.Services(f.Namespace.Name).Create(svc)
  296. if err != nil {
  297. return 0, err
  298. }
  299. framework.Logf("Created: %v", gotSvc.Name)
  300. defer f.Client.Services(gotSvc.Namespace).Delete(gotSvc.Name)
  301. if e := q.request(gotSvc.Name); e == nil {
  302. return 0, fmt.Errorf("Never got a result for endpoint %v", gotSvc.Name)
  303. }
  304. stopTime := time.Now()
  305. d := stopTime.Sub(startTime)
  306. framework.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
  307. return d, nil
  308. }