Browse Source

Improve yamux benches

Refactor benches and add finer grained bench cases:

1. Minor cleanups (inconsistent var names between similar code, etc)
2. Report allocs and throughput
3. Run benches with fixed parameters with a wider array of inputs to better show scaling effects
4. Implement parallel version of primary bench test

Example output:
```
BenchmarkPing-16                    	  200000	      7719 ns/op	     458 B/op	       8 allocs/op
BenchmarkAccept-16                  	  100000	     21225 ns/op	    1412 B/op	      22 allocs/op
BenchmarkSendRecv32-16              	  200000	      6279 ns/op	   5.10 MB/s	      80 B/op	       2 allocs/op
BenchmarkSendRecv64-16              	  200000	      6380 ns/op	  10.03 MB/s	      80 B/op	       2 allocs/op
BenchmarkSendRecv128-16             	  200000	      6192 ns/op	  20.67 MB/s	      80 B/op	       2 allocs/op
BenchmarkSendRecv256-16             	  200000	      6384 ns/op	  40.10 MB/s	      80 B/op	       2 allocs/op
BenchmarkSendRecv512-16             	  200000	      6285 ns/op	  81.46 MB/s	      80 B/op	       2 allocs/op
BenchmarkSendRecv1024-16            	  200000	      6519 ns/op	 157.06 MB/s	      80 B/op	       2 allocs/op
BenchmarkSendRecv2048-16            	  200000	      6598 ns/op	 310.36 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecv4096-16            	  200000	      7000 ns/op	 585.14 MB/s	      83 B/op	       2 allocs/op
BenchmarkSendRecvLarge-16           	      10	 147034782 ns/op	3651.32 MB/s	  359793 B/op	    6946 allocs/op
BenchmarkSendRecvParallel32-16      	  200000	      6215 ns/op	   5.15 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecvParallel64-16      	  200000	      6759 ns/op	   9.47 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecvParallel128-16     	  200000	      6632 ns/op	  19.30 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecvParallel256-16     	  200000	      6804 ns/op	  37.62 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecvParallel512-16     	  200000	      6818 ns/op	  75.09 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecvParallel1024-16    	  200000	      6852 ns/op	 149.44 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecvParallel2048-16    	  200000	      7732 ns/op	 264.85 MB/s	      81 B/op	       2 allocs/op
BenchmarkSendRecvParallel4096-16    	  200000	      7449 ns/op	 549.84 MB/s	      82 B/op	       2 allocs/op
```
New code passes with ``--race``. (there is a race condition in existing tests)
Tylor Arndt 5 năm trước
mục cha
commit
c50977066d
1 tập tin đã thay đổi với 175 bổ sung và 44 xóa
  1. 175 44
      bench_test.go

+ 175 - 44
bench_test.go

@@ -1,13 +1,20 @@
 package yamux
 
 import (
+	"io"
+	"io/ioutil"
 	"testing"
 )
 
 func BenchmarkPing(b *testing.B) {
 	client, server := testClientServer()
-	defer client.Close()
-	defer server.Close()
+	defer func() {
+		client.Close()
+		server.Close()
+	}()
+
+	b.ReportAllocs()
+	b.ResetTimer()
 
 	for i := 0; i < b.N; i++ {
 		rtt, err := client.Ping()
@@ -22,10 +29,18 @@ func BenchmarkPing(b *testing.B) {
 
 func BenchmarkAccept(b *testing.B) {
 	client, server := testClientServer()
-	defer client.Close()
-	defer server.Close()
+	defer func() {
+		client.Close()
+		server.Close()
+	}()
+
+	doneCh := make(chan struct{})
+	b.ReportAllocs()
+	b.ResetTimer()
 
 	go func() {
+		defer close(doneCh)
+
 		for i := 0; i < b.N; i++ {
 			stream, err := server.AcceptStream()
 			if err != nil {
@@ -42,71 +57,100 @@ func BenchmarkAccept(b *testing.B) {
 		}
 		stream.Close()
 	}
+	<-doneCh
 }
 
-func BenchmarkSendRecv(b *testing.B) {
-	client, server := testClientServer()
-	defer client.Close()
-	defer server.Close()
+func BenchmarkSendRecv32(b *testing.B) {
+	const payloadSize = 32
+	benchmarkSendRecv(b, payloadSize, payloadSize)
+}
 
-	sendBuf := make([]byte, 512)
-	recvBuf := make([]byte, 512)
+func BenchmarkSendRecv64(b *testing.B) {
+	const payloadSize = 64
+	benchmarkSendRecv(b, payloadSize, payloadSize)
+}
 
-	doneCh := make(chan struct{})
-	go func() {
-		stream, err := server.AcceptStream()
-		if err != nil {
-			return
-		}
-		defer stream.Close()
-		for i := 0; i < b.N; i++ {
-			if _, err := stream.Read(recvBuf); err != nil {
-				b.Fatalf("err: %v", err)
-			}
-		}
-		close(doneCh)
-	}()
+func BenchmarkSendRecv128(b *testing.B) {
+	const payloadSize = 128
+	benchmarkSendRecv(b, payloadSize, payloadSize)
+}
 
-	stream, err := client.Open()
-	if err != nil {
-		b.Fatalf("err: %v", err)
-	}
-	defer stream.Close()
-	for i := 0; i < b.N; i++ {
-		if _, err := stream.Write(sendBuf); err != nil {
-			b.Fatalf("err: %v", err)
-		}
-	}
-	<-doneCh
+func BenchmarkSendRecv256(b *testing.B) {
+	const payloadSize = 256
+	benchmarkSendRecv(b, payloadSize, payloadSize)
+}
+
+func BenchmarkSendRecv512(b *testing.B) {
+	const payloadSize = 512
+	benchmarkSendRecv(b, payloadSize, payloadSize)
+}
+
+func BenchmarkSendRecv1024(b *testing.B) {
+	const payloadSize = 1024
+	benchmarkSendRecv(b, payloadSize, payloadSize)
+}
+
+func BenchmarkSendRecv2048(b *testing.B) {
+	const payloadSize = 2048
+	benchmarkSendRecv(b, payloadSize, payloadSize)
+}
+
+func BenchmarkSendRecv4096(b *testing.B) {
+	const payloadSize = 4096
+	benchmarkSendRecv(b, payloadSize, payloadSize)
 }
 
 func BenchmarkSendRecvLarge(b *testing.B) {
+	const sendSize = 512 * 1024 * 1024 //512 MB
+	const recvSize = 4 * 1024          //4 KB
+	benchmarkSendRecv(b, sendSize, recvSize)
+}
+
+func benchmarkSendRecv(b *testing.B, sendSize, recvSize int) {
 	client, server := testClientServer()
-	defer client.Close()
-	defer server.Close()
-	const sendSize = 512 * 1024 * 1024
-	const recvSize = 4 * 1024
+	defer func() {
+		client.Close()
+		server.Close()
+	}()
 
 	sendBuf := make([]byte, sendSize)
 	recvBuf := make([]byte, recvSize)
+	doneCh := make(chan struct{})
 
+	b.SetBytes(int64(sendSize))
+	b.ReportAllocs()
 	b.ResetTimer()
-	recvDone := make(chan struct{})
 
 	go func() {
+		defer close(doneCh)
+
 		stream, err := server.AcceptStream()
 		if err != nil {
 			return
 		}
 		defer stream.Close()
-		for i := 0; i < b.N; i++ {
-			for j := 0; j < sendSize/recvSize; j++ {
+
+		switch {
+		case sendSize == recvSize:
+			for i := 0; i < b.N; i++ {
 				if _, err := stream.Read(recvBuf); err != nil {
 					b.Fatalf("err: %v", err)
 				}
 			}
+
+		case recvSize > sendSize:
+			b.Fatalf("bad test case; recvSize was: %d and sendSize was: %d, but recvSize must be <= sendSize!", recvSize, sendSize)
+
+		default:
+			chunks := sendSize / recvSize
+			for i := 0; i < b.N; i++ {
+				for j := 0; j < chunks; j++ {
+					if _, err := stream.Read(recvBuf); err != nil {
+						b.Fatalf("err: %v", err)
+					}
+				}
+			}
 		}
-		close(recvDone)
 	}()
 
 	stream, err := client.Open()
@@ -114,10 +158,97 @@ func BenchmarkSendRecvLarge(b *testing.B) {
 		b.Fatalf("err: %v", err)
 	}
 	defer stream.Close()
+
 	for i := 0; i < b.N; i++ {
 		if _, err := stream.Write(sendBuf); err != nil {
 			b.Fatalf("err: %v", err)
 		}
 	}
-	<-recvDone
+	<-doneCh
+}
+
+func BenchmarkSendRecvParallel32(b *testing.B) {
+	const payloadSize = 32
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func BenchmarkSendRecvParallel64(b *testing.B) {
+	const payloadSize = 64
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func BenchmarkSendRecvParallel128(b *testing.B) {
+	const payloadSize = 128
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func BenchmarkSendRecvParallel256(b *testing.B) {
+	const payloadSize = 256
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func BenchmarkSendRecvParallel512(b *testing.B) {
+	const payloadSize = 512
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func BenchmarkSendRecvParallel1024(b *testing.B) {
+	const payloadSize = 1024
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func BenchmarkSendRecvParallel2048(b *testing.B) {
+	const payloadSize = 2048
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func BenchmarkSendRecvParallel4096(b *testing.B) {
+	const payloadSize = 4096
+	benchmarkSendRecvParallel(b, payloadSize)
+}
+
+func benchmarkSendRecvParallel(b *testing.B, sendSize int) {
+	client, server := testClientServer()
+	defer func() {
+		client.Close()
+		server.Close()
+	}()
+
+	sendBuf := make([]byte, sendSize)
+	discarder := ioutil.Discard.(io.ReaderFrom)
+	b.SetBytes(int64(sendSize))
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		doneCh := make(chan struct{})
+
+		go func() {
+			defer close(doneCh)
+
+			stream, err := server.AcceptStream()
+			if err != nil {
+				return
+			}
+			defer stream.Close()
+
+			if _, err := discarder.ReadFrom(stream); err != nil {
+				b.Fatalf("err: %v", err)
+			}
+		}()
+
+		stream, err := client.Open()
+		if err != nil {
+			b.Fatalf("err: %v", err)
+		}
+
+		for pb.Next() {
+			if _, err := stream.Write(sendBuf); err != nil {
+				b.Fatalf("err: %v", err)
+			}
+		}
+
+		stream.Close()
+		<-doneCh
+	})
 }