crypto/tls: adjust dynamic record sizes to grow arithmetically

The current code, introduced after Go 1.6 to improve latency on
low-bandwidth connections, sends 1 kB packets until 1 MB has been sent,
and then sends 16 kB packets (the maximum record size).

Unfortunately this decreases throughput for 1-16 MB responses by 20% or so.

Following discussion on #15713, change the cutoff to 128 kB of data sent
and also grow the size allowed for successive packets:
1 kB, 2 kB, 3 kB, ..., 15 kB, 16 kB.
This fixes the throughput problems: the overhead is now closer to 2%.
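
For intuition, here is a minimal, self-contained sketch of the schedule this
produces. It is not the actual change (that is in conn.go below); it assumes a
round 1 kB base payload for readability, whereas the real code derives the
per-record payload from a TCP MSS estimate, and recordSize, basePayload, and
boostThreshold are illustrative names rather than identifiers from the package:

package main

import "fmt"

const (
	basePayload    = 1 << 10   // assumed ~1 kB starting payload per record
	maxPlaintext   = 16 << 10  // TLS maximum plaintext bytes per record
	boostThreshold = 128 << 10 // after this much application data, always use the max
)

// recordSize mirrors the arithmetic-progression growth: the (pkt+1)-th record
// may carry up to (pkt+1)*basePayload bytes, capped at maxPlaintext, and once
// bytesSent passes boostThreshold every record uses the maximum size.
func recordSize(pkt, bytesSent int) int {
	if bytesSent >= boostThreshold {
		return maxPlaintext
	}
	n := basePayload * (pkt + 1)
	if n > maxPlaintext {
		n = maxPlaintext
	}
	return n
}

func main() {
	sent := 0
	for pkt := 0; sent < boostThreshold+maxPlaintext; pkt++ {
		n := recordSize(pkt, sent)
		fmt.Printf("record %2d: %2d kB\n", pkt, n>>10)
		sent += n
	}
}

Running it prints 1 kB, 2 kB, ..., 15 kB, 16 kB and then stays at 16 kB.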

I hope this still helps with latency, but I don't have a great way to test it.
At the least, it's not worse than Go 1.6.

Comparing MaxPacket vs DynamicPacket benchmarks:

name               maxpkt time/op   dyn. time/op   delta
Throughput/1MB-8    5.07ms ± 7%      5.21ms ± 7%   +2.73%  (p=0.023 n=16+16)
Throughput/2MB-8    15.7ms ±201%      8.4ms ± 5%     ~      (p=0.604 n=20+16)
Throughput/4MB-8    14.3ms ± 1%      14.5ms ± 1%   +1.53%  (p=0.000 n=16+16)
Throughput/8MB-8    26.6ms ± 1%      26.8ms ± 1%   +0.47%  (p=0.003 n=19+18)
Throughput/16MB-8   51.0ms ± 1%      51.3ms ± 1%   +0.47%  (p=0.000 n=20+20)
Throughput/32MB-8    100ms ± 1%       100ms ± 1%   +0.24%  (p=0.033 n=20+20)
Throughput/64MB-8    197ms ± 0%       198ms ± 0%   +0.56%  (p=0.000 n=18+7)

The small-MB runs are bimodal in both cases, probably due to GC pauses.
But there's clearly no general slowdown anymore.

Fixes #15713.

Change-Id: I5fc44680ba71812d24baac142bceee0e23f2e382
Reviewed-on: https://go-review.googlesource.com/23487
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Author: Russ Cox
Date:   2016-05-27 09:50:06 -04:00
commit 81aa612742, parent 6781899784
3 changed files with 174 additions and 14 deletions

conn.go

@@ -76,10 +76,11 @@ type Conn struct {
 	input    *block       // application data waiting to be read
 	hand     bytes.Buffer // handshake data waiting to be read
 
-	// bytesSent counts the number of bytes of application data that have
-	// been sent.
-	bytesSent int64
+	// bytesSent counts the bytes of application data sent.
+	// packetsSent counts packets.
+	bytesSent   int64
+	packetsSent int64
 
 	// activeCall is an atomic int32; the low bit is whether Close has
 	// been called. the rest of the bits are the number of goroutines
 	// in Conn.Write.
@@ -732,7 +733,7 @@ const (
 	// recordSizeBoostThreshold is the number of bytes of application data
 	// sent after which the TLS record size will be increased to the
 	// maximum.
-	recordSizeBoostThreshold = 1 * 1024 * 1024
+	recordSizeBoostThreshold = 128 * 1024
 )
 
 // maxPayloadSizeForWrite returns the maximum TLS payload size to use for the
@@ -787,8 +788,19 @@ func (c *Conn) maxPayloadSizeForWrite(typ recordType, explicitIVLen int) int {
 			panic("unknown cipher type")
 		}
 	}
 
-	return payloadBytes
+	// Allow packet growth in arithmetic progression up to max.
+	pkt := c.packetsSent
+	c.packetsSent++
+	if pkt > 1000 {
+		return maxPlaintext // avoid overflow in multiply below
+	}
+
+	n := payloadBytes * int(pkt+1)
+	if n > maxPlaintext {
+		n = maxPlaintext
+	}
+	return n
 }
 
 // writeRecordLocked writes a TLS record with the given type and payload to the

conn_test.go
@@ -208,13 +208,10 @@ func runDynamicRecordSizingTest(t *testing.T, config *Config) {
 	seenLargeRecord := false
 	for i, size := range recordSizes {
 		if !seenLargeRecord {
-			if size > tcpMSSEstimate {
-				if i < 100 {
-					t.Fatalf("Record #%d has size %d, which is too large too soon", i, size)
-				}
-				if size <= maxPlaintext {
-					t.Fatalf("Record #%d has odd size %d", i, size)
-				}
+			if size > (i+1)*tcpMSSEstimate {
+				t.Fatalf("Record #%d has size %d, which is too large too soon", i, size)
+			}
+			if size >= maxPlaintext {
 				seenLargeRecord = true
 			}
 		} else if size <= maxPlaintext {

tls_test.go
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"internal/testenv"
 	"io"
+	"math"
 	"net"
 	"strings"
 	"testing"
@@ -146,7 +147,7 @@ func TestX509MixedKeyPair(t *testing.T) {
 	}
 }
 
-func newLocalListener(t *testing.T) net.Listener {
+func newLocalListener(t testing.TB) net.Listener {
 	ln, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
 		ln, err = net.Listen("tcp6", "[::1]:0")
@@ -473,3 +474,153 @@ func (w *changeImplConn) Close() error {
 	}
 	return w.Conn.Close()
 }
+
+func throughput(b *testing.B, totalBytes int64, dynamicRecordSizingDisabled bool) {
+	ln := newLocalListener(b)
+	defer ln.Close()
+
+	var serr error
+	go func() {
+		for i := 0; i < b.N; i++ {
+			sconn, err := ln.Accept()
+			if err != nil {
+				serr = err
+				return
+			}
+			serverConfig := *testConfig
+			serverConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled
+			srv := Server(sconn, &serverConfig)
+			if err := srv.Handshake(); err != nil {
+				serr = fmt.Errorf("handshake: %v", err)
+				return
+			}
+			io.Copy(srv, srv)
+		}
+	}()
+
+	b.SetBytes(totalBytes)
+	clientConfig := *testConfig
+	clientConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled
+
+	buf := make([]byte, 1<<16)
+	chunks := int(math.Ceil(float64(totalBytes) / float64(len(buf))))
+	for i := 0; i < b.N; i++ {
+		conn, err := Dial("tcp", ln.Addr().String(), &clientConfig)
+		if err != nil {
+			b.Fatal(err)
+		}
+		for j := 0; j < chunks; j++ {
+			_, err := conn.Write(buf)
+			if err != nil {
+				b.Fatal(err)
+			}
+			_, err = io.ReadFull(conn, buf)
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+		conn.Close()
+	}
+}
+
+func BenchmarkThroughput(b *testing.B) {
+	for _, mode := range []string{"Max", "Dynamic"} {
+		for size := 1; size <= 64; size <<= 1 {
+			name := fmt.Sprintf("%sPacket/%dMB", mode, size)
+			b.Run(name, func(b *testing.B) {
+				throughput(b, int64(size<<20), mode == "Max")
+			})
+		}
+	}
+}
+
+type slowConn struct {
+	net.Conn
+	bps int
+}
+
+func (c *slowConn) Write(p []byte) (int, error) {
+	if c.bps == 0 {
+		panic("too slow")
+	}
+	t0 := time.Now()
+	wrote := 0
+	for wrote < len(p) {
+		time.Sleep(100 * time.Microsecond)
+		allowed := int(time.Since(t0).Seconds()*float64(c.bps)) / 8
+		if allowed > len(p) {
+			allowed = len(p)
+		}
+		if wrote < allowed {
+			n, err := c.Conn.Write(p[wrote:allowed])
+			wrote += n
+			if err != nil {
+				return wrote, err
+			}
+		}
+	}
+	return len(p), nil
+}
+
+func latency(b *testing.B, bps int, dynamicRecordSizingDisabled bool) {
+	ln := newLocalListener(b)
+	defer ln.Close()
+
+	var serr error
+	go func() {
+		for i := 0; i < b.N; i++ {
+			sconn, err := ln.Accept()
+			if err != nil {
+				serr = err
+				return
+			}
+			serverConfig := *testConfig
+			serverConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled
+			srv := Server(&slowConn{sconn, bps}, &serverConfig)
+			if err := srv.Handshake(); err != nil {
+				serr = fmt.Errorf("handshake: %v", err)
+				return
+			}
+			io.Copy(srv, srv)
+		}
+	}()
+
+	clientConfig := *testConfig
+	clientConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled
+
+	buf := make([]byte, 16384)
+	peek := make([]byte, 1)
+
+	for i := 0; i < b.N; i++ {
+		conn, err := Dial("tcp", ln.Addr().String(), &clientConfig)
+		if err != nil {
+			b.Fatal(err)
+		}
+		// make sure we're connected and previous connection has stopped
+		if _, err := conn.Write(buf[:1]); err != nil {
+			b.Fatal(err)
+		}
+		if _, err := io.ReadFull(conn, peek); err != nil {
+			b.Fatal(err)
+		}
+		if _, err := conn.Write(buf); err != nil {
+			b.Fatal(err)
+		}
+		if _, err = io.ReadFull(conn, peek); err != nil {
+			b.Fatal(err)
+		}
+		conn.Close()
+	}
+}
+
+func BenchmarkLatency(b *testing.B) {
+	for _, mode := range []string{"Max", "Dynamic"} {
+		for _, kbps := range []int{200, 500, 1000, 2000, 5000} {
+			name := fmt.Sprintf("%sPacket/%dkbps", mode, kbps)
+			b.Run(name, func(b *testing.B) {
+				latency(b, kbps*1000, mode == "Max")
+			})
+		}
+	}
+}