// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tls

import (
	"bytes"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"net"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"
)

var rsaCertPEM = `-----BEGIN CERTIFICATE-----
MIIB0zCCAX2gAwIBAgIJAI/M7BYjwB+uMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMTIwOTEyMjE1MjAyWhcNMTUwOTEyMjE1MjAyWjBF
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANLJ
hPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wok/4xIA+ui35/MmNa
rtNuC+BdZ1tMuVCPFZcCAwEAAaNQME4wHQYDVR0OBBYEFJvKs8RfJaXTH08W+SGv
zQyKn0H8MB8GA1UdIwQYMBaAFJvKs8RfJaXTH08W+SGvzQyKn0H8MAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQEFBQADQQBJlffJHybjDGxRMqaRmDhX0+6v02TUKZsW
r5QuVbpQhH6u+0UgcW0jp9QwpxoPTLTWGXEWBBBurxFwiCBhkQ+V
-----END CERTIFICATE-----
`

var rsaKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo
k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G
6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N
MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW
SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T
xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi
D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g==
-----END RSA PRIVATE KEY-----
`

// keyPEM is the same as rsaKeyPEM, but declares itself as just
// "PRIVATE KEY", not "RSA PRIVATE KEY". https://golang.org/issue/4477
var keyPEM = `-----BEGIN PRIVATE KEY-----
MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo
k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G
6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N
MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW
SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T
xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi
D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g==
-----END PRIVATE KEY-----
`

var ecdsaCertPEM = `-----BEGIN CERTIFICATE-----
MIIB/jCCAWICCQDscdUxw16XFDAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw
EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0
eSBMdGQwHhcNMTIxMTE0MTI0MDQ4WhcNMTUxMTE0MTI0MDQ4WjBFMQswCQYDVQQG
EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk
Z2l0cyBQdHkgTHRkMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBY9+my9OoeSUR
lDQdV/x8LsOuLilthhiS1Tz4aGDHIPwC1mlvnf7fg5lecYpMCrLLhauAc1UJXcgl
01xoLuzgtAEAgv2P/jgytzRSpUYvgLBt1UA0leLYBy6mQQbrNEuqT3INapKIcUv8
XxYP0xMEUksLPq6Ca+CRSqTtrd/23uTnapkwCQYHKoZIzj0EAQOBigAwgYYCQXJo
A7Sl2nLVf+4Iu/tAX/IF4MavARKC4PPHK3zfuGfPR3oCCcsAoz3kAzOeijvd0iXb
H5jBImIxPL4WxQNiBTexAkF8D1EtpYuWdlVQ80/h/f4pBcGiXPqX5h2PQSQY7hP1
+jwM1FGS4fREIOvlBYr/SzzQRtwrvrzGYxDEDbsC0ZGRnA==
-----END CERTIFICATE-----
`

var ecdsaKeyPEM = `-----BEGIN EC PARAMETERS-----
BgUrgQQAIw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
MIHcAgEBBEIBrsoKp0oqcv6/JovJJDoDVSGWdirrkgCWxrprGlzB9o0X8fV675X0
NwuBenXFfeZvVcwluO7/Q9wkYoPd/t3jGImgBwYFK4EEACOhgYkDgYYABAFj36bL
06h5JRGUNB1X/Hwuw64uKW2GGJLVPPhoYMcg/ALWaW+d/t+DmV5xikwKssuFq4Bz
VQldyCXTXGgu7OC0AQCC/Y/+ODK3NFKlRi+AsG3VQDSV4tgHLqZBBus0S6pPcg1q
kohxS/xfFg/TEwRSSws+roJr4JFKpO2t3/be5OdqmQ==
-----END EC PRIVATE KEY-----
`

var keyPairTests = []struct {
	algo string
	cert string
	key  string
}{
	{"ECDSA", ecdsaCertPEM, ecdsaKeyPEM},
	{"RSA", rsaCertPEM, rsaKeyPEM},
	{"RSA-untyped", rsaCertPEM, keyPEM}, // golang.org/issue/4477
}
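
// TestX509KeyPair checks that X509KeyPair finds the certificate and
// key for each entry in keyPairTests regardless of the order in
// which they appear in the PEM input.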
func TestX509KeyPair(t *testing.T) {
	t.Parallel()
	var pem []byte
	for _, test := range keyPairTests {
		pem = []byte(test.cert + test.key)
		if _, err := X509KeyPair(pem, pem); err != nil {
			t.Errorf("Failed to load %s cert followed by %s key: %s", test.algo, test.algo, err)
		}
		pem = []byte(test.key + test.cert)
		if _, err := X509KeyPair(pem, pem); err != nil {
			t.Errorf("Failed to load %s key followed by %s cert: %s", test.algo, test.algo, err)
		}
	}
}
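
// TestX509KeyPairErrors checks that X509KeyPair returns descriptive
// errors for swapped arguments, for two certificates, and for PEM
// input containing neither a certificate nor a key.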
func TestX509KeyPairErrors(t *testing.T) {
	_, err := X509KeyPair([]byte(rsaKeyPEM), []byte(rsaCertPEM))
	if err == nil {
		t.Fatalf("X509KeyPair didn't return an error when arguments were switched")
	}
	if subStr := "been switched"; !strings.Contains(err.Error(), subStr) {
		t.Fatalf("Expected %q in the error when switching arguments to X509KeyPair, but the error was %q", subStr, err)
	}

	_, err = X509KeyPair([]byte(rsaCertPEM), []byte(rsaCertPEM))
	if err == nil {
		t.Fatalf("X509KeyPair didn't return an error when both arguments were certificates")
	}
	if subStr := "certificate"; !strings.Contains(err.Error(), subStr) {
		t.Fatalf("Expected %q in the error when both arguments to X509KeyPair were certificates, but the error was %q", subStr, err)
	}

	const nonsensePEM = `
-----BEGIN NONSENSE-----
Zm9vZm9vZm9v
-----END NONSENSE-----
`

	_, err = X509KeyPair([]byte(nonsensePEM), []byte(nonsensePEM))
	if err == nil {
		t.Fatalf("X509KeyPair didn't return an error when both arguments were nonsense")
	}
	if subStr := "NONSENSE"; !strings.Contains(err.Error(), subStr) {
		t.Fatalf("Expected %q in the error when both arguments to X509KeyPair were nonsense, but the error was %q", subStr, err)
	}
}
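
// TestX509MixedKeyPair checks that a certificate paired with a
// private key of the wrong type is rejected.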
func TestX509MixedKeyPair(t *testing.T) {
	if _, err := X509KeyPair([]byte(rsaCertPEM), []byte(ecdsaKeyPEM)); err == nil {
		t.Error("Load of RSA certificate succeeded with ECDSA private key")
	}
	if _, err := X509KeyPair([]byte(ecdsaCertPEM), []byte(rsaKeyPEM)); err == nil {
		t.Error("Load of ECDSA certificate succeeded with RSA private key")
	}
}
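
// newLocalListener returns a TCP listener on an ephemeral loopback
// port, trying IPv4 first and falling back to IPv6.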
func newLocalListener(t testing.TB) net.Listener {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		ln, err = net.Listen("tcp6", "[::1]:0")
	}
	if err != nil {
		t.Fatal(err)
	}
	return ln
}
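
// TestDialTimeout checks that DialWithDialer honors the dialer's
// Timeout against a server that accepts the connection but never
// completes the handshake.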
func TestDialTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	listener := newLocalListener(t)

	addr := listener.Addr().String()
	defer listener.Close()

	complete := make(chan bool)
	defer close(complete)

	go func() {
		conn, err := listener.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		<-complete
		conn.Close()
	}()

	dialer := &net.Dialer{
		Timeout: 10 * time.Millisecond,
	}

	var err error
	if _, err = DialWithDialer(dialer, "tcp", addr, nil); err == nil {
		t.Fatal("DialWithDialer completed successfully")
	}

	if !isTimeoutError(err) {
		t.Errorf("resulting error not a timeout: %v\nType %T: %#v", err, err, err)
	}
}
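
// isTimeoutError reports whether err is a net.Error whose Timeout
// method returns true.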
func isTimeoutError(err error) bool {
	if ne, ok := err.(net.Error); ok {
		return ne.Timeout()
	}
	return false
}

// tests that Conn.Read returns (non-zero, io.EOF) instead of
// (non-zero, nil) when a Close (alertCloseNotify) is sitting right
// behind the application data in the buffer.
func TestConnReadNonzeroAndEOF(t *testing.T) {
	// This test is racy: it assumes that after a write to a
	// localhost TCP connection, the peer TCP connection can
	// immediately read it. Because it's racy, we skip this test
	// in short mode, and then retry it several times with an
	// increasing sleep in between our final write (via srv.Close
	// below) and the following read.
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	var err error
	for delay := time.Millisecond; delay <= 64*time.Millisecond; delay *= 2 {
		if err = testConnReadNonzeroAndEOF(t, delay); err == nil {
			return
		}
	}
	t.Error(err)
}
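
// The behavior exercised above mirrors the io.Reader contract more
// generally: a caller must consume the n bytes returned by Read
// before inspecting err, because a single call may return both data
// and io.EOF. A minimal sketch of such a caller (a hypothetical
// helper, not used by these tests):
//
//	func readUntilEOF(r io.Reader) ([]byte, error) {
//		var out []byte
//		buf := make([]byte, 512)
//		for {
//			n, err := r.Read(buf)
//			out = append(out, buf[:n]...)
//			if err == io.EOF {
//				return out, nil
//			}
//			if err != nil {
//				return out, err
//			}
//		}
//	}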
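
// testConnReadNonzeroAndEOF makes one attempt of the check above:
// the server writes a final record and closes, and after the given
// delay the client's last Read must return both the data and io.EOF.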
func testConnReadNonzeroAndEOF(t *testing.T, delay time.Duration) error {
	ln := newLocalListener(t)
	defer ln.Close()

	srvCh := make(chan *Conn, 1)
	var serr error
	go func() {
		sconn, err := ln.Accept()
		if err != nil {
			serr = err
			srvCh <- nil
			return
		}
		serverConfig := testConfig.Clone()
		srv := Server(sconn, serverConfig)
		if err := srv.Handshake(); err != nil {
			serr = fmt.Errorf("handshake: %v", err)
			srvCh <- nil
			return
		}
		srvCh <- srv
	}()

	clientConfig := testConfig.Clone()
	conn, err := Dial("tcp", ln.Addr().String(), clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	srv := <-srvCh
	if srv == nil {
		return serr
	}

	buf := make([]byte, 6)

	srv.Write([]byte("foobar"))
	n, err := conn.Read(buf)
	if n != 6 || err != nil || string(buf) != "foobar" {
		return fmt.Errorf("Read = %d, %v, data %q; want 6, nil, foobar", n, err, buf)
	}

	srv.Write([]byte("abcdef"))
	srv.Close()
	time.Sleep(delay)
	n, err = conn.Read(buf)
	if n != 6 || string(buf) != "abcdef" {
		return fmt.Errorf("Read = %d, buf = %q; want 6, abcdef", n, buf)
	}
	if err != io.EOF {
		return fmt.Errorf("Second Read error = %v; want io.EOF", err)
	}
	return nil
}
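
// TestTLSUniqueMatches checks that the client's and server's
// tls-unique channel bindings agree, both on a full handshake and on
// a resumed session.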
func TestTLSUniqueMatches(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()

	serverTLSUniques := make(chan []byte)
	go func() {
		for i := 0; i < 2; i++ {
			sconn, err := ln.Accept()
			if err != nil {
				t.Error(err)
				return
			}
			serverConfig := testConfig.Clone()
			srv := Server(sconn, serverConfig)
			if err := srv.Handshake(); err != nil {
				t.Error(err)
				return
			}
			serverTLSUniques <- srv.ConnectionState().TLSUnique
		}
	}()

	clientConfig := testConfig.Clone()
	clientConfig.ClientSessionCache = NewLRUClientSessionCache(1)
	conn, err := Dial("tcp", ln.Addr().String(), clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(conn.ConnectionState().TLSUnique, <-serverTLSUniques) {
		t.Error("client and server channel bindings differ")
	}
	conn.Close()

	conn, err = Dial("tcp", ln.Addr().String(), clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	if !conn.ConnectionState().DidResume {
		t.Error("second session did not use resumption")
	}
	if !bytes.Equal(conn.ConnectionState().TLSUnique, <-serverTLSUniques) {
		t.Error("client and server channel bindings differ when session resumption is used")
	}
}
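
// TestVerifyHostname checks VerifyHostname against a live
// www.google.com connection, and that it fails (as documented) when
// InsecureSkipVerify is set.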
func TestVerifyHostname(t *testing.T) {
	c, err := Dial("tcp", "www.google.com:https", nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := c.VerifyHostname("www.google.com"); err != nil {
		t.Fatalf("verify www.google.com: %v", err)
	}
	if err := c.VerifyHostname("www.yahoo.com"); err == nil {
		t.Fatalf("verify www.yahoo.com succeeded")
	}

	c, err = Dial("tcp", "www.google.com:https", &Config{InsecureSkipVerify: true})
	if err != nil {
		t.Fatal(err)
	}
	if err := c.VerifyHostname("www.google.com"); err == nil {
		t.Fatalf("verify www.google.com succeeded with InsecureSkipVerify=true")
	}
	if err := c.VerifyHostname("www.yahoo.com"); err == nil {
		t.Fatalf("verify www.yahoo.com succeeded with InsecureSkipVerify=true")
	}
}
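
// TestVerifyHostnameResumed checks that VerifiedChains is populated
// and VerifyHostname still works on a resumed connection.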
func TestVerifyHostnameResumed(t *testing.T) {
	config := &Config{
		ClientSessionCache: NewLRUClientSessionCache(32),
	}
	for i := 0; i < 2; i++ {
		c, err := Dial("tcp", "www.google.com:https", config)
		if err != nil {
			t.Fatalf("Dial #%d: %v", i, err)
		}
		cs := c.ConnectionState()
		if i > 0 && !cs.DidResume {
			t.Fatalf("Subsequent connection unexpectedly didn't resume")
		}
		if cs.VerifiedChains == nil {
			t.Fatalf("Dial #%d: cs.VerifiedChains == nil", i)
		}
		if err := c.VerifyHostname("www.google.com"); err != nil {
			t.Fatalf("verify www.google.com #%d: %v", i, err)
		}
		c.Close()
	}
}
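
// TestConnCloseBreakingWrite checks that Close does not block forever
// while another goroutine is stuck in Write. It relies on
// changeImplConn, a test helper defined elsewhere in this package
// whose Write and Close implementations can be swapped at runtime.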
func TestConnCloseBreakingWrite(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()

	srvCh := make(chan *Conn, 1)
	var serr error
	var sconn net.Conn
	go func() {
		var err error
		sconn, err = ln.Accept()
		if err != nil {
			serr = err
			srvCh <- nil
			return
		}
		serverConfig := testConfig.Clone()
		srv := Server(sconn, serverConfig)
		if err := srv.Handshake(); err != nil {
			serr = fmt.Errorf("handshake: %v", err)
			srvCh <- nil
			return
		}
		srvCh <- srv
	}()

	cconn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer cconn.Close()

	conn := &changeImplConn{
		Conn: cconn,
	}

	clientConfig := testConfig.Clone()
	tconn := Client(conn, clientConfig)
	if err := tconn.Handshake(); err != nil {
		t.Fatal(err)
	}

	srv := <-srvCh
	if srv == nil {
		t.Fatal(serr)
	}
	defer sconn.Close()

	connClosed := make(chan struct{})
	conn.closeFunc = func() error {
		close(connClosed)
		return nil
	}

	inWrite := make(chan bool, 1)
	var errConnClosed = errors.New("conn closed for test")
	conn.writeFunc = func(p []byte) (n int, err error) {
		inWrite <- true
		<-connClosed
		return 0, errConnClosed
	}

	closeReturned := make(chan bool, 1)
	go func() {
		<-inWrite
		tconn.Close() // test that this doesn't block forever.
		closeReturned <- true
	}()

	_, err = tconn.Write([]byte("foo"))
	if err != errConnClosed {
		t.Errorf("Write error = %v; want errConnClosed", err)
	}

	<-closeReturned
	if err := tconn.Close(); err != errClosed {
		t.Errorf("Close error = %v; want errClosed", err)
	}
}
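
// TestConnCloseWrite checks the half-close behavior of CloseWrite:
// the peer reads a clean EOF, later Writes fail with errShutdown, and
// calling CloseWrite before the handshake fails with
// errEarlyCloseWrite.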
func TestConnCloseWrite(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()

	clientDoneChan := make(chan struct{})

	serverCloseWrite := func() error {
		sconn, err := ln.Accept()
		if err != nil {
			return fmt.Errorf("accept: %v", err)
		}
		defer sconn.Close()

		serverConfig := testConfig.Clone()
		srv := Server(sconn, serverConfig)
		if err := srv.Handshake(); err != nil {
			return fmt.Errorf("handshake: %v", err)
		}
		defer srv.Close()

		data, err := ioutil.ReadAll(srv)
		if err != nil {
			return err
		}
		if len(data) > 0 {
			return fmt.Errorf("Read data = %q; want nothing", data)
		}

		if err := srv.CloseWrite(); err != nil {
			return fmt.Errorf("server CloseWrite: %v", err)
		}

		// Wait for clientCloseWrite to finish, so we know we
		// tested the CloseWrite before we defer the
		// sconn.Close above, which would also cause the
		// client to unblock like CloseWrite.
		<-clientDoneChan
		return nil
	}

	clientCloseWrite := func() error {
		defer close(clientDoneChan)

		clientConfig := testConfig.Clone()
		conn, err := Dial("tcp", ln.Addr().String(), clientConfig)
		if err != nil {
			return err
		}
		if err := conn.Handshake(); err != nil {
			return err
		}
		defer conn.Close()

		if err := conn.CloseWrite(); err != nil {
			return fmt.Errorf("client CloseWrite: %v", err)
		}

		if _, err := conn.Write([]byte{0}); err != errShutdown {
			return fmt.Errorf("CloseWrite error = %v; want errShutdown", err)
		}

		data, err := ioutil.ReadAll(conn)
		if err != nil {
			return err
		}
		if len(data) > 0 {
			return fmt.Errorf("Read data = %q; want nothing", data)
		}
		return nil
	}

	errChan := make(chan error, 2)

	go func() { errChan <- serverCloseWrite() }()
	go func() { errChan <- clientCloseWrite() }()

	for i := 0; i < 2; i++ {
		select {
		case err := <-errChan:
			if err != nil {
				t.Fatal(err)
			}
		case <-time.After(10 * time.Second):
			t.Fatal("deadlock")
		}
	}

	// Also test CloseWrite being called before the handshake is
	// finished:
	{
		ln2 := newLocalListener(t)
		defer ln2.Close()

		netConn, err := net.Dial("tcp", ln2.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		defer netConn.Close()
		conn := Client(netConn, testConfig.Clone())

		if err := conn.CloseWrite(); err != errEarlyCloseWrite {
			t.Errorf("CloseWrite error = %v; want errEarlyCloseWrite", err)
		}
	}
}
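
// TestCloneFuncFields checks that Config.Clone copies every
// function-valued field: each callback sets one bit when invoked, and
// calling all of them on the clone must produce a full bitmask.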
func TestCloneFuncFields(t *testing.T) {
	const expectedCount = 5
	called := 0

	c1 := Config{
		Time: func() time.Time {
			called |= 1 << 0
			return time.Time{}
		},
		GetCertificate: func(*ClientHelloInfo) (*Certificate, error) {
			called |= 1 << 1
			return nil, nil
		},
		GetClientCertificate: func(*CertificateRequestInfo) (*Certificate, error) {
			called |= 1 << 2
			return nil, nil
		},
		GetConfigForClient: func(*ClientHelloInfo) (*Config, error) {
			called |= 1 << 3
			return nil, nil
		},
		VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
			called |= 1 << 4
			return nil
		},
	}

	c2 := c1.Clone()

	c2.Time()
	c2.GetCertificate(nil)
	c2.GetClientCertificate(nil)
	c2.GetConfigForClient(nil)
	c2.VerifyPeerCertificate(nil, nil)

	if called != (1<<expectedCount)-1 {
		t.Fatalf("expected %d calls but saw calls %b", expectedCount, called)
	}
}
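
// TestCloneNonFuncFields sets every settable non-function field of a
// Config via reflection and checks that Clone copies them all, using
// reflect.DeepEqual.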
func TestCloneNonFuncFields(t *testing.T) {
	var c1 Config
	v := reflect.ValueOf(&c1).Elem()

	typ := v.Type()
	for i := 0; i < typ.NumField(); i++ {
		f := v.Field(i)
		if !f.CanSet() {
			// unexported field; not cloned.
			continue
		}

		// testing/quick can't handle functions or interfaces and so
		// isn't used here.
		switch fn := typ.Field(i).Name; fn {
		case "Rand":
			f.Set(reflect.ValueOf(io.Reader(os.Stdin)))
		case "Time", "GetCertificate", "GetConfigForClient", "VerifyPeerCertificate", "GetClientCertificate":
			// DeepEqual can't compare functions. If you add a
			// function field to this list, you must also change
			// TestCloneFuncFields to ensure that the func field is
			// cloned.
		case "Certificates":
			f.Set(reflect.ValueOf([]Certificate{
				{Certificate: [][]byte{{'b'}}},
			}))
		case "NameToCertificate":
			f.Set(reflect.ValueOf(map[string]*Certificate{"a": nil}))
		case "RootCAs", "ClientCAs":
			f.Set(reflect.ValueOf(x509.NewCertPool()))
		case "ClientSessionCache":
			f.Set(reflect.ValueOf(NewLRUClientSessionCache(10)))
		case "KeyLogWriter":
			f.Set(reflect.ValueOf(io.Writer(os.Stdout)))
		case "NextProtos":
			f.Set(reflect.ValueOf([]string{"a", "b"}))
		case "ServerName":
			f.Set(reflect.ValueOf("b"))
		case "ClientAuth":
			f.Set(reflect.ValueOf(VerifyClientCertIfGiven))
		case "InsecureSkipVerify", "SessionTicketsDisabled", "DynamicRecordSizingDisabled", "PreferServerCipherSuites", "Accept0RTTData":
			f.Set(reflect.ValueOf(true))
		case "MinVersion", "MaxVersion":
			f.Set(reflect.ValueOf(uint16(VersionTLS12)))
		case "SessionTicketKey":
			f.Set(reflect.ValueOf([32]byte{}))
		case "CipherSuites", "TLS13CipherSuites":
			f.Set(reflect.ValueOf([]uint16{1, 2}))
		case "CurvePreferences":
			f.Set(reflect.ValueOf([]CurveID{CurveP256}))
		case "Renegotiation":
			f.Set(reflect.ValueOf(RenegotiateOnceAsClient))
		case "Max0RTTDataSize":
			f.Set(reflect.ValueOf(uint32(0)))
		default:
			t.Errorf("all fields must be accounted for, but saw unknown field %q", fn)
		}
	}

	c2 := c1.Clone()
	// DeepEqual also compares unexported fields, thus c2 needs to have run
	// serverInit in order to be DeepEqual to c1. Cloning it and discarding
	// the result is sufficient.
	c2.Clone()

	if !reflect.DeepEqual(&c1, c2) {
		t.Errorf("clone failed to copy a field")
	}
}

// changeImplConn is a net.Conn which can change its Write and Close
// methods.
type changeImplConn struct {
	net.Conn
	writeFunc func([]byte) (int, error)
	closeFunc func() error
}

func (w *changeImplConn) Write(p []byte) (n int, err error) {
	if w.writeFunc != nil {
		return w.writeFunc(p)
	}
	return w.Conn.Write(p)
}

func (w *changeImplConn) Close() error {
	if w.closeFunc != nil {
		return w.closeFunc()
	}
	return w.Conn.Close()
}
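
// Hypothetical usage sketch (not one of the tests above): wrap an
// established net.Conn and override Close to observe when the tls.Conn
// tears down the transport:
//
//	c := &changeImplConn{Conn: netConn}
//	c.closeFunc = func() error { println("close observed"); return nil }
//	tconn := Client(c, testConfig.Clone())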
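
// throughput echoes totalBytes of data through a local TLS server once
// per benchmark iteration, with dynamic record sizing either enabled or
// disabled on both ends.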
func throughput(b *testing.B, totalBytes int64, dynamicRecordSizingDisabled bool) {
	ln := newLocalListener(b)
	defer ln.Close()

	N := b.N

	// Less than 64KB because Windows appears to use a TCP rwin < 64KB.
	// See Issue #15899.
	const bufsize = 32 << 10

	go func() {
		buf := make([]byte, bufsize)
		for i := 0; i < N; i++ {
			sconn, err := ln.Accept()
			if err != nil {
				// panic rather than synchronize to avoid benchmark overhead
				// (cannot call b.Fatal in goroutine)
				panic(fmt.Errorf("accept: %v", err))
			}
			serverConfig := testConfig.Clone()
			serverConfig.CipherSuites = nil // the defaults may prefer faster ciphers
			serverConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled
			srv := Server(sconn, serverConfig)
			if err := srv.Handshake(); err != nil {
				panic(fmt.Errorf("handshake: %v", err))
			}
			if _, err := io.CopyBuffer(srv, srv, buf); err != nil {
				panic(fmt.Errorf("copy buffer: %v", err))
			}
		}
	}()

	b.SetBytes(totalBytes)
	clientConfig := testConfig.Clone()
	clientConfig.CipherSuites = nil // the defaults may prefer faster ciphers
	clientConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled

	buf := make([]byte, bufsize)
	chunks := int(math.Ceil(float64(totalBytes) / float64(len(buf))))
	for i := 0; i < N; i++ {
		conn, err := Dial("tcp", ln.Addr().String(), clientConfig)
		if err != nil {
			b.Fatal(err)
		}
		for j := 0; j < chunks; j++ {
			_, err := conn.Write(buf)
			if err != nil {
				b.Fatal(err)
			}
			_, err = io.ReadFull(conn, buf)
			if err != nil {
				b.Fatal(err)
			}
		}
		conn.Close()
	}
}
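
// BenchmarkThroughput measures bulk transfer speed for 1MB to 64MB
// payloads, comparing maximum-size records ("Max", dynamic sizing
// disabled) against dynamic record sizing ("Dynamic"). Run with:
// go test -bench=Throughput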
func BenchmarkThroughput(b *testing.B) {
	for _, mode := range []string{"Max", "Dynamic"} {
		for size := 1; size <= 64; size <<= 1 {
			name := fmt.Sprintf("%sPacket/%dMB", mode, size)
			b.Run(name, func(b *testing.B) {
				throughput(b, int64(size<<20), mode == "Max")
			})
		}
	}
}
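
// slowConn wraps a net.Conn and rate-limits writes to roughly bps bits
// per second, simulating a low-bandwidth link.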
type slowConn struct {
	net.Conn
	bps int
}

func (c *slowConn) Write(p []byte) (int, error) {
	if c.bps == 0 {
		panic("too slow")
	}
	t0 := time.Now()
	wrote := 0
	for wrote < len(p) {
		time.Sleep(100 * time.Microsecond)
		allowed := int(time.Since(t0).Seconds()*float64(c.bps)) / 8
		if allowed > len(p) {
			allowed = len(p)
		}
		if wrote < allowed {
			n, err := c.Conn.Write(p[wrote:allowed])
			wrote += n
			if err != nil {
				return wrote, err
			}
		}
	}
	return len(p), nil
}
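
// latency times, per iteration, a handshake plus two small echo round
// trips through a server whose writes are throttled by slowConn; with
// dynamic record sizing, the first records are small and should clear
// the slow link sooner.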
func latency(b *testing.B, bps int, dynamicRecordSizingDisabled bool) {
	ln := newLocalListener(b)
	defer ln.Close()

	N := b.N

	go func() {
		for i := 0; i < N; i++ {
			sconn, err := ln.Accept()
			if err != nil {
				// panic rather than synchronize to avoid benchmark overhead
				// (cannot call b.Fatal in goroutine)
				panic(fmt.Errorf("accept: %v", err))
			}
			serverConfig := testConfig.Clone()
			serverConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled
			srv := Server(&slowConn{sconn, bps}, serverConfig)
			if err := srv.Handshake(); err != nil {
				panic(fmt.Errorf("handshake: %v", err))
			}
			io.Copy(srv, srv)
		}
	}()

	clientConfig := testConfig.Clone()
	clientConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled

	buf := make([]byte, 16384)
	peek := make([]byte, 1)

	for i := 0; i < N; i++ {
		conn, err := Dial("tcp", ln.Addr().String(), clientConfig)
		if err != nil {
			b.Fatal(err)
		}
		// make sure we're connected and previous connection has stopped
		if _, err := conn.Write(buf[:1]); err != nil {
			b.Fatal(err)
		}
		if _, err := io.ReadFull(conn, peek); err != nil {
			b.Fatal(err)
		}
		if _, err := conn.Write(buf); err != nil {
			b.Fatal(err)
		}
		if _, err = io.ReadFull(conn, peek); err != nil {
			b.Fatal(err)
		}
		conn.Close()
	}
}
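
// BenchmarkLatency measures time to first byte at several link speeds,
// comparing maximum-size records ("Max") against dynamic record sizing
// ("Dynamic").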
func BenchmarkLatency(b *testing.B) {
	for _, mode := range []string{"Max", "Dynamic"} {
		for _, kbps := range []int{200, 500, 1000, 2000, 5000} {
			name := fmt.Sprintf("%sPacket/%dkbps", mode, kbps)
			b.Run(name, func(b *testing.B) {
				latency(b, kbps*1000, mode == "Max")
			})
		}
	}
}