2014-06-20 20:00:00 +01:00
|
|
|
// Copyright 2010 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
// TLS low level connection and record layer
|
|
|
|
|
2015-09-29 23:21:04 +01:00
|
|
|
package runner
|
2014-06-20 20:00:00 +01:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"crypto/cipher"
|
2014-08-24 06:44:23 +01:00
|
|
|
"crypto/ecdsa"
|
2014-06-20 20:00:00 +01:00
|
|
|
"crypto/subtle"
|
|
|
|
"crypto/x509"
|
2015-07-25 23:29:23 +01:00
|
|
|
"encoding/binary"
|
2014-06-20 20:00:00 +01:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
)
|
|
|
|
|
|
|
|
// A Conn represents a secured connection.
|
|
|
|
// It implements the net.Conn interface.
|
|
|
|
type Conn struct {
	// constant
	conn     net.Conn
	isDTLS   bool
	isClient bool

	// constant after handshake; protected by handshakeMutex
	handshakeMutex       sync.Mutex // handshakeMutex < in.Mutex, out.Mutex, errMutex
	handshakeErr         error      // error resulting from handshake
	vers                 uint16     // TLS version
	haveVers             bool       // version has been negotiated
	config               *Config    // configuration passed to constructor
	handshakeComplete    bool
	didResume            bool // whether this connection was a session resumption
	extendedMasterSecret bool // whether this session used an extended master secret
	cipherSuite          *cipherSuite
	ocspResponse         []byte // stapled OCSP response
	sctList              []byte // signed certificate timestamp list
	peerCertificates     []*x509.Certificate
	// verifiedChains contains the certificate chains that we built, as
	// opposed to the ones presented by the server.
	verifiedChains [][]*x509.Certificate
	// serverName contains the server name indicated by the client, if any.
	serverName string
	// firstFinished contains the first Finished hash sent during the
	// handshake. This is the "tls-unique" channel binding value.
	firstFinished [12]byte
	// peerSignatureAlgorithm contains the signature algorithm that was used
	// by the peer in the handshake, or zero if not applicable.
	peerSignatureAlgorithm signatureAlgorithm

	clientRandom, serverRandom [32]byte
	masterSecret               [48]byte

	clientProtocol         string
	clientProtocolFallback bool
	usedALPN               bool

	// verify_data values for the renegotiation extension.
	clientVerify []byte
	serverVerify []byte

	// channelID holds an ECDSA public key — presumably the peer's
	// Channel ID key; NOTE(review): confirm against the handshake code.
	channelID *ecdsa.PublicKey

	// srtpProtectionProfile looks like the negotiated DTLS-SRTP profile
	// identifier — NOTE(review): confirm against the extension handling.
	srtpProtectionProfile uint16

	// clientVersion — NOTE(review): appears to record the version offered
	// by the client; confirm where it is assigned.
	clientVersion uint16

	// input/output
	in, out  halfConn     // in.Mutex < out.Mutex
	rawInput *block       // raw input, right off the wire
	input    *block       // application record waiting to be read
	hand     bytes.Buffer // handshake record waiting to be read

	// pendingFlight, if PackHandshakeFlight is enabled, is the buffer of
	// handshake data to be split into records at the end of the flight.
	pendingFlight bytes.Buffer

	// DTLS state
	sendHandshakeSeq uint16
	recvHandshakeSeq uint16
	handMsg          []byte   // pending assembled handshake message
	handMsgLen       int      // handshake message length, not including the header
	pendingFragments [][]byte // pending outgoing handshake fragments.

	// tmp is a small scratch buffer; its use is not visible in this chunk.
	tmp [16]byte
}
|
|
|
|
|
2014-11-07 06:48:35 +00:00
|
|
|
func (c *Conn) init() {
|
|
|
|
c.in.isDTLS = c.isDTLS
|
|
|
|
c.out.isDTLS = c.isDTLS
|
|
|
|
c.in.config = c.config
|
|
|
|
c.out.config = c.config
|
2015-07-25 23:29:23 +01:00
|
|
|
|
|
|
|
c.out.updateOutSeq()
|
2014-11-07 06:48:35 +00:00
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
// Access to net.Conn methods.
|
|
|
|
// Cannot just embed net.Conn because that would
|
|
|
|
// export the struct field too.
|
|
|
|
|
|
|
|
// LocalAddr returns the local network address.
|
|
|
|
func (c *Conn) LocalAddr() net.Addr {
	// Delegates directly to the underlying transport.
	return c.conn.LocalAddr()
}
|
|
|
|
|
|
|
|
// RemoteAddr returns the remote network address.
|
|
|
|
func (c *Conn) RemoteAddr() net.Addr {
	// Delegates directly to the underlying transport.
	return c.conn.RemoteAddr()
}
|
|
|
|
|
|
|
|
// SetDeadline sets the read and write deadlines associated with the connection.
|
|
|
|
// A zero value for t means Read and Write will not time out.
|
|
|
|
// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
|
|
|
|
func (c *Conn) SetDeadline(t time.Time) error {
	// Delegates directly to the underlying transport.
	return c.conn.SetDeadline(t)
}
|
|
|
|
|
|
|
|
// SetReadDeadline sets the read deadline on the underlying connection.
|
|
|
|
// A zero value for t means Read will not time out.
|
|
|
|
func (c *Conn) SetReadDeadline(t time.Time) error {
	// Delegates directly to the underlying transport.
	return c.conn.SetReadDeadline(t)
}
|
|
|
|
|
|
|
|
// SetWriteDeadline sets the write deadline on the underlying connection.
|
|
|
|
// A zero value for t means Write will not time out.
|
|
|
|
// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
|
|
|
|
func (c *Conn) SetWriteDeadline(t time.Time) error {
	// Delegates directly to the underlying transport.
	return c.conn.SetWriteDeadline(t)
}
|
|
|
|
|
|
|
|
// A halfConn represents one direction of the record layer
|
|
|
|
// connection, either sending or receiving.
|
|
|
|
type halfConn struct {
	sync.Mutex

	err     error  // first permanent error
	version uint16 // protocol version
	isDTLS  bool
	cipher  interface{} // cipher algorithm
	mac     macFunction
	seq     [8]byte // 64-bit sequence number
	outSeq  [8]byte // Mapped sequence number
	bfree   *block  // list of free blocks

	nextCipher interface{} // next encryption state
	nextMac    macFunction // next MAC algorithm
	nextSeq    [6]byte     // next epoch's starting sequence number in DTLS

	// used to save allocating a new buffer for each MAC.
	inDigestBuf, outDigestBuf []byte

	// config carries the test configuration; consulted for bug flags such
	// as SequenceNumberMapping in updateOutSeq.
	config *Config
}
|
|
|
|
|
|
|
|
func (hc *halfConn) setErrorLocked(err error) error {
|
|
|
|
hc.err = err
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (hc *halfConn) error() error {
|
2014-10-29 00:29:33 +00:00
|
|
|
// This should be locked, but I've removed it for the renegotiation
|
|
|
|
// tests since we don't concurrently read and write the same tls.Conn
|
|
|
|
// in any case during testing.
|
2014-06-20 20:00:00 +01:00
|
|
|
err := hc.err
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// prepareCipherSpec sets the encryption and MAC states
|
|
|
|
// that a subsequent changeCipherSpec will use.
|
|
|
|
func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac macFunction) {
|
|
|
|
hc.version = version
|
|
|
|
hc.nextCipher = cipher
|
|
|
|
hc.nextMac = mac
|
|
|
|
}
|
|
|
|
|
|
|
|
// changeCipherSpec changes the encryption and MAC states
|
|
|
|
// to the ones previously passed to prepareCipherSpec.
|
2014-06-20 20:00:00 +01:00
|
|
|
// changeCipherSpec installs the states previously staged by
// prepareCipherSpec, clears the staging slots, advances the epoch, and
// applies the NullAllCiphers test hook.
func (hc *halfConn) changeCipherSpec(config *Config) error {
	if hc.nextCipher == nil {
		// prepareCipherSpec was never called: nothing is staged.
		return alertInternalError
	}
	hc.cipher = hc.nextCipher
	hc.mac = hc.nextMac
	hc.nextCipher = nil
	hc.nextMac = nil
	hc.config = config
	hc.incEpoch()
	// Test hook: run with no cipher/MAC at all. Applied after incEpoch so
	// the epoch still advances normally.
	if config.Bugs.NullAllCiphers {
		hc.cipher = nil
		hc.mac = nil
	}
	return nil
}
|
|
|
|
|
2016-07-01 22:50:32 +01:00
|
|
|
// updateKeys sets the current cipher state.
|
|
|
|
func (hc *halfConn) updateKeys(cipher interface{}, version uint16) {
|
|
|
|
hc.version = version
|
|
|
|
hc.cipher = cipher
|
|
|
|
hc.incEpoch()
|
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
// incSeq increments the sequence number.
|
2014-11-07 06:48:35 +00:00
|
|
|
// incSeq increments the sequence number. In DTLS only the low six bytes
// are incremented; the top two bytes hold the epoch and are left alone.
// isOutgoing is unused in this implementation.
func (hc *halfConn) incSeq(isOutgoing bool) {
	limit := 0
	increment := uint64(1)
	if hc.isDTLS {
		// Increment up to the epoch in DTLS.
		limit = 2
	}
	// Big-endian add-with-carry from the least significant byte upward.
	for i := 7; i >= limit; i-- {
		increment += uint64(hc.seq[i])
		hc.seq[i] = byte(increment)
		increment >>= 8
	}

	// Not allowed to let sequence number wrap.
	// Instead, must renegotiate before it does.
	// Not likely enough to bother.
	if increment != 0 {
		panic("TLS: sequence number wraparound")
	}

	// Keep the (possibly bug-mapped) outgoing sequence number in sync.
	hc.updateOutSeq()
}
|
|
|
|
|
Add DTLS timeout and retransmit tests.
This extends the packet adaptor protocol to send three commands:
type command =
| Packet of []byte
| Timeout of time.Duration
| TimeoutAck
When the shim processes a Timeout in BIO_read, it sends TimeoutAck, fails the
BIO_read, returns out of the SSL stack, advances the clock, calls
DTLSv1_handle_timeout, and continues.
If the Go side sends Timeout right between sending handshake flight N and
reading flight N+1, the shim won't read the Timeout until it has sent flight
N+1 (it only processes packet commands in BIO_read), so the TimeoutAck comes
after N+1. Go then drops all packets before the TimeoutAck, thus dropping one
transmit of flight N+1 without having to actually process the packets to
determine the end of the flight. The shim then sees the updated clock, calls
DTLSv1_handle_timeout, and re-sends flight N+1 for Go to process for real.
When dropping packets, Go checks the epoch and increments sequence numbers so
that we can continue to be strict here. This requires tracking the initial
sequence number of the next epoch.
The final Finished message takes an additional special-case to test. DTLS
triggers retransmits on either a timeout or seeing a stale flight. OpenSSL only
implements the former which should be sufficient (and is necessary) EXCEPT for
the final Finished message. If the peer's final Finished message is lost, it
won't be waiting for a message from us, so it won't time out anything. That
retransmit must be triggered on stale message, so we retransmit the Finished
message in Go.
Change-Id: I3ffbdb1de525beb2ee831b304670a3387877634c
Reviewed-on: https://boringssl-review.googlesource.com/3212
Reviewed-by: Adam Langley <agl@google.com>
2015-01-27 06:09:43 +00:00
|
|
|
// incNextSeq increments the starting sequence number for the next epoch.
|
|
|
|
func (hc *halfConn) incNextSeq() {
|
|
|
|
for i := len(hc.nextSeq) - 1; i >= 0; i-- {
|
|
|
|
hc.nextSeq[i]++
|
|
|
|
if hc.nextSeq[i] != 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
panic("TLS: sequence number wraparound")
|
|
|
|
}
|
|
|
|
|
|
|
|
// incEpoch resets the sequence number. In DTLS, it also increments the epoch
|
|
|
|
// half of the sequence number.
|
2014-08-04 06:23:53 +01:00
|
|
|
// incEpoch resets the sequence number. In DTLS, it also increments the
// epoch half of the sequence number (the top two bytes of seq) and
// installs the saved starting sequence number for the new epoch.
func (hc *halfConn) incEpoch() {
	if hc.isDTLS {
		// Increment the 16-bit epoch in seq[0:2], with carry.
		for i := 1; i >= 0; i-- {
			hc.seq[i]++
			if hc.seq[i] != 0 {
				break
			}
			if i == 0 {
				panic("TLS: epoch number wraparound")
			}
		}
		// The new epoch starts at the sequence number accumulated in
		// nextSeq (see incNextSeq), then nextSeq is cleared.
		copy(hc.seq[2:], hc.nextSeq[:])
		for i := range hc.nextSeq {
			hc.nextSeq[i] = 0
		}
	} else {
		// TLS: the sequence number simply resets to zero.
		for i := range hc.seq {
			hc.seq[i] = 0
		}
	}

	hc.updateOutSeq()
}
|
|
|
|
|
|
|
|
// updateOutSeq recomputes outSeq, the sequence number actually placed on
// the wire, from seq. Normally they are identical; the
// SequenceNumberMapping test hook can remap the value.
func (hc *halfConn) updateOutSeq() {
	if hc.config.Bugs.SequenceNumberMapping != nil {
		seqU64 := binary.BigEndian.Uint64(hc.seq[:])
		seqU64 = hc.config.Bugs.SequenceNumberMapping(seqU64)
		binary.BigEndian.PutUint64(hc.outSeq[:], seqU64)

		// The DTLS epoch cannot be changed.
		copy(hc.outSeq[:2], hc.seq[:2])
		return
	}

	copy(hc.outSeq[:], hc.seq[:])
}
|
|
|
|
|
2014-08-04 06:23:53 +01:00
|
|
|
func (hc *halfConn) recordHeaderLen() int {
|
|
|
|
if hc.isDTLS {
|
|
|
|
return dtlsRecordHeaderLen
|
|
|
|
}
|
|
|
|
return tlsRecordHeaderLen
|
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
// removePadding returns an unpadded slice, in constant time, which is a prefix
|
|
|
|
// of the input. It also returns a byte which is equal to 255 if the padding
|
|
|
|
// was valid and 0 otherwise. See RFC 2246, section 6.2.3.2
|
|
|
|
// removePadding returns an unpadded slice, in constant time, which is a
// prefix of the input. It also returns a byte which is equal to 255 if
// the padding was valid and 0 otherwise. See RFC 2246, section 6.2.3.2.
// The branch-free arithmetic is deliberate: do not "simplify" it, as the
// point is to avoid a padding-oracle timing channel.
func removePadding(payload []byte) ([]byte, byte) {
	if len(payload) < 1 {
		return payload, 0
	}

	paddingLen := payload[len(payload)-1]
	t := uint(len(payload)-1) - uint(paddingLen)
	// if len(payload) >= (paddingLen - 1) then the MSB of t is zero
	good := byte(int32(^t) >> 31)

	toCheck := 255 // the maximum possible padding length
	// The length of the padded data is public, so we can use an if here
	if toCheck+1 > len(payload) {
		toCheck = len(payload) - 1
	}

	// Verify every claimed padding byte equals paddingLen, accumulating
	// failures into good without branching on secret data.
	for i := 0; i < toCheck; i++ {
		t := uint(paddingLen) - uint(i)
		// if i <= paddingLen then the MSB of t is zero
		mask := byte(int32(^t) >> 31)
		b := payload[len(payload)-1-i]
		good &^= mask&paddingLen ^ mask&b
	}

	// We AND together the bits of good and replicate the result across
	// all the bits.
	good &= good << 4
	good &= good << 2
	good &= good << 1
	good = uint8(int8(good) >> 7)

	toRemove := good&paddingLen + 1
	return payload[:len(payload)-int(toRemove)], good
}
|
|
|
|
|
|
|
|
// removePaddingSSL30 is a replacement for removePadding in the case that the
|
|
|
|
// protocol version is SSLv3. In this version, the contents of the padding
|
|
|
|
// are random and cannot be checked.
|
|
|
|
// removePaddingSSL30 is a replacement for removePadding in the case that
// the protocol version is SSLv3. In this version, the contents of the
// padding are random and cannot be checked; only the final length byte
// is meaningful.
func removePaddingSSL30(payload []byte) ([]byte, byte) {
	if len(payload) == 0 {
		return payload, 0
	}

	padLen := int(payload[len(payload)-1]) + 1
	if padLen > len(payload) {
		// Claimed padding exceeds the record: reject.
		return payload, 0
	}

	return payload[:len(payload)-padLen], 255
}
|
|
|
|
|
|
|
|
// roundUp returns the smallest multiple of b that is >= a.
// It assumes a >= 0 and b > 0, as in the original's modular form.
func roundUp(a, b int) int {
	if rem := a % b; rem != 0 {
		return a + b - rem
	}
	return a
}
|
|
|
|
|
|
|
|
// cbcMode is an interface for block ciphers using cipher block chaining.
|
|
|
|
// cbcMode is an interface for block ciphers using cipher block chaining;
// it extends cipher.BlockMode with the ability to reset the IV between
// records (needed for explicit-IV TLS versions).
type cbcMode interface {
	cipher.BlockMode
	SetIV([]byte)
}
|
|
|
|
|
|
|
|
// decrypt checks and strips the mac and decrypts the data in b. Returns a
|
|
|
|
// success boolean, the number of bytes to skip from the start of the record in
|
2016-06-15 02:14:35 +01:00
|
|
|
// order to get the application payload, the encrypted record type (or 0
|
|
|
|
// if there is none), and an optional alert value.
|
|
|
|
// decrypt checks and strips the mac and decrypts the data in b. Returns
// a success boolean, the number of bytes to skip from the start of the
// record in order to get the application payload, the encrypted record
// type (or 0 if there is none), and an optional alert value.
func (hc *halfConn) decrypt(b *block) (ok bool, prefixLen int, contentType recordType, alertValue alert) {
	recordHeaderLen := hc.recordHeaderLen()

	// pull out payload
	payload := b.data[recordHeaderLen:]

	macSize := 0
	if hc.mac != nil {
		macSize = hc.mac.Size()
	}

	paddingGood := byte(255)
	explicitIVLen := 0

	seq := hc.seq[:]
	if hc.isDTLS {
		// DTLS sequence numbers are explicit: read them from the record
		// header (epoch + sequence, bytes 3..10) instead of local state.
		seq = b.data[3:11]
	}

	// decrypt
	if hc.cipher != nil {
		switch c := hc.cipher.(type) {
		case cipher.Stream:
			c.XORKeyStream(payload, payload)
		case *tlsAead:
			nonce := seq
			if c.explicitNonce {
				// The nonce prefix travels on the wire ahead of the
				// ciphertext (e.g. AES-GCM in TLS 1.2).
				explicitIVLen = 8
				if len(payload) < explicitIVLen {
					return false, 0, 0, alertBadRecordMAC
				}
				nonce = payload[:8]
				payload = payload[8:]
			}

			// Pre-1.3, the additional data is seq || header type/version
			// || plaintext length. TLS 1.3 uses no additional data here.
			var additionalData []byte
			if hc.version < VersionTLS13 {
				additionalData = make([]byte, 13)
				copy(additionalData, seq)
				copy(additionalData[8:], b.data[:3])
				n := len(payload) - c.Overhead()
				additionalData[11] = byte(n >> 8)
				additionalData[12] = byte(n)
			}
			var err error
			payload, err = c.Open(payload[:0], nonce, payload, additionalData)
			if err != nil {
				return false, 0, 0, alertBadRecordMAC
			}
			if hc.version >= VersionTLS13 {
				// TLS 1.3: strip zero padding, then the trailing byte is
				// the real content type. An all-zero plaintext is illegal.
				i := len(payload)
				for i > 0 && payload[i-1] == 0 {
					i--
				}
				payload = payload[:i]
				if len(payload) == 0 {
					return false, 0, 0, alertUnexpectedMessage
				}
				contentType = recordType(payload[len(payload)-1])
				payload = payload[:len(payload)-1]
			}
			b.resize(recordHeaderLen + explicitIVLen + len(payload))
		case cbcMode:
			blockSize := c.BlockSize()
			if hc.version >= VersionTLS11 || hc.isDTLS {
				// TLS 1.1+ and DTLS carry an explicit per-record IV.
				explicitIVLen = blockSize
			}

			if len(payload)%blockSize != 0 || len(payload) < roundUp(explicitIVLen+macSize+1, blockSize) {
				return false, 0, 0, alertBadRecordMAC
			}

			if explicitIVLen > 0 {
				c.SetIV(payload[:explicitIVLen])
				payload = payload[explicitIVLen:]
			}
			c.CryptBlocks(payload, payload)
			if hc.version == VersionSSL30 {
				payload, paddingGood = removePaddingSSL30(payload)
			} else {
				payload, paddingGood = removePadding(payload)
			}
			b.resize(recordHeaderLen + explicitIVLen + len(payload))

			// note that we still have a timing side-channel in the
			// MAC check, below. An attacker can align the record
			// so that a correct padding will cause one less hash
			// block to be calculated. Then they can iteratively
			// decrypt a record by breaking each byte. See
			// "Password Interception in a SSL/TLS Channel", Brice
			// Canvel et al.
			//
			// However, our behavior matches OpenSSL, so we leak
			// only as much as they do.
		case nullCipher:
			break
		default:
			panic("unknown cipher type")
		}
	}

	// check, strip mac
	if hc.mac != nil {
		if len(payload) < macSize {
			return false, 0, 0, alertBadRecordMAC
		}

		// strip mac off payload, b.data
		n := len(payload) - macSize
		// Rewrite the header length field to the MAC-less length before
		// computing the local MAC over header + plaintext.
		b.data[recordHeaderLen-2] = byte(n >> 8)
		b.data[recordHeaderLen-1] = byte(n)
		b.resize(recordHeaderLen + explicitIVLen + n)
		remoteMAC := payload[n:]
		localMAC := hc.mac.MAC(hc.inDigestBuf, seq, b.data[:3], b.data[recordHeaderLen-2:recordHeaderLen], payload[:n])

		// Constant-time compare; also fold in the CBC padding verdict so
		// bad padding and bad MAC are indistinguishable to the peer.
		if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 {
			return false, 0, 0, alertBadRecordMAC
		}
		hc.inDigestBuf = localMAC
	}
	hc.incSeq(false)

	return true, recordHeaderLen + explicitIVLen, contentType, 0
}
|
|
|
|
|
|
|
|
// padToBlockSize calculates the needed padding block, if any, for a payload.
|
|
|
|
// On exit, prefix aliases payload and extends to the end of the last full
|
|
|
|
// block of payload. finalBlock is a fresh slice which contains the contents of
|
|
|
|
// any suffix of payload as well as the needed padding to make finalBlock a
|
|
|
|
// full block.
|
2014-06-20 20:00:00 +01:00
|
|
|
// padToBlockSize calculates the needed padding block, if any, for a
// payload. On exit, prefix aliases payload and extends to the end of the
// last full block of payload. finalBlock is a fresh slice which contains
// the contents of any suffix of payload as well as the needed padding to
// make finalBlock a full block.
func padToBlockSize(payload []byte, blockSize int, config *Config) (prefix, finalBlock []byte) {
	overrun := len(payload) % blockSize
	prefix = payload[:len(payload)-overrun]

	paddingLen := blockSize - overrun
	finalSize := blockSize
	if config.Bugs.MaxPadding {
		// Test hook: pad out to the TLS maximum (256 bytes total in the
		// final chunk) instead of the minimum.
		for paddingLen+blockSize <= 256 {
			paddingLen += blockSize
		}
		finalSize = 256
	}
	finalBlock = make([]byte, finalSize)
	// Fill the whole chunk with the padding byte value; the payload
	// suffix is copied over the front afterwards.
	for i := range finalBlock {
		finalBlock[i] = byte(paddingLen - 1)
	}
	if config.Bugs.PaddingFirstByteBad || config.Bugs.PaddingFirstByteBadIf255 && paddingLen == 256 {
		// Test hook: corrupt the first padding byte. Done before the
		// payload copy so only the padding region is affected.
		finalBlock[overrun] ^= 0xff
	}
	copy(finalBlock, payload[len(payload)-overrun:])
	return
}
|
|
|
|
|
|
|
|
// encrypt encrypts and macs the data in b.
|
2016-06-15 02:14:35 +01:00
|
|
|
// encrypt encrypts and macs the data in b. b holds the record header
// followed by explicitIVLen bytes of IV space and the plaintext; typ is
// the true record type (hidden inside the ciphertext in TLS 1.3).
func (hc *halfConn) encrypt(b *block, explicitIVLen int, typ recordType) (bool, alert) {
	recordHeaderLen := hc.recordHeaderLen()

	// mac
	if hc.mac != nil {
		mac := hc.mac.MAC(hc.outDigestBuf, hc.outSeq[0:], b.data[:3], b.data[recordHeaderLen-2:recordHeaderLen], b.data[recordHeaderLen+explicitIVLen:])

		n := len(b.data)
		b.resize(n + len(mac))
		copy(b.data[n:], mac)
		hc.outDigestBuf = mac
	}

	payload := b.data[recordHeaderLen:]

	// encrypt
	if hc.cipher != nil {
		switch c := hc.cipher.(type) {
		case cipher.Stream:
			c.XORKeyStream(payload, payload)
		case *tlsAead:
			payloadLen := len(b.data) - recordHeaderLen - explicitIVLen
			paddingLen := 0
			if hc.version >= VersionTLS13 {
				// TLS 1.3: one extra byte for the inner content type,
				// plus any test-configured record padding.
				payloadLen++
				paddingLen = hc.config.Bugs.RecordPadding
			}
			if hc.config.Bugs.OmitRecordContents {
				// Test hook: seal an empty plaintext.
				payloadLen = 0
			}
			b.resize(recordHeaderLen + explicitIVLen + payloadLen + paddingLen + c.Overhead())
			if hc.version >= VersionTLS13 {
				// Append the true content type, then zero padding.
				if !hc.config.Bugs.OmitRecordContents {
					b.data[payloadLen+recordHeaderLen-1] = byte(typ)
				}
				for i := 0; i < hc.config.Bugs.RecordPadding; i++ {
					b.data[payloadLen+recordHeaderLen+i] = 0
				}
				payloadLen += paddingLen
			}
			nonce := hc.outSeq[:]
			if c.explicitNonce {
				// The explicit nonce occupies the IV space in the record.
				nonce = b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
			}
			payload := b.data[recordHeaderLen+explicitIVLen:]
			payload = payload[:payloadLen]

			// Pre-1.3 additional data: seq || header type/version ||
			// plaintext length. TLS 1.3 uses none.
			var additionalData []byte
			if hc.version < VersionTLS13 {
				additionalData = make([]byte, 13)
				copy(additionalData, hc.outSeq[:])
				copy(additionalData[8:], b.data[:3])
				additionalData[11] = byte(payloadLen >> 8)
				additionalData[12] = byte(payloadLen)
			}

			// Seal in place: ciphertext+tag overwrite the plaintext.
			c.Seal(payload[:0], nonce, payload, additionalData)
		case cbcMode:
			blockSize := c.BlockSize()
			if explicitIVLen > 0 {
				c.SetIV(payload[:explicitIVLen])
				payload = payload[explicitIVLen:]
			}
			prefix, finalBlock := padToBlockSize(payload, blockSize, hc.config)
			b.resize(recordHeaderLen + explicitIVLen + len(prefix) + len(finalBlock))
			c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen:], prefix)
			c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen+len(prefix):], finalBlock)
		case nullCipher:
			break
		default:
			panic("unknown cipher type")
		}
	}

	// update length to include MAC and any block padding needed.
	n := len(b.data) - recordHeaderLen
	b.data[recordHeaderLen-2] = byte(n >> 8)
	b.data[recordHeaderLen-1] = byte(n)
	hc.incSeq(true)

	return true, 0
}
|
|
|
|
|
|
|
|
// A block is a simple data buffer.
|
|
|
|
// A block is a simple data buffer, linkable into a free list.
type block struct {
	data []byte
	off  int    // index for Read
	link *block // next block on the owning halfConn's free list
}
|
|
|
|
|
|
|
|
// resize resizes block to be n bytes, growing if necessary.
|
|
|
|
func (b *block) resize(n int) {
|
|
|
|
if n > cap(b.data) {
|
|
|
|
b.reserve(n)
|
|
|
|
}
|
|
|
|
b.data = b.data[0:n]
|
|
|
|
}
|
|
|
|
|
|
|
|
// reserve makes sure that block contains a capacity of at least n bytes.
|
|
|
|
func (b *block) reserve(n int) {
|
|
|
|
if cap(b.data) >= n {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
m := cap(b.data)
|
|
|
|
if m == 0 {
|
|
|
|
m = 1024
|
|
|
|
}
|
|
|
|
for m < n {
|
|
|
|
m *= 2
|
|
|
|
}
|
|
|
|
data := make([]byte, len(b.data), m)
|
|
|
|
copy(data, b.data)
|
|
|
|
b.data = data
|
|
|
|
}
|
|
|
|
|
|
|
|
// readFromUntil reads from r into b until b contains at least n bytes
|
|
|
|
// or else returns an error.
|
|
|
|
func (b *block) readFromUntil(r io.Reader, n int) error {
|
|
|
|
// quick case
|
|
|
|
if len(b.data) >= n {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// read until have enough.
|
|
|
|
b.reserve(n)
|
|
|
|
for {
|
|
|
|
m, err := r.Read(b.data[len(b.data):cap(b.data)])
|
|
|
|
b.data = b.data[0 : len(b.data)+m]
|
|
|
|
if len(b.data) >= n {
|
|
|
|
// TODO(bradfitz,agl): slightly suspicious
|
|
|
|
// that we're throwing away r.Read's err here.
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *block) Read(p []byte) (n int, err error) {
|
|
|
|
n = copy(p, b.data[b.off:])
|
|
|
|
b.off += n
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// newBlock allocates a new block, from hc's free list if possible.
|
|
|
|
func (hc *halfConn) newBlock() *block {
|
|
|
|
b := hc.bfree
|
|
|
|
if b == nil {
|
|
|
|
return new(block)
|
|
|
|
}
|
|
|
|
hc.bfree = b.link
|
|
|
|
b.link = nil
|
|
|
|
b.resize(0)
|
|
|
|
return b
|
|
|
|
}
|
|
|
|
|
|
|
|
// freeBlock returns a block to hc's free list.
|
|
|
|
// The protocol is such that each side only has a block or two on
|
|
|
|
// its free list at a time, so there's no need to worry about
|
|
|
|
// trimming the list, etc.
|
|
|
|
// freeBlock returns a block to hc's free list.
// The protocol is such that each side only has a block or two on
// its free list at a time, so there's no need to worry about
// trimming the list, etc.
func (hc *halfConn) freeBlock(b *block) {
	// Push onto the head of the singly-linked free list.
	b.link = hc.bfree
	hc.bfree = b
}
|
|
|
|
|
|
|
|
// splitBlock splits a block after the first n bytes,
|
|
|
|
// returning a block with those n bytes and a
|
|
|
|
// block with the remainder. the latter may be nil.
|
|
|
|
func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) {
|
|
|
|
if len(b.data) <= n {
|
|
|
|
return b, nil
|
|
|
|
}
|
|
|
|
bb := hc.newBlock()
|
|
|
|
bb.resize(len(b.data) - n)
|
|
|
|
copy(bb.data, b.data[n:])
|
|
|
|
b.data = b.data[0:n]
|
|
|
|
return b, bb
|
|
|
|
}
|
|
|
|
|
2014-08-04 06:23:53 +01:00
|
|
|
// doReadRecord reads one TLS record from c.conn, validates its header
// against the negotiated state, decrypts it, and returns the record's
// type together with a block positioned at the plaintext payload.
// DTLS connections are delegated to dtlsDoReadRecord. On failure the
// half-connection error is set (for non-temporary errors) and an alert
// may be sent to the peer.
func (c *Conn) doReadRecord(want recordType) (recordType, *block, error) {
	if c.isDTLS {
		return c.dtlsDoReadRecord(want)
	}

	recordHeaderLen := tlsRecordHeaderLen

	// Lazily allocate the raw-input staging block the first time a
	// record is read on this connection.
	if c.rawInput == nil {
		c.rawInput = c.in.newBlock()
	}
	b := c.rawInput

	// Read header, payload.
	if err := b.readFromUntil(c.conn, recordHeaderLen); err != nil {
		// RFC suggests that EOF without an alertCloseNotify is
		// an error, but popular web sites seem to do this,
		// so we can't make it an error, outside of tests.
		if err == io.EOF && c.config.Bugs.ExpectCloseNotify {
			err = io.ErrUnexpectedEOF
		}
		// Only latch the error if it is permanent; temporary
		// network errors leave the connection usable.
		if e, ok := err.(net.Error); !ok || !e.Temporary() {
			c.in.setErrorLocked(err)
		}
		return 0, nil, err
	}
	typ := recordType(b.data[0])

	// No valid TLS record has a type of 0x80, however SSLv2 handshakes
	// start with a uint16 length where the MSB is set and the first record
	// is always < 256 bytes long. Therefore typ == 0x80 strongly suggests
	// an SSLv2 client.
	if want == recordTypeHandshake && typ == 0x80 {
		c.sendAlert(alertProtocolVersion)
		return 0, nil, c.in.setErrorLocked(errors.New("tls: unsupported SSLv2 handshake received"))
	}

	// Record-layer version and payload length from the 5-byte header.
	vers := uint16(b.data[1])<<8 | uint16(b.data[2])
	n := int(b.data[3])<<8 | int(b.data[4])

	// Alerts sent near version negotiation do not have a well-defined
	// record-layer version prior to TLS 1.3. (In TLS 1.3, the record-layer
	// version is irrelevant.)
	if typ != recordTypeAlert {
		if c.haveVers {
			if vers != c.vers && c.vers < VersionTLS13 {
				c.sendAlert(alertProtocolVersion)
				return 0, nil, c.in.setErrorLocked(fmt.Errorf("tls: received record with version %x when expecting version %x", vers, c.vers))
			}
		} else {
			// Before version negotiation, only enforce the
			// record version when a test explicitly asks for it.
			if expect := c.config.Bugs.ExpectInitialRecordVersion; expect != 0 && vers != expect {
				c.sendAlert(alertProtocolVersion)
				return 0, nil, c.in.setErrorLocked(fmt.Errorf("tls: received record with version %x when expecting version %x", vers, expect))
			}
		}
	}
	if n > maxCiphertext {
		c.sendAlert(alertRecordOverflow)
		return 0, nil, c.in.setErrorLocked(fmt.Errorf("tls: oversized record received with length %d", n))
	}
	if !c.haveVers {
		// First message, be extra suspicious:
		// this might not be a TLS client.
		// Bail out before reading a full 'body', if possible.
		// The current max version is 3.1.
		// If the version is >= 16.0, it's probably not real.
		// Similarly, a clientHello message encodes in
		// well under a kilobyte. If the length is >= 12 kB,
		// it's probably not real.
		if (typ != recordTypeAlert && typ != want) || vers >= 0x1000 || n >= 0x3000 {
			c.sendAlert(alertUnexpectedMessage)
			return 0, nil, c.in.setErrorLocked(fmt.Errorf("tls: first record does not look like a TLS handshake"))
		}
	}
	// Read the remainder of the record body.
	if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		if e, ok := err.(net.Error); !ok || !e.Temporary() {
			c.in.setErrorLocked(err)
		}
		return 0, nil, err
	}

	// Process message.
	// Split off this record; any following bytes stay buffered in
	// c.rawInput for the next read.
	b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n)
	ok, off, encTyp, alertValue := c.in.decrypt(b)
	if !ok {
		return 0, nil, c.in.setErrorLocked(c.sendAlert(alertValue))
	}
	b.off = off

	if c.vers >= VersionTLS13 && c.in.cipher != nil {
		// TLS 1.3 encrypted records carry the true content type
		// inside the ciphertext; the outer type must always be
		// application data.
		if typ != recordTypeApplicationData {
			return 0, nil, c.in.setErrorLocked(fmt.Errorf("tls: outer record type is not application data"))
		}
		typ = encTyp
	}
	return typ, b, nil
}
|
|
|
|
|
|
|
|
// readRecord reads the next TLS record from the connection
// and updates the record layer state.
// c.in.Mutex <= L; c.input == nil.
//
// It first validates that the requested record type is consistent with
// the handshake state, then dispatches on the type actually received:
// alerts are handled in place (warnings are dropped and reading
// continues), ChangeCipherSpec updates the read cipher state,
// application data is parked in c.input, and handshake bytes are
// appended to c.hand. Any other combination raises an alert.
func (c *Conn) readRecord(want recordType) error {
	// Caller must be in sync with connection:
	// handshake data if handshake not yet completed,
	// else application data.
	switch want {
	default:
		c.sendAlert(alertInternalError)
		return c.in.setErrorLocked(errors.New("tls: unknown record type requested"))
	case recordTypeHandshake, recordTypeChangeCipherSpec:
		if c.handshakeComplete {
			c.sendAlert(alertInternalError)
			return c.in.setErrorLocked(errors.New("tls: handshake or ChangeCipherSpec requested after handshake complete"))
		}
	case recordTypeApplicationData:
		// ExpectFalseStart permits reading application data before
		// the handshake completes (testing False Start).
		if !c.handshakeComplete && !c.config.Bugs.ExpectFalseStart {
			c.sendAlert(alertInternalError)
			return c.in.setErrorLocked(errors.New("tls: application data record requested before handshake complete"))
		}
	case recordTypeAlert:
		// Looking for a close_notify. Note: unlike a real
		// implementation, this is not tolerant of additional records.
		// See the documentation for ExpectCloseNotify.
	}

Again:
	typ, b, err := c.doReadRecord(want)
	if err != nil {
		return err
	}
	data := b.data[b.off:]
	if len(data) > maxPlaintext {
		err := c.sendAlert(alertRecordOverflow)
		c.in.freeBlock(b)
		return c.in.setErrorLocked(err)
	}

	switch typ {
	default:
		c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))

	case recordTypeAlert:
		// An alert is exactly two bytes: level, description.
		if len(data) != 2 {
			c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
			break
		}
		if alert(data[1]) == alertCloseNotify {
			// Orderly shutdown from the peer surfaces as EOF.
			c.in.setErrorLocked(io.EOF)
			break
		}
		switch data[0] {
		case alertLevelWarning:
			// drop on the floor
			c.in.freeBlock(b)
			goto Again
		case alertLevelError:
			c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
		default:
			c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
		}

	case recordTypeChangeCipherSpec:
		// ChangeCipherSpec is a single byte with value 1.
		if typ != want || len(data) != 1 || data[0] != 1 {
			c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
			break
		}
		err := c.in.changeCipherSpec(c.config)
		if err != nil {
			c.in.setErrorLocked(c.sendAlert(err.(alert)))
		}

	case recordTypeApplicationData:
		if typ != want {
			c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
			break
		}
		// Hand ownership of the block to c.input; clearing b
		// prevents the free below.
		c.input = b
		b = nil

	case recordTypeHandshake:
		// TODO(rsc): Should at least pick off connection close.
		if typ != want {
			// A client might need to process a HelloRequest from
			// the server, thus receiving a handshake message when
			// application data is expected is ok.
			if !c.isClient || want != recordTypeApplicationData {
				return c.in.setErrorLocked(c.sendAlert(alertNoRenegotiation))
			}
		}
		c.hand.Write(data)
	}

	if b != nil {
		c.in.freeBlock(b)
	}
	return c.in.err
}
|
|
|
|
|
|
|
|
// sendAlert sends a TLS alert message.
|
|
|
|
// c.out.Mutex <= L.
|
2015-06-06 08:28:08 +01:00
|
|
|
func (c *Conn) sendAlertLocked(level byte, err alert) error {
|
|
|
|
c.tmp[0] = level
|
2014-06-20 20:00:00 +01:00
|
|
|
c.tmp[1] = byte(err)
|
2014-11-01 23:39:08 +00:00
|
|
|
if c.config.Bugs.FragmentAlert {
|
|
|
|
c.writeRecord(recordTypeAlert, c.tmp[0:1])
|
|
|
|
c.writeRecord(recordTypeAlert, c.tmp[1:2])
|
2016-03-12 03:25:18 +00:00
|
|
|
} else if c.config.Bugs.DoubleAlert {
|
|
|
|
copy(c.tmp[2:4], c.tmp[0:2])
|
|
|
|
c.writeRecord(recordTypeAlert, c.tmp[0:4])
|
2014-11-01 23:39:08 +00:00
|
|
|
} else {
|
|
|
|
c.writeRecord(recordTypeAlert, c.tmp[0:2])
|
|
|
|
}
|
2015-06-06 08:28:08 +01:00
|
|
|
// Error alerts are fatal to the connection.
|
|
|
|
if level == alertLevelError {
|
2014-06-20 20:00:00 +01:00
|
|
|
return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// sendAlert sends a TLS alert message.
|
|
|
|
// L < c.out.Mutex.
|
|
|
|
func (c *Conn) sendAlert(err alert) error {
|
2015-06-06 08:28:08 +01:00
|
|
|
level := byte(alertLevelError)
|
2016-03-10 20:44:22 +00:00
|
|
|
if err == alertNoRenegotiation || err == alertCloseNotify || err == alertNoCertficate {
|
2015-06-06 08:28:08 +01:00
|
|
|
level = alertLevelWarning
|
|
|
|
}
|
|
|
|
return c.SendAlert(level, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// SendAlert sends a TLS alert message with an explicit level and
// description, acquiring c.out.Mutex for the duration of the write.
func (c *Conn) SendAlert(level byte, err alert) error {
	c.out.Lock()
	defer c.out.Unlock()
	return c.sendAlertLocked(level, err)
}
|
|
|
|
|
2014-08-02 09:07:12 +01:00
|
|
|
// writeV2Record writes a record for a V2ClientHello.
|
|
|
|
func (c *Conn) writeV2Record(data []byte) (n int, err error) {
|
|
|
|
record := make([]byte, 2+len(data))
|
|
|
|
record[0] = uint8(len(data)>>8) | 0x80
|
|
|
|
record[1] = uint8(len(data))
|
|
|
|
copy(record[2:], data)
|
|
|
|
return c.conn.Write(record)
|
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
// writeRecord writes a TLS record with the given type and payload
|
|
|
|
// to the connection and updates the record layer state.
|
|
|
|
// c.out.Mutex <= L.
|
|
|
|
func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) {
|
2014-08-04 06:23:53 +01:00
|
|
|
if c.isDTLS {
|
|
|
|
return c.dtlsWriteRecord(typ, data)
|
|
|
|
}
|
|
|
|
|
2016-07-08 22:10:48 +01:00
|
|
|
if typ == recordTypeHandshake {
|
|
|
|
if c.config.Bugs.SendHelloRequestBeforeEveryHandshakeMessage {
|
|
|
|
newData := make([]byte, 0, 4+len(data))
|
|
|
|
newData = append(newData, typeHelloRequest, 0, 0, 0)
|
|
|
|
newData = append(newData, data...)
|
|
|
|
data = newData
|
|
|
|
}
|
|
|
|
|
|
|
|
if c.config.Bugs.PackHandshakeFlight {
|
|
|
|
c.pendingFlight.Write(data)
|
|
|
|
return len(data), nil
|
|
|
|
}
|
2016-07-07 20:33:25 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return c.doWriteRecord(typ, data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// doWriteRecord fragments data into one or more TLS records of type
// typ, encrypts each with the outgoing cipher state, and writes them
// to c.conn. It returns the number of payload bytes consumed. Writing
// a ChangeCipherSpec record additionally switches the write cipher
// state afterwards.
func (c *Conn) doWriteRecord(typ recordType, data []byte) (n int, err error) {
	recordHeaderLen := tlsRecordHeaderLen
	b := c.out.newBlock()
	first := true
	isClientHello := typ == recordTypeHandshake && len(data) > 0 && data[0] == typeClientHello
	// The "|| first" allows an empty payload to still emit one record.
	for len(data) > 0 || first {
		m := len(data)
		if m > maxPlaintext && !c.config.Bugs.SendLargeRecords {
			m = maxPlaintext
		}
		// Tests may force handshake messages into smaller fragments.
		if typ == recordTypeHandshake && c.config.Bugs.MaxHandshakeRecordLength > 0 && m > c.config.Bugs.MaxHandshakeRecordLength {
			m = c.config.Bugs.MaxHandshakeRecordLength
			// By default, do not fragment the client_version or
			// server_version, which are located in the first 6
			// bytes.
			if first && isClientHello && !c.config.Bugs.FragmentClientVersion && m < 6 {
				m = 6
			}
		}
		explicitIVLen := 0
		explicitIVIsSeq := false
		first = false

		// TLS 1.1+ CBC ciphers carry an explicit per-record IV.
		var cbc cbcMode
		if c.out.version >= VersionTLS11 {
			var ok bool
			if cbc, ok = c.out.cipher.(cbcMode); ok {
				explicitIVLen = cbc.BlockSize()
			}
		}
		if explicitIVLen == 0 {
			if aead, ok := c.out.cipher.(*tlsAead); ok && aead.explicitNonce {
				explicitIVLen = 8
				// The AES-GCM construction in TLS has an
				// explicit nonce so that the nonce can be
				// random. However, the nonce is only 8 bytes
				// which is too small for a secure, random
				// nonce. Therefore we use the sequence number
				// as the nonce.
				explicitIVIsSeq = true
			}
		}
		b.resize(recordHeaderLen + explicitIVLen + m)
		b.data[0] = byte(typ)
		if c.vers >= VersionTLS13 && c.out.cipher != nil {
			// TLS 1.3 hides the true type inside the ciphertext;
			// the outer type is application data (unless a test
			// overrides it).
			b.data[0] = byte(recordTypeApplicationData)
			if outerType := c.config.Bugs.OuterRecordType; outerType != 0 {
				b.data[0] = byte(outerType)
			}
		}
		vers := c.vers
		if vers == 0 || vers >= VersionTLS13 {
			// Some TLS servers fail if the record version is
			// greater than TLS 1.0 for the initial ClientHello.
			//
			// TLS 1.3 fixes the version number in the record
			// layer to {3, 1}.
			vers = VersionTLS10
		}
		b.data[1] = byte(vers >> 8)
		b.data[2] = byte(vers)
		b.data[3] = byte(m >> 8)
		b.data[4] = byte(m)
		if explicitIVLen > 0 {
			explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
			if explicitIVIsSeq {
				copy(explicitIV, c.out.seq[:])
			} else {
				if _, err = io.ReadFull(c.config.rand(), explicitIV); err != nil {
					break
				}
			}
		}
		copy(b.data[recordHeaderLen+explicitIVLen:], data)
		c.out.encrypt(b, explicitIVLen, typ)
		_, err = c.conn.Write(b.data)
		if err != nil {
			break
		}
		n += m
		data = data[m:]
	}
	c.out.freeBlock(b)

	if typ == recordTypeChangeCipherSpec {
		err = c.out.changeCipherSpec(c.config)
		if err != nil {
			// Cannot call sendAlert directly,
			// because we already hold c.out.Mutex.
			c.tmp[0] = alertLevelError
			c.tmp[1] = byte(err.(alert))
			c.writeRecord(recordTypeAlert, c.tmp[0:2])
			return n, c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
		}
	}
	return
}
|
|
|
|
|
2016-07-07 20:33:25 +01:00
|
|
|
func (c *Conn) flushHandshake() error {
|
|
|
|
if c.isDTLS {
|
|
|
|
return c.dtlsFlushHandshake()
|
|
|
|
}
|
|
|
|
|
|
|
|
for c.pendingFlight.Len() > 0 {
|
|
|
|
var buf [maxPlaintext]byte
|
|
|
|
n, _ := c.pendingFlight.Read(buf[:])
|
|
|
|
if _, err := c.doWriteRecord(recordTypeHandshake, buf[:n]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
c.pendingFlight.Reset()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-08-04 06:23:53 +01:00
|
|
|
func (c *Conn) doReadHandshake() ([]byte, error) {
|
|
|
|
if c.isDTLS {
|
|
|
|
return c.dtlsDoReadHandshake()
|
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
for c.hand.Len() < 4 {
|
|
|
|
if err := c.in.err; err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if err := c.readRecord(recordTypeHandshake); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
data := c.hand.Bytes()
|
|
|
|
n := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
|
|
|
|
if n > maxHandshake {
|
|
|
|
return nil, c.in.setErrorLocked(c.sendAlert(alertInternalError))
|
|
|
|
}
|
|
|
|
for c.hand.Len() < 4+n {
|
|
|
|
if err := c.in.err; err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if err := c.readRecord(recordTypeHandshake); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2014-08-04 06:23:53 +01:00
|
|
|
return c.hand.Next(4 + n), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// readHandshake reads the next handshake message from
// the record layer.
// c.in.Mutex < L; c.out.Mutex < L.
//
// The returned value is one of the concrete *XxxMsg handshake message
// types, selected by the message's type byte and parsed via its
// unmarshal method. Unknown types and parse failures send
// unexpected_message and latch the error.
func (c *Conn) readHandshake() (interface{}, error) {
	data, err := c.doReadHandshake()
	if err != nil {
		return nil, err
	}

	// Pick the message struct matching the handshake type byte.
	// Several messages parse differently depending on the
	// negotiated protocol version, configured below.
	var m handshakeMessage
	switch data[0] {
	case typeHelloRequest:
		m = new(helloRequestMsg)
	case typeClientHello:
		m = &clientHelloMsg{
			isDTLS: c.isDTLS,
		}
	case typeServerHello:
		m = &serverHelloMsg{
			isDTLS: c.isDTLS,
		}
	case typeNewSessionTicket:
		m = new(newSessionTicketMsg)
	case typeEncryptedExtensions:
		m = new(encryptedExtensionsMsg)
	case typeCertificate:
		m = &certificateMsg{
			hasRequestContext: c.vers >= VersionTLS13 && enableTLS13Handshake,
		}
	case typeCertificateRequest:
		m = &certificateRequestMsg{
			hasSignatureAlgorithm: c.vers >= VersionTLS12,
			hasRequestContext:     c.vers >= VersionTLS13 && enableTLS13Handshake,
		}
	case typeCertificateStatus:
		m = new(certificateStatusMsg)
	case typeServerKeyExchange:
		m = new(serverKeyExchangeMsg)
	case typeServerHelloDone:
		m = new(serverHelloDoneMsg)
	case typeClientKeyExchange:
		m = new(clientKeyExchangeMsg)
	case typeCertificateVerify:
		m = &certificateVerifyMsg{
			hasSignatureAlgorithm: c.vers >= VersionTLS12,
		}
	case typeNextProtocol:
		m = new(nextProtoMsg)
	case typeFinished:
		m = new(finishedMsg)
	case typeHelloVerifyRequest:
		m = new(helloVerifyRequestMsg)
	case typeChannelID:
		m = new(channelIDMsg)
	default:
		return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
	}

	// The handshake message unmarshallers
	// expect to be able to keep references to data,
	// so pass in a fresh copy that won't be overwritten.
	data = append([]byte(nil), data...)

	if !m.unmarshal(data) {
		return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
	}
	return m, nil
}
|
|
|
|
|
Add DTLS timeout and retransmit tests.
This extends the packet adaptor protocol to send three commands:
type command =
| Packet of []byte
| Timeout of time.Duration
| TimeoutAck
When the shim processes a Timeout in BIO_read, it sends TimeoutAck, fails the
BIO_read, returns out of the SSL stack, advances the clock, calls
DTLSv1_handle_timeout, and continues.
If the Go side sends Timeout right between sending handshake flight N and
reading flight N+1, the shim won't read the Timeout until it has sent flight
N+1 (it only processes packet commands in BIO_read), so the TimeoutAck comes
after N+1. Go then drops all packets before the TimeoutAck, thus dropping one
transmit of flight N+1 without having to actually process the packets to
determine the end of the flight. The shim then sees the updated clock, calls
DTLSv1_handle_timeout, and re-sends flight N+1 for Go to process for real.
When dropping packets, Go checks the epoch and increments sequence numbers so
that we can continue to be strict here. This requires tracking the initial
sequence number of the next epoch.
The final Finished message takes an additional special-case to test. DTLS
triggers retransmits on either a timeout or seeing a stale flight. OpenSSL only
implements the former which should be sufficient (and is necessary) EXCEPT for
the final Finished message. If the peer's final Finished message is lost, it
won't be waiting for a message from us, so it won't time out anything. That
retransmit must be triggered on stale message, so we retransmit the Finished
message in Go.
Change-Id: I3ffbdb1de525beb2ee831b304670a3387877634c
Reviewed-on: https://boringssl-review.googlesource.com/3212
Reviewed-by: Adam Langley <agl@google.com>
2015-01-27 06:09:43 +00:00
|
|
|
// skipPacket processes all the DTLS records in packet. It updates
// sequence number expectations but otherwise ignores them.
//
// Each DTLS record has a 13-byte header; bytes 3-4 are the epoch,
// bytes 5-10 the 48-bit sequence number, and bytes 11-12 the payload
// length. Records for the current read epoch advance c.in.seq; records
// for a later epoch advance c.in.nextSeq. A sequence number that moves
// backwards is rejected.
func (c *Conn) skipPacket(packet []byte) error {
	for len(packet) > 0 {
		if len(packet) < 13 {
			return errors.New("tls: bad packet")
		}
		// Dropped packets are completely ignored save to update
		// expected sequence numbers for this and the next epoch. (We
		// don't assert on the contents of the packets both for
		// simplicity and because a previous test with one shorter
		// timeout schedule would have done so.)
		epoch := packet[3:5]
		seq := packet[5:11]
		length := uint16(packet[11])<<8 | uint16(packet[12])
		if bytes.Equal(c.in.seq[:2], epoch) {
			// Record belongs to the current read epoch.
			if bytes.Compare(seq, c.in.seq[2:]) < 0 {
				return errors.New("tls: sequence mismatch")
			}
			copy(c.in.seq[2:], seq)
			c.in.incSeq(false)
		} else {
			// Record belongs to the next epoch; track its
			// sequence separately.
			if bytes.Compare(seq, c.in.nextSeq[:]) < 0 {
				return errors.New("tls: sequence mismatch")
			}
			copy(c.in.nextSeq[:], seq)
			c.in.incNextSeq()
		}
		// Ensure the payload is actually present before skipping it.
		if len(packet) < 13+int(length) {
			return errors.New("tls: bad packet")
		}
		packet = packet[13+length:]
	}
	return nil
}
|
|
|
|
|
|
|
|
// simulatePacketLoss simulates the loss of a handshake leg from the
|
|
|
|
// peer based on the schedule in c.config.Bugs. If resendFunc is
|
|
|
|
// non-nil, it is called after each simulated timeout to retransmit
|
|
|
|
// handshake messages from the local end. This is used in cases where
|
|
|
|
// the peer retransmits on a stale Finished rather than a timeout.
|
|
|
|
func (c *Conn) simulatePacketLoss(resendFunc func()) error {
|
|
|
|
if len(c.config.Bugs.TimeoutSchedule) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if !c.isDTLS {
|
|
|
|
return errors.New("tls: TimeoutSchedule may only be set in DTLS")
|
|
|
|
}
|
|
|
|
if c.config.Bugs.PacketAdaptor == nil {
|
|
|
|
return errors.New("tls: TimeoutSchedule set without PacketAdapter")
|
|
|
|
}
|
|
|
|
for _, timeout := range c.config.Bugs.TimeoutSchedule {
|
|
|
|
// Simulate a timeout.
|
|
|
|
packets, err := c.config.Bugs.PacketAdaptor.SendReadTimeout(timeout)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, packet := range packets {
|
|
|
|
if err := c.skipPacket(packet); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if resendFunc != nil {
|
|
|
|
resendFunc()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
// Write writes data to the connection.
|
|
|
|
func (c *Conn) Write(b []byte) (int, error) {
|
|
|
|
if err := c.Handshake(); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
c.out.Lock()
|
|
|
|
defer c.out.Unlock()
|
|
|
|
|
|
|
|
if err := c.out.err; err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if !c.handshakeComplete {
|
|
|
|
return 0, alertInternalError
|
|
|
|
}
|
|
|
|
|
2015-02-03 21:07:32 +00:00
|
|
|
if c.config.Bugs.SendSpuriousAlert != 0 {
|
2015-06-06 08:28:08 +01:00
|
|
|
c.sendAlertLocked(alertLevelError, c.config.Bugs.SendSpuriousAlert)
|
2014-11-01 23:39:08 +00:00
|
|
|
}
|
|
|
|
|
2015-11-03 21:34:10 +00:00
|
|
|
if c.config.Bugs.SendHelloRequestBeforeEveryAppDataRecord {
|
|
|
|
c.writeRecord(recordTypeHandshake, []byte{typeHelloRequest, 0, 0, 0})
|
2016-07-07 20:33:25 +01:00
|
|
|
c.flushHandshake()
|
2015-11-03 21:34:10 +00:00
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
// SSL 3.0 and TLS 1.0 are susceptible to a chosen-plaintext
|
|
|
|
// attack when using block mode ciphers due to predictable IVs.
|
|
|
|
// This can be prevented by splitting each Application Data
|
|
|
|
// record into two records, effectively randomizing the IV.
|
|
|
|
//
|
|
|
|
// http://www.openssl.org/~bodo/tls-cbc.txt
|
|
|
|
// https://bugzilla.mozilla.org/show_bug.cgi?id=665814
|
|
|
|
// http://www.imperialviolet.org/2012/01/15/beastfollowup.html
|
|
|
|
|
|
|
|
var m int
|
2014-08-04 06:23:53 +01:00
|
|
|
if len(b) > 1 && c.vers <= VersionTLS10 && !c.isDTLS {
|
2014-06-20 20:00:00 +01:00
|
|
|
if _, ok := c.out.cipher.(cipher.BlockMode); ok {
|
|
|
|
n, err := c.writeRecord(recordTypeApplicationData, b[:1])
|
|
|
|
if err != nil {
|
|
|
|
return n, c.out.setErrorLocked(err)
|
|
|
|
}
|
|
|
|
m, b = 1, b[1:]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
n, err := c.writeRecord(recordTypeApplicationData, b)
|
|
|
|
return n + m, c.out.setErrorLocked(err)
|
|
|
|
}
|
|
|
|
|
2014-10-29 00:29:33 +00:00
|
|
|
func (c *Conn) handleRenegotiation() error {
|
|
|
|
c.handshakeComplete = false
|
|
|
|
if !c.isClient {
|
|
|
|
panic("renegotiation should only happen for a client")
|
|
|
|
}
|
|
|
|
|
|
|
|
msg, err := c.readHandshake()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, ok := msg.(*helloRequestMsg)
|
|
|
|
if !ok {
|
|
|
|
c.sendAlert(alertUnexpectedMessage)
|
|
|
|
return alertUnexpectedMessage
|
|
|
|
}
|
|
|
|
|
|
|
|
return c.Handshake()
|
|
|
|
}
|
|
|
|
|
2014-10-29 02:06:14 +00:00
|
|
|
func (c *Conn) Renegotiate() error {
|
|
|
|
if !c.isClient {
|
2015-12-06 18:17:07 +00:00
|
|
|
helloReq := new(helloRequestMsg).marshal()
|
|
|
|
if c.config.Bugs.BadHelloRequest != nil {
|
|
|
|
helloReq = c.config.Bugs.BadHelloRequest
|
|
|
|
}
|
|
|
|
c.writeRecord(recordTypeHandshake, helloReq)
|
2016-07-07 20:33:25 +01:00
|
|
|
c.flushHandshake()
|
2014-10-29 02:06:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
c.handshakeComplete = false
|
|
|
|
return c.Handshake()
|
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (n int, err error) {
	if err = c.Handshake(); err != nil {
		return
	}

	c.in.Lock()
	defer c.in.Unlock()

	// Some OpenSSL servers send empty records in order to randomize the
	// CBC IV. So this loop ignores a limited number of empty records.
	const maxConsecutiveEmptyRecords = 100
	for emptyRecordCount := 0; emptyRecordCount <= maxConsecutiveEmptyRecords; emptyRecordCount++ {
		// Pull records until application data is buffered in c.input or
		// an error is pending on c.in.
		for c.input == nil && c.in.err == nil {
			if err := c.readRecord(recordTypeApplicationData); err != nil {
				// Soft error, like EAGAIN
				return 0, err
			}
			if c.hand.Len() > 0 {
				// We received handshake bytes, indicating the
				// start of a renegotiation.
				if err := c.handleRenegotiation(); err != nil {
					return 0, err
				}
				continue
			}
		}
		if err := c.in.err; err != nil {
			return 0, err
		}

		n, err = c.input.Read(b)
		// Release the record buffer once fully consumed. In DTLS the
		// unread remainder of a record is discarded rather than kept
		// for the next Read.
		if c.input.off >= len(c.input.data) || c.isDTLS {
			c.in.freeBlock(c.input)
			c.input = nil
		}

		// If a close-notify alert is waiting, read it so that
		// we can return (n, EOF) instead of (n, nil), to signal
		// to the HTTP response reading goroutine that the
		// connection is now closed. This eliminates a race
		// where the HTTP response reading goroutine would
		// otherwise not observe the EOF until its next read,
		// by which time a client goroutine might have already
		// tried to reuse the HTTP connection for a new
		// request.
		// See https://codereview.appspot.com/76400046
		// and http://golang.org/issue/3514
		if ri := c.rawInput; ri != nil &&
			n != 0 && err == nil &&
			c.input == nil && len(ri.data) > 0 && recordType(ri.data[0]) == recordTypeAlert {
			if recErr := c.readRecord(recordTypeApplicationData); recErr != nil {
				err = recErr // will be io.EOF on closeNotify
			}
		}

		if n != 0 || err != nil {
			return n, err
		}
	}

	// Too many consecutive empty records; give up rather than spin.
	return 0, io.ErrNoProgress
}
|
|
|
|
|
|
|
|
// Close closes the connection.
|
|
|
|
func (c *Conn) Close() error {
|
|
|
|
var alertErr error
|
|
|
|
|
|
|
|
c.handshakeMutex.Lock()
|
|
|
|
defer c.handshakeMutex.Unlock()
|
2015-08-30 03:56:45 +01:00
|
|
|
if c.handshakeComplete && !c.config.Bugs.NoCloseNotify {
|
2016-05-10 22:03:10 +01:00
|
|
|
alert := alertCloseNotify
|
|
|
|
if c.config.Bugs.SendAlertOnShutdown != 0 {
|
|
|
|
alert = c.config.Bugs.SendAlertOnShutdown
|
|
|
|
}
|
|
|
|
alertErr = c.sendAlert(alert)
|
2016-05-18 19:31:51 +01:00
|
|
|
// Clear local alerts when sending alerts so we continue to wait
|
|
|
|
// for the peer rather than closing the socket early.
|
|
|
|
if opErr, ok := alertErr.(*net.OpError); ok && opErr.Op == "local error" {
|
|
|
|
alertErr = nil
|
|
|
|
}
|
2014-06-20 20:00:00 +01:00
|
|
|
}
|
|
|
|
|
2015-08-30 03:56:45 +01:00
|
|
|
// Consume a close_notify from the peer if one hasn't been received
|
|
|
|
// already. This avoids the peer from failing |SSL_shutdown| due to a
|
|
|
|
// write failing.
|
|
|
|
if c.handshakeComplete && alertErr == nil && c.config.Bugs.ExpectCloseNotify {
|
|
|
|
for c.in.error() == nil {
|
|
|
|
c.readRecord(recordTypeAlert)
|
|
|
|
}
|
|
|
|
if c.in.error() != io.EOF {
|
|
|
|
alertErr = c.in.error()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-20 20:00:00 +01:00
|
|
|
if err := c.conn.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return alertErr
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handshake runs the client or server handshake
|
|
|
|
// protocol if it has not yet been run.
|
|
|
|
// Most uses of this package need not call Handshake
|
|
|
|
// explicitly: the first Read or Write will call it automatically.
|
|
|
|
func (c *Conn) Handshake() error {
|
|
|
|
c.handshakeMutex.Lock()
|
|
|
|
defer c.handshakeMutex.Unlock()
|
|
|
|
if err := c.handshakeErr; err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if c.handshakeComplete {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-05-16 06:30:09 +01:00
|
|
|
if c.isDTLS && c.config.Bugs.SendSplitAlert {
|
|
|
|
c.conn.Write([]byte{
|
|
|
|
byte(recordTypeAlert), // type
|
|
|
|
0xfe, 0xff, // version
|
|
|
|
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // sequence
|
|
|
|
0x0, 0x2, // length
|
|
|
|
})
|
|
|
|
c.conn.Write([]byte{alertLevelError, byte(alertInternalError)})
|
|
|
|
}
|
2015-08-22 06:35:43 +01:00
|
|
|
if data := c.config.Bugs.AppDataBeforeHandshake; data != nil {
|
|
|
|
c.writeRecord(recordTypeApplicationData, data)
|
|
|
|
}
|
2014-06-20 20:00:00 +01:00
|
|
|
if c.isClient {
|
|
|
|
c.handshakeErr = c.clientHandshake()
|
|
|
|
} else {
|
|
|
|
c.handshakeErr = c.serverHandshake()
|
|
|
|
}
|
2015-02-03 20:44:39 +00:00
|
|
|
if c.handshakeErr == nil && c.config.Bugs.SendInvalidRecordType {
|
|
|
|
c.writeRecord(recordType(42), []byte("invalid record"))
|
|
|
|
}
|
2014-06-20 20:00:00 +01:00
|
|
|
return c.handshakeErr
|
|
|
|
}
|
|
|
|
|
|
|
|
// ConnectionState returns basic TLS details about the connection.
|
|
|
|
func (c *Conn) ConnectionState() ConnectionState {
|
|
|
|
c.handshakeMutex.Lock()
|
|
|
|
defer c.handshakeMutex.Unlock()
|
|
|
|
|
|
|
|
var state ConnectionState
|
|
|
|
state.HandshakeComplete = c.handshakeComplete
|
|
|
|
if c.handshakeComplete {
|
|
|
|
state.Version = c.vers
|
|
|
|
state.NegotiatedProtocol = c.clientProtocol
|
|
|
|
state.DidResume = c.didResume
|
|
|
|
state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback
|
2014-09-06 18:21:53 +01:00
|
|
|
state.NegotiatedProtocolFromALPN = c.usedALPN
|
2015-04-03 09:06:36 +01:00
|
|
|
state.CipherSuite = c.cipherSuite.id
|
2014-06-20 20:00:00 +01:00
|
|
|
state.PeerCertificates = c.peerCertificates
|
|
|
|
state.VerifiedChains = c.verifiedChains
|
|
|
|
state.ServerName = c.serverName
|
2014-08-24 06:44:23 +01:00
|
|
|
state.ChannelID = c.channelID
|
2014-11-16 00:06:08 +00:00
|
|
|
state.SRTPProtectionProfile = c.srtpProtectionProfile
|
2015-06-03 17:57:23 +01:00
|
|
|
state.TLSUnique = c.firstFinished[:]
|
2015-09-09 13:44:55 +01:00
|
|
|
state.SCTList = c.sctList
|
2016-06-21 23:19:24 +01:00
|
|
|
state.PeerSignatureAlgorithm = c.peerSignatureAlgorithm
|
2014-06-20 20:00:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return state
|
|
|
|
}
|
|
|
|
|
|
|
|
// OCSPResponse returns the stapled OCSP response from the TLS server, if
|
|
|
|
// any. (Only valid for client connections.)
|
|
|
|
func (c *Conn) OCSPResponse() []byte {
|
|
|
|
c.handshakeMutex.Lock()
|
|
|
|
defer c.handshakeMutex.Unlock()
|
|
|
|
|
|
|
|
return c.ocspResponse
|
|
|
|
}
|
|
|
|
|
|
|
|
// VerifyHostname checks that the peer certificate chain is valid for
|
|
|
|
// connecting to host. If so, it returns nil; if not, it returns an error
|
|
|
|
// describing the problem.
|
|
|
|
func (c *Conn) VerifyHostname(host string) error {
|
|
|
|
c.handshakeMutex.Lock()
|
|
|
|
defer c.handshakeMutex.Unlock()
|
|
|
|
if !c.isClient {
|
|
|
|
return errors.New("tls: VerifyHostname called on TLS server connection")
|
|
|
|
}
|
|
|
|
if !c.handshakeComplete {
|
|
|
|
return errors.New("tls: handshake has not yet been performed")
|
|
|
|
}
|
|
|
|
return c.peerCertificates[0].VerifyHostname(host)
|
|
|
|
}
|
2015-04-03 09:06:36 +01:00
|
|
|
|
|
|
|
// ExportKeyingMaterial exports keying material from the current connection
|
|
|
|
// state, as per RFC 5705.
|
|
|
|
func (c *Conn) ExportKeyingMaterial(length int, label, context []byte, useContext bool) ([]byte, error) {
|
|
|
|
c.handshakeMutex.Lock()
|
|
|
|
defer c.handshakeMutex.Unlock()
|
|
|
|
if !c.handshakeComplete {
|
|
|
|
return nil, errors.New("tls: handshake has not yet been performed")
|
|
|
|
}
|
|
|
|
|
|
|
|
seedLen := len(c.clientRandom) + len(c.serverRandom)
|
|
|
|
if useContext {
|
|
|
|
seedLen += 2 + len(context)
|
|
|
|
}
|
|
|
|
seed := make([]byte, 0, seedLen)
|
|
|
|
seed = append(seed, c.clientRandom[:]...)
|
|
|
|
seed = append(seed, c.serverRandom[:]...)
|
|
|
|
if useContext {
|
|
|
|
seed = append(seed, byte(len(context)>>8), byte(len(context)))
|
|
|
|
seed = append(seed, context...)
|
|
|
|
}
|
|
|
|
result := make([]byte, length)
|
|
|
|
prfForVersion(c.vers, c.cipherSuite)(result, c.masterSecret[:], label, seed)
|
|
|
|
return result, nil
|
|
|
|
}
|
2015-11-26 01:10:31 +00:00
|
|
|
|
|
|
|
// noRenegotiationInfo returns true if the renegotiation info extension
|
|
|
|
// should be supported in the current handshake.
|
|
|
|
func (c *Conn) noRenegotiationInfo() bool {
|
|
|
|
if c.config.Bugs.NoRenegotiationInfo {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if c.cipherSuite == nil && c.config.Bugs.NoRenegotiationInfoInInitial {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if c.cipherSuite != nil && c.config.Bugs.NoRenegotiationInfoAfterInitial {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|