// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// DTLS implementation.
//
// NOTE: This is not even a remotely production-quality DTLS
// implementation. It is the bare minimum necessary to be able to
// achieve coverage on BoringSSL's implementation. Of note is that
// this implementation assumes the underlying net.PacketConn is not
// only reliable but also ordered. BoringSSL will be expected to deal
// with simulated loss, but there is no point in forcing the test
// driver to.

package runner

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net"
)

func wireToVersion(vers uint16, isDTLS bool) (uint16, bool) {
	if isDTLS {
		switch vers {
		case VersionDTLS12:
			return VersionTLS12, true
		case VersionDTLS10:
			return VersionTLS10, true
		}
	} else {
		switch vers {
		case VersionSSL30, VersionTLS10, VersionTLS11, VersionTLS12:
			return vers, true
		case tls13DraftVersion, tls13ExperimentVersion:
			return VersionTLS13, true
		}
	}

	return 0, false
}
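
// DTLS wire versions run "backwards" (VersionDTLS10 is 0xfeff and
// VersionDTLS12 is 0xfefd), which is why the mapping above is an
// explicit switch rather than arithmetic. This illustrative helper
// (a sketch for exposition, not called by the runner) shows the
// expected round trip:
func dtlsWireVersionExample() bool {
	vers, ok := wireToVersion(VersionDTLS12, true)
	return ok && vers == VersionTLS12
}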

func (c *Conn) dtlsDoReadRecord(want recordType) (recordType, *block, error) {
	recordHeaderLen := dtlsRecordHeaderLen

	if c.rawInput == nil {
		c.rawInput = c.in.newBlock()
	}
	b := c.rawInput

	// Read a new packet only if the current one is empty.
	var newPacket bool
	if len(b.data) == 0 {
		// Pick some absurdly large buffer size.
		b.resize(maxCiphertext + recordHeaderLen)
		n, err := c.conn.Read(c.rawInput.data)
		if err != nil {
			return 0, nil, err
		}
		if c.config.Bugs.MaxPacketLength != 0 && n > c.config.Bugs.MaxPacketLength {
			return 0, nil, fmt.Errorf("dtls: exceeded maximum packet length")
		}
		c.rawInput.resize(n)
		newPacket = true
	}

	// Read out one record.
	//
	// A real DTLS implementation should be tolerant of errors,
	// but this is test code. We should not be tolerant of our
	// peer sending garbage.
	if len(b.data) < recordHeaderLen {
		return 0, nil, errors.New("dtls: failed to read record header")
	}
	typ := recordType(b.data[0])
	vers := uint16(b.data[1])<<8 | uint16(b.data[2])
	// Alerts sent near version negotiation do not have a well-defined
	// record-layer version prior to TLS 1.3. (In TLS 1.3, the record-layer
	// version is irrelevant.)
	if typ != recordTypeAlert {
		if c.haveVers {
			if vers != c.wireVersion {
				c.sendAlert(alertProtocolVersion)
				return 0, nil, c.in.setErrorLocked(fmt.Errorf("dtls: received record with version %x when expecting version %x", vers, c.wireVersion))
			}
		} else {
			// Pre-version-negotiation alerts may be sent with any version.
			if expect := c.config.Bugs.ExpectInitialRecordVersion; expect != 0 && vers != expect {
				c.sendAlert(alertProtocolVersion)
				return 0, nil, c.in.setErrorLocked(fmt.Errorf("dtls: received record with version %x when expecting version %x", vers, expect))
			}
		}
	}
	epoch := b.data[3:5]
	seq := b.data[5:11]
	// For test purposes, require the sequence number be monotonically
	// increasing, so c.in includes the minimum next sequence number. Gaps
	// may occur if packets failed to be sent out. A real implementation
	// would maintain a replay window and such.
	if !bytes.Equal(epoch, c.in.seq[:2]) {
		c.sendAlert(alertIllegalParameter)
		return 0, nil, c.in.setErrorLocked(fmt.Errorf("dtls: bad epoch"))
	}
	if bytes.Compare(seq, c.in.seq[2:]) < 0 {
		c.sendAlert(alertIllegalParameter)
		return 0, nil, c.in.setErrorLocked(fmt.Errorf("dtls: bad sequence number"))
	}
	copy(c.in.seq[2:], seq)
	n := int(b.data[11])<<8 | int(b.data[12])
	if n > maxCiphertext || len(b.data) < recordHeaderLen+n {
		c.sendAlert(alertRecordOverflow)
		return 0, nil, c.in.setErrorLocked(fmt.Errorf("dtls: oversized record received with length %d", n))
	}

	// Process message.
	b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n)
	ok, off, _, alertValue := c.in.decrypt(b)
	if !ok {
		// A real DTLS implementation would silently ignore bad records,
		// but we want to notice errors from the implementation under
		// test.
		return 0, nil, c.in.setErrorLocked(c.sendAlert(alertValue))
	}
	b.off = off

	// TODO(nharper): Once DTLS 1.3 is defined, handle the extra
	// parameter from decrypt.

	// Require that ChangeCipherSpec always share a packet with either the
	// previous or next handshake message.
	if newPacket && typ == recordTypeChangeCipherSpec && c.rawInput == nil {
		return 0, nil, c.in.setErrorLocked(fmt.Errorf("dtls: ChangeCipherSpec not packed together with Finished"))
	}

	return typ, b, nil
}
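
// dtlsDoReadRecord parses the 13-byte DTLS record header inline:
// type(1) | version(2) | epoch(2) | sequence(6) | length(2). This
// illustrative decoder (a sketch for exposition, not part of the
// original implementation) makes the field boundaries explicit:
func parseDTLSRecordHeaderSketch(header []byte) (typ recordType, vers, epoch uint16, seq uint64, length int) {
	typ = recordType(header[0])
	vers = uint16(header[1])<<8 | uint16(header[2])
	epoch = uint16(header[3])<<8 | uint16(header[4])
	for _, v := range header[5:11] {
		seq = seq<<8 | uint64(v) // 48-bit per-epoch sequence number
	}
	length = int(header[11])<<8 | int(header[12])
	return
}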

func (c *Conn) makeFragment(header, data []byte, fragOffset, fragLen int) []byte {
	fragment := make([]byte, 0, 12+fragLen)
	fragment = append(fragment, header...)
	fragment = append(fragment, byte(c.sendHandshakeSeq>>8), byte(c.sendHandshakeSeq))
	fragment = append(fragment, byte(fragOffset>>16), byte(fragOffset>>8), byte(fragOffset))
	fragment = append(fragment, byte(fragLen>>16), byte(fragLen>>8), byte(fragLen))
	fragment = append(fragment, data[fragOffset:fragOffset+fragLen]...)
	return fragment
}
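
// makeFragment reuses the message's 4-byte TLS handshake header and
// splices in the DTLS-only fields, yielding the 12-byte DTLS handshake
// header: msg_type(1) | length(3) | message_seq(2) | fragment_offset(3) |
// fragment_length(3). This illustrative inverse (a sketch for
// exposition, not used by the runner) mirrors the parsing in
// dtlsDoReadHandshake below:
func parseDTLSFragmentHeaderSketch(fragment []byte) (msgType byte, msgLen, msgSeq, fragOff, fragLen int) {
	msgType = fragment[0]
	msgLen = int(fragment[1])<<16 | int(fragment[2])<<8 | int(fragment[3])
	msgSeq = int(fragment[4])<<8 | int(fragment[5])
	fragOff = int(fragment[6])<<16 | int(fragment[7])<<8 | int(fragment[8])
	fragLen = int(fragment[9])<<16 | int(fragment[10])<<8 | int(fragment[11])
	return
}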

func (c *Conn) dtlsWriteRecord(typ recordType, data []byte) (n int, err error) {
	if typ != recordTypeHandshake {
		// Only handshake messages are fragmented.
		n, err = c.dtlsWriteRawRecord(typ, data)
		if err != nil {
			return
		}

		if typ == recordTypeChangeCipherSpec {
			err = c.out.changeCipherSpec(c.config)
			if err != nil {
				return n, c.sendAlertLocked(alertLevelError, err.(alert))
			}
		}
		return
	}

	if c.out.cipher == nil && c.config.Bugs.StrayChangeCipherSpec {
		_, err = c.dtlsWriteRawRecord(recordTypeChangeCipherSpec, []byte{1})
		if err != nil {
			return
		}
	}

	maxLen := c.config.Bugs.MaxHandshakeRecordLength
	if maxLen <= 0 {
		maxLen = 1024
	}

	// Handshake messages have to be modified to include the fragment
	// offset and length, with the header replicated in each fragment.
	// Save the TLS header here.
	//
	// TODO(davidben): This assumes that data contains exactly one
	// handshake message. This is incompatible with
	// FragmentAcrossChangeCipherSpec. (Which is unfortunate
	// because OpenSSL's DTLS implementation will probably accept
	// such fragmentation and could do with a fix + tests.)
	header := data[:4]
	data = data[4:]

	isFinished := header[0] == typeFinished

	if c.config.Bugs.SendEmptyFragments {
		fragment := c.makeFragment(header, data, 0, 0)
		c.pendingFragments = append(c.pendingFragments, fragment)
	}

	firstRun := true
	fragOffset := 0
	for firstRun || fragOffset < len(data) {
		firstRun = false
		fragLen := len(data) - fragOffset
		if fragLen > maxLen {
			fragLen = maxLen
		}

		fragment := c.makeFragment(header, data, fragOffset, fragLen)
		if c.config.Bugs.FragmentMessageTypeMismatch && fragOffset > 0 {
			fragment[0]++
		}
		if c.config.Bugs.FragmentMessageLengthMismatch && fragOffset > 0 {
			fragment[3]++
		}

		// Buffer the fragment for later. The fragments will be sent
		// (and reordered) on flush.
		c.pendingFragments = append(c.pendingFragments, fragment)
		if c.config.Bugs.ReorderHandshakeFragments {
			// Don't duplicate Finished to avoid the peer
			// interpreting it as a retransmit request.
			if !isFinished {
				c.pendingFragments = append(c.pendingFragments, fragment)
			}

			if fragLen > (maxLen+1)/2 {
				// Overlap each fragment by half.
				fragLen = (maxLen + 1) / 2
			}
		}
		fragOffset += fragLen
		n += fragLen
	}
	if !isFinished && c.config.Bugs.MixCompleteMessageWithFragments {
		fragment := c.makeFragment(header, data, 0, len(data))
		c.pendingFragments = append(c.pendingFragments, fragment)
	}

	// Increment the handshake sequence number for the next
	// handshake message.
	c.sendHandshakeSeq++
	return
}
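
// The loop above walks the message body in chunks of at most maxLen
// bytes; the firstRun flag ensures a zero-length message still emits one
// empty fragment. A standalone sketch of that arithmetic (for
// exposition, not used by the runner):
func fragmentBoundsSketch(dataLen, maxLen int) (bounds [][2]int) {
	firstRun := true
	for off := 0; firstRun || off < dataLen; {
		firstRun = false
		n := dataLen - off
		if n > maxLen {
			n = maxLen
		}
		bounds = append(bounds, [2]int{off, n}) // (fragment offset, fragment length)
		off += n
	}
	return
}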

func (c *Conn) dtlsFlushHandshake() error {
	// This is a test-only DTLS implementation, so there is no need to
	// retain |c.pendingFragments| for a future retransmit.
	var fragments [][]byte
	fragments, c.pendingFragments = c.pendingFragments, fragments

	if c.config.Bugs.ReorderHandshakeFragments {
		perm := rand.New(rand.NewSource(0)).Perm(len(fragments))
		tmp := make([][]byte, len(fragments))
		for i := range tmp {
			tmp[i] = fragments[perm[i]]
		}
		fragments = tmp
	} else if c.config.Bugs.ReverseHandshakeFragments {
		tmp := make([][]byte, len(fragments))
		for i := range tmp {
			tmp[i] = fragments[len(fragments)-i-1]
		}
		fragments = tmp
	}

	maxRecordLen := c.config.Bugs.PackHandshakeFragments
	maxPacketLen := c.config.Bugs.PackHandshakeRecords

	// Pack handshake fragments into records.
	var records [][]byte
	for _, fragment := range fragments {
		if n := c.config.Bugs.SplitFragments; n > 0 {
			if len(fragment) > n {
				records = append(records, fragment[:n])
				records = append(records, fragment[n:])
			} else {
				records = append(records, fragment)
			}
		} else if i := len(records) - 1; len(records) > 0 && len(records[i])+len(fragment) <= maxRecordLen {
			records[i] = append(records[i], fragment...)
		} else {
			// The fragment will be appended to, so copy it.
			records = append(records, append([]byte{}, fragment...))
		}
	}

	// Format them into packets.
	var packets [][]byte
	for _, record := range records {
		b, err := c.dtlsSealRecord(recordTypeHandshake, record)
		if err != nil {
			return err
		}

		if i := len(packets) - 1; len(packets) > 0 && len(packets[i])+len(b.data) <= maxPacketLen {
			packets[i] = append(packets[i], b.data...)
		} else {
			// The sealed record will be appended to and reused by
			// |c.out|, so copy it.
			packets = append(packets, append([]byte{}, b.data...))
		}
		c.out.freeBlock(b)
	}

	// Send all the packets.
	for _, packet := range packets {
		if _, err := c.conn.Write(packet); err != nil {
			return err
		}
	}
	return nil
}
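
// Both packing passes above use the same greedy rule: append to the
// last buffer while the result stays within the limit, otherwise start
// a new buffer (so a limit of zero disables packing entirely). A
// minimal sketch of the rule (for exposition, not used by the runner):
func greedyPackSketch(items [][]byte, max int) (out [][]byte) {
	for _, item := range items {
		if i := len(out) - 1; len(out) > 0 && len(out[i])+len(item) <= max {
			out[i] = append(out[i], item...)
		} else {
			// Copy so later appends cannot clobber the caller's buffer.
			out = append(out, append([]byte{}, item...))
		}
	}
	return
}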

// dtlsSealRecord seals a record into a block from |c.out|'s pool.
func (c *Conn) dtlsSealRecord(typ recordType, data []byte) (b *block, err error) {
	recordHeaderLen := dtlsRecordHeaderLen

	b = c.out.newBlock()

	explicitIVLen := 0
	explicitIVIsSeq := false

	if cbc, ok := c.out.cipher.(cbcMode); ok {
		// Block cipher modes have an explicit IV.
		explicitIVLen = cbc.BlockSize()
	} else if aead, ok := c.out.cipher.(*tlsAead); ok {
		if aead.explicitNonce {
			explicitIVLen = 8
			// The AES-GCM construction in TLS has an explicit nonce so that
			// the nonce can be random. However, the nonce is only 8 bytes
			// which is too small for a secure, random nonce. Therefore we
			// use the sequence number as the nonce.
			explicitIVIsSeq = true
		}
	} else if _, ok := c.out.cipher.(nullCipher); !ok && c.out.cipher != nil {
		panic("Unknown cipher")
	}
	b.resize(recordHeaderLen + explicitIVLen + len(data))
	// TODO(nharper): DTLS 1.3 will likely need to set this to
	// recordTypeApplicationData if c.out.cipher != nil.
	b.data[0] = byte(typ)
	vers := c.wireVersion
	if vers == 0 {
		// Some TLS servers fail if the record version is greater than
		// TLS 1.0 for the initial ClientHello.
		if c.isDTLS {
			vers = VersionDTLS10
		} else {
			vers = VersionTLS10
		}
	}
	b.data[1] = byte(vers >> 8)
	b.data[2] = byte(vers)
	// DTLS records include an explicit sequence number.
	copy(b.data[3:11], c.out.outSeq[0:])
	b.data[11] = byte(len(data) >> 8)
	b.data[12] = byte(len(data))
	if explicitIVLen > 0 {
		explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
		if explicitIVIsSeq {
			copy(explicitIV, c.out.outSeq[:])
		} else {
			if _, err = io.ReadFull(c.config.rand(), explicitIV); err != nil {
				return
			}
		}
	}
	copy(b.data[recordHeaderLen+explicitIVLen:], data)
	c.out.encrypt(b, explicitIVLen, typ)
	return
}
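
// For reference, a record sealed above has the on-the-wire layout:
// type(1) | version(2) | epoch+sequence(8) | length(2) |
// explicit IV (if any) | ciphertext.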

func (c *Conn) dtlsWriteRawRecord(typ recordType, data []byte) (n int, err error) {
	b, err := c.dtlsSealRecord(typ, data)
	if err != nil {
		return
	}

	_, err = c.conn.Write(b.data)
	if err != nil {
		return
	}
	n = len(data)

	c.out.freeBlock(b)
	return
}

func (c *Conn) dtlsDoReadHandshake() ([]byte, error) {
	// Assemble a full handshake message. For test purposes, this
	// implementation assumes fragments arrive in order. It may
	// need to be cleverer if we ever test BoringSSL's retransmit
	// behavior.
	for len(c.handMsg) < 4+c.handMsgLen {
		// Get a new handshake record if the previous has been
		// exhausted.
		if c.hand.Len() == 0 {
			if err := c.in.err; err != nil {
				return nil, err
			}
			if err := c.readRecord(recordTypeHandshake); err != nil {
				return nil, err
			}
		}

		// Read the next fragment. It must fit entirely within
		// the record.
		if c.hand.Len() < 12 {
			return nil, errors.New("dtls: bad handshake record")
		}
		header := c.hand.Next(12)
		fragN := int(header[1])<<16 | int(header[2])<<8 | int(header[3])
		fragSeq := uint16(header[4])<<8 | uint16(header[5])
		fragOff := int(header[6])<<16 | int(header[7])<<8 | int(header[8])
		fragLen := int(header[9])<<16 | int(header[10])<<8 | int(header[11])

		if c.hand.Len() < fragLen {
			return nil, errors.New("dtls: fragment length too long")
		}
		fragment := c.hand.Next(fragLen)

		// Check it's a fragment for the right message.
		if fragSeq != c.recvHandshakeSeq {
			return nil, errors.New("dtls: bad handshake sequence number")
		}

		// Check that the length is consistent.
		if c.handMsg == nil {
			c.handMsgLen = fragN
			if c.handMsgLen > maxHandshake {
				return nil, c.in.setErrorLocked(c.sendAlert(alertInternalError))
			}
			// Start with the TLS handshake header,
			// without the DTLS bits.
			c.handMsg = append([]byte{}, header[:4]...)
		} else if fragN != c.handMsgLen {
			return nil, errors.New("dtls: bad handshake length")
		}

		// Add the fragment to the pending message.
		if 4+fragOff != len(c.handMsg) {
			return nil, errors.New("dtls: bad fragment offset")
		}
		if fragOff+fragLen > c.handMsgLen {
			return nil, errors.New("dtls: bad fragment length")
		}
		c.handMsg = append(c.handMsg, fragment...)
	}
	c.recvHandshakeSeq++
	ret := c.handMsg
	c.handMsg, c.handMsgLen = nil, 0
	return ret, nil
}
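
// The reassembly loop above enforces, per fragment: the sequence number
// matches the message being assembled, the claimed total length is
// consistent across fragments, fragments arrive exactly in order with
// no gaps or overlaps (4+fragOff == len(c.handMsg)), and no fragment
// runs past the declared end of the message.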

// DTLSServer returns a new DTLS server side connection
// using conn as the underlying transport.
// The configuration config must be non-nil and must have
// at least one certificate.
func DTLSServer(conn net.Conn, config *Config) *Conn {
	c := &Conn{config: config, isDTLS: true, conn: conn}
	c.init()
	return c
}

// DTLSClient returns a new DTLS client side connection
// using conn as the underlying transport.
// The config cannot be nil: users must set either ServerHostname or
// InsecureSkipVerify in the config.
func DTLSClient(conn net.Conn, config *Config) *Conn {
	c := &Conn{config: config, isClient: true, isDTLS: true, conn: conn}
	c.init()
	return c
}
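
// A minimal usage sketch (illustrative only, not part of the original
// file): construct both ends of a DTLS connection. Per the note at the
// top of this file, the underlying transport must preserve and order
// packet boundaries; net.Pipe serves purely as a placeholder here, and
// a real caller supplies a Config with certificates and then drives the
// handshake from both ends.
func exampleDTLSPairSketch(config *Config) (client, server *Conn) {
	clientConn, serverConn := net.Pipe()
	client = DTLSClient(clientConn, config)
	server = DTLSServer(serverConn, config)
	return
}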