wireguard-go/src/send.go

package main

import (
	"encoding/binary"
	"errors"
	"golang.org/x/crypto/chacha20poly1305"
	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
	"net"
	"sync"
	"sync/atomic"
	"time"
)

/* Handles outbound flow
 *
 * 1. TUN queue
 * 2. Routing (sequential)
 * 3. Nonce assignment (sequential)
 * 4. Encryption (parallel)
 * 5. Transmission (sequential)
 *
 * The order of packets (per peer) is maintained.
 * The functions in this file occur (roughly) in the order packets are processed.
 */
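
// A rough sketch of the queue wiring the stages above imply. The channel names
// are the ones used in this file; their definitions (and buffer sizes) live in
// the Device and Peer structs elsewhere in this package, so the picture is
// illustrative only:
//
//	TUN read --> peer.queue.nonce        (per peer, sequential nonce assignment)
//	         --> device.queue.encryption (shared by parallel workers)
//	         --> peer.queue.outbound     (per peer, sequential transmission)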

/* The sequential consumers will attempt to take the lock,
 * workers release the lock when they have completed work (encryption) on the packet.
 *
 * If the element is inserted into the "encryption queue",
 * the content is preceded by enough "junk" to contain the transport header
 * (to allow the construction of transport messages in-place)
 */
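
// Illustrative picture of the buffer layout described above (the offset is the
// MessageTransportHeaderSize constant used throughout this file):
//
//	elem.buffer: [ header-sized "junk" region | plaintext packet ............ ]
//	               ^ elem.buffer[0]             ^ elem.packet[0]
//
// RoutineEncryption later writes the transport header into the "junk" region
// and lets the AEAD append the ciphertext in place, so no extra copy is needed.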

type QueueOutboundElement struct {
	dropped int32
	mutex   sync.Mutex
	buffer  *[MaxMessageSize]byte // backing array holding the packet data
	packet  []byte                // slice of "buffer" (always!)
	nonce   uint64                // nonce for encryption
	keyPair *KeyPair              // key-pair for encryption
	peer    *Peer                 // related peer
}

func (peer *Peer) FlushNonceQueue() {
	elems := len(peer.queue.nonce)
	for i := 0; i < elems; i++ {
		select {
		case <-peer.queue.nonce:
		default:
			return
		}
	}
}

func (device *Device) NewOutboundElement() *QueueOutboundElement {
	return &QueueOutboundElement{
		dropped: AtomicFalse,
		buffer:  device.pool.messageBuffers.Get().(*[MaxMessageSize]byte),
	}
}

func (elem *QueueOutboundElement) Drop() {
	atomic.StoreInt32(&elem.dropped, AtomicTrue)
}

func (elem *QueueOutboundElement) IsDropped() bool {
	return atomic.LoadInt32(&elem.dropped) == AtomicTrue
}

// addToOutboundQueue inserts an element into a bounded queue,
// dropping the oldest queued element if the queue is full.
func addToOutboundQueue(
	queue chan *QueueOutboundElement,
	element *QueueOutboundElement,
) {
	for {
		select {
		case queue <- element:
			return
		default:
			select {
			case old := <-queue:
				old.Drop()
			default:
			}
		}
	}
}

// addToEncryptionQueue behaves like addToOutboundQueue, but also unlocks a
// dropped element so the sequential consumer waiting on it can proceed.
func addToEncryptionQueue(
	queue chan *QueueOutboundElement,
	element *QueueOutboundElement,
) {
	for {
		select {
		case queue <- element:
			return
		default:
			select {
			case old := <-queue:
				// drop & release to potential consumer
				old.Drop()
				old.mutex.Unlock()
			default:
			}
		}
	}
}

func (peer *Peer) SendBuffer(buffer []byte) (int, error) {
	peer.device.net.mutex.RLock()
	defer peer.device.net.mutex.RUnlock()

	peer.mutex.RLock()
	defer peer.mutex.RUnlock()

	endpoint := peer.endpoint
	if endpoint == nil {
		return 0, errors.New("No known endpoint for peer")
	}

	conn := peer.device.net.conn
	if conn == nil {
		return 0, errors.New("No UDP socket for device")
	}

	return conn.WriteToUDP(buffer, endpoint)
}

/* Reads packets from the TUN and inserts
* into nonce queue for peer
*
* Obs. Single instance per TUN device
*/
func (device *Device) RoutineReadFromTUN() {
	elem := device.NewOutboundElement()

	logDebug := device.log.Debug
	logError := device.log.Error

	logDebug.Println("Routine, TUN Reader started")

	for {
		// read packet

		elem.packet = elem.buffer[MessageTransportHeaderSize:]
		size, err := device.tun.device.Read(elem.packet)
		if err != nil {
			logError.Println("Failed to read packet from TUN device:", err)
			device.Close()
			return
		}

		if size == 0 || size > MaxContentSize {
			continue
		}

		elem.packet = elem.packet[:size]

		// lookup peer

		var peer *Peer
		switch elem.packet[0] >> 4 {
		case ipv4.Version:
			if len(elem.packet) < ipv4.HeaderLen {
				continue
			}
			dst := elem.packet[IPv4offsetDst : IPv4offsetDst+net.IPv4len]
			peer = device.routingTable.LookupIPv4(dst)

		case ipv6.Version:
			if len(elem.packet) < ipv6.HeaderLen {
				continue
			}
			dst := elem.packet[IPv6offsetDst : IPv6offsetDst+net.IPv6len]
			peer = device.routingTable.LookupIPv6(dst)

		default:
			logDebug.Println("Received packet with unknown IP version")
		}

		if peer == nil {
			continue
		}

		// insert into nonce/pre-handshake queue

		signalSend(peer.signal.handshakeReset)
		addToOutboundQueue(peer.queue.nonce, elem)
		elem = device.NewOutboundElement()
	}
}
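
// dstAddressExample is an illustrative, unused helper sketching the routing
// step in RoutineReadFromTUN above: the high nibble of the first byte selects
// the IP version, and the destination address sits at a fixed offset into the
// header. The offsets are the package constants used above; the function name
// itself is hypothetical and exists only for illustration.
func dstAddressExample(packet []byte) net.IP {
	if len(packet) < 1 {
		return nil
	}
	switch packet[0] >> 4 {
	case ipv4.Version:
		if len(packet) < ipv4.HeaderLen {
			return nil
		}
		return net.IP(packet[IPv4offsetDst : IPv4offsetDst+net.IPv4len])
	case ipv6.Version:
		if len(packet) < ipv6.HeaderLen {
			return nil
		}
		return net.IP(packet[IPv6offsetDst : IPv6offsetDst+net.IPv6len])
	}
	return nil
}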

/* Queues packets when there is no handshake.
 * Then assigns nonces to packets sequentially
 * and creates "work" structs for workers
 *
 * Obs. A single instance per peer
 */
func (peer *Peer) RoutineNonce() {
	var keyPair *KeyPair

	device := peer.device
	logDebug := device.log.Debug
	logDebug.Println("Routine, nonce worker, started for peer", peer.String())

	for {
	NextPacket:
		select {
		case <-peer.signal.stop:
			return

		case elem := <-peer.queue.nonce:

			// wait for key pair

			for {
				keyPair = peer.keyPairs.Current()
				if keyPair != nil && keyPair.sendNonce < RejectAfterMessages {
					if time.Now().Sub(keyPair.created) < RejectAfterTime {
						break
					}
				}

				signalSend(peer.signal.handshakeBegin)
				logDebug.Println("Awaiting key-pair for", peer.String())

				select {
				case <-peer.signal.newKeyPair:
				case <-peer.signal.flushNonceQueue:
					logDebug.Println("Clearing queue for", peer.String())
					peer.FlushNonceQueue()
					goto NextPacket
				case <-peer.signal.stop:
					return
				}
			}

			// populate work element

			elem.peer = peer
			elem.nonce = atomic.AddUint64(&keyPair.sendNonce, 1) - 1
			elem.keyPair = keyPair
			elem.dropped = AtomicFalse
			elem.mutex.Lock()

			// add to parallel and sequential queue

			addToEncryptionQueue(device.queue.encryption, elem)
			addToOutboundQueue(peer.queue.outbound, elem)
		}
	}
}
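
// reserveNonceExample is an illustrative, unused stand-in for the counter
// reservation performed in RoutineNonce above: AddUint64 returns the value
// after the increment, so subtracting one yields a unique, strictly increasing
// nonce for each caller even when the counter is shared between goroutines.
// The function name is hypothetical and exists only for illustration.
func reserveNonceExample(counter *uint64) uint64 {
	return atomic.AddUint64(counter, 1) - 1
}
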
/* Encrypts the elements in the queue
* and marks them for sequential consumption (by releasing the mutex)
*
* Obs. One instance per core
*/
func (device *Device) RoutineEncryption() {
	var nonce [chacha20poly1305.NonceSize]byte

	logDebug := device.log.Debug
	logDebug.Println("Routine, encryption worker, started")

	for {
		// fetch next element

		select {
		case <-device.signal.stop:
			logDebug.Println("Routine, encryption worker, stopped")
			return

		case elem := <-device.queue.encryption:

			// check if dropped

			if elem.IsDropped() {
				continue
			}

			// populate header fields

			header := elem.buffer[:MessageTransportHeaderSize]

			fieldType := header[0:4]
			fieldReceiver := header[4:8]
			fieldNonce := header[8:16]

			binary.LittleEndian.PutUint32(fieldType, MessageTransportType)
			binary.LittleEndian.PutUint32(fieldReceiver, elem.keyPair.remoteIndex)
			binary.LittleEndian.PutUint64(fieldNonce, elem.nonce)

			// pad content to multiple of 16

			mtu := int(atomic.LoadInt32(&device.tun.mtu))
			rem := len(elem.packet) % PaddingMultiple
			if rem > 0 {
				for i := 0; i < PaddingMultiple-rem && len(elem.packet) < mtu; i++ {
					elem.packet = append(elem.packet, 0)
				}
			}

			// encrypt content (append to header)

			binary.LittleEndian.PutUint64(nonce[4:], elem.nonce)
			elem.keyPair.send.mutex.RLock()
			if elem.keyPair.send.aead == nil {
				// very unlikely (the key was deleted during queuing)
				elem.Drop()
			} else {
				elem.packet = elem.keyPair.send.aead.Seal(
					header,
					nonce[:],
					elem.packet,
					nil,
				)
			}
			elem.mutex.Unlock()
			elem.keyPair.send.mutex.RUnlock()

			// refresh key if necessary

			elem.peer.KeepKeyFreshSending()
		}
	}
}
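
// sealTransportExample is a minimal, self-contained sketch of the in-place
// construction performed by RoutineEncryption above: the 12-byte
// ChaCha20-Poly1305 nonce carries the 64-bit send counter in its last eight
// bytes (little endian), and Seal appends the ciphertext directly after the
// transport header so that header and ciphertext share a single buffer. The
// key parameter is a stand-in for the key-pair negotiated by the handshake,
// the receiver index field (bytes 4:8) is left zero here, and the function is
// not called anywhere; it exists only for illustration.
func sealTransportExample(key [chacha20poly1305.KeySize]byte, counter uint64, plaintext []byte) []byte {
	var nonce [chacha20poly1305.NonceSize]byte
	binary.LittleEndian.PutUint64(nonce[4:], counter)

	header := make([]byte, MessageTransportHeaderSize, MessageTransportHeaderSize+len(plaintext)+chacha20poly1305.Overhead)
	binary.LittleEndian.PutUint32(header[0:4], MessageTransportType)
	binary.LittleEndian.PutUint64(header[8:16], counter)

	aead, err := chacha20poly1305.New(key[:])
	if err != nil {
		return nil // unreachable: the key array has the correct length
	}
	return aead.Seal(header, nonce[:], plaintext, nil)
}
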
/* Sequentially reads packets from queue and sends to endpoint
 *
 * Obs. Single instance per peer.
 * The routine terminates when the outbound queue is closed.
 */
func (peer *Peer) RoutineSequentialSender() {
	device := peer.device

	logDebug := device.log.Debug
	logDebug.Println("Routine, sequential sender, started for", peer.String())

	for {
		select {
		case <-peer.signal.stop:
			logDebug.Println("Routine, sequential sender, stopped for", peer.String())
			return

		case elem := <-peer.queue.outbound:
			elem.mutex.Lock()

			if elem.IsDropped() {
				continue
			}

			// send message and return buffer to pool

			length := uint64(len(elem.packet))
			_, err := peer.SendBuffer(elem.packet)
			device.PutMessageBuffer(elem.buffer)
			if err != nil {
				logDebug.Println("Failed to send authenticated packet to peer", peer.String())
				continue
			}
			atomic.AddUint64(&peer.stats.txBytes, length)

			// update timers

			peer.TimerAnyAuthenticatedPacketTraversal()
			if len(elem.packet) != MessageKeepaliveSize {
				peer.TimerDataSent()
			}
			peer.KeepKeyFreshSending()
		}
	}
}
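
// isKeepaliveExample is an illustrative, unused check related to the length
// comparison in RoutineSequentialSender above: a keepalive is a transport
// message whose encrypted payload is empty, i.e. just the transport header
// followed by the Poly1305 tag. Assuming MessageKeepaliveSize is defined as
// that sum elsewhere in the package, the two checks coincide. The function
// name is hypothetical and exists only for illustration.
func isKeepaliveExample(msg []byte) bool {
	return len(msg) == MessageTransportHeaderSize+chacha20poly1305.Overhead
}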