Mirror of https://git.zx2c4.com/wireguard-go (synced 2024-11-15 01:05:15 +01:00)
Commit 69b39db0b4

In 097af6e ("tun: windows: protect reads from closing") we made sure no functions are running when End() is called, to avoid a use-after-free. But we still need to kick that event somehow, so that Read() is allowed to exit, in order to release the lock. So this commit calls SetEvent, while moving the closing boolean to be atomic so it can be modified without locks, and then moves to a WaitGroup for the RCU-like pattern.
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
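To make the commit message easier to follow before reading the full file below, here is a minimal, platform-independent sketch of the described pattern: an atomic closing flag, a WaitGroup counting in-flight calls, and an "event" that gets kicked on close so a blocked Read() wakes up, re-checks the flag, and returns. The names (closingDevice, kick, errClosed) are hypothetical and are not part of wireguard-go; the real code uses a Windows event handle, WaitForSingleObject, and a sync.Once around Close().

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

// closingDevice is a toy stand-in for NativeTun, illustrating only the
// close/read synchronization described in the commit message.
type closingDevice struct {
	closing int32          // set to 1 atomically when Close() begins
	running sync.WaitGroup // counts in-flight Read()/Write() calls
	kick    chan struct{}  // stands in for the Windows read-wait event
}

var errClosed = errors.New("device closed")

func (d *closingDevice) Read() error {
	d.running.Add(1)
	defer d.running.Done()
	if atomic.LoadInt32(&d.closing) == 1 {
		return errClosed
	}
	<-d.kick // blocks like WaitForSingleObject(readWait, INFINITE)
	if atomic.LoadInt32(&d.closing) == 1 {
		return errClosed
	}
	// In the real driver a packet would be received here.
	return nil
}

// Close must only be called once in this sketch; the real code guards it
// with sync.Once (closeOnce).
func (d *closingDevice) Close() {
	atomic.StoreInt32(&d.closing, 1) // 1. mark closing without taking a lock
	close(d.kick)                    // 2. kick the "event" so a blocked Read() wakes up
	d.running.Wait()                 // 3. RCU-like: wait for in-flight calls to drain
	// 4. only now is it safe to tear down resources (session.End() in the real code)
}

func main() {
	d := &closingDevice{kick: make(chan struct{})}
	done := make(chan error)
	go func() { done <- d.Read() }() // Read blocks on the "event"
	d.Close()                        // kicks the event and waits for Read to exit
	fmt.Println(<-done)              // prints "device closed"
}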
255 lines · 6.3 KiB · Go
/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2018-2021 WireGuard LLC. All Rights Reserved.
 */

package tun

import (
	"errors"
	"fmt"
	"log"
	"os"
	"sync"
	"sync/atomic"
	"time"
	_ "unsafe"

	"golang.org/x/sys/windows"

	"golang.zx2c4.com/wireguard/tun/wintun"
)

const (
	rateMeasurementGranularity = uint64((time.Second / 2) / time.Nanosecond)
	spinloopRateThreshold      = 800000000 / 8                                   // 800mbps
	spinloopDuration           = uint64(time.Millisecond / 80 / time.Nanosecond) // ~1gbit/s
)

type rateJuggler struct {
	current       uint64
	nextByteCount uint64
	nextStartTime int64
	changing      int32
}

type NativeTun struct {
	wt        *wintun.Adapter
	handle    windows.Handle
	rate      rateJuggler
	session   wintun.Session
	readWait  windows.Handle
	events    chan Event
	running   sync.WaitGroup
	closeOnce sync.Once
	close     int32
	forcedMTU int
}

var WintunPool, _ = wintun.MakePool("WireGuard")
var WintunStaticRequestedGUID *windows.GUID

//go:linkname procyield runtime.procyield
func procyield(cycles uint32)

//go:linkname nanotime runtime.nanotime
func nanotime() int64

//
// CreateTUN creates a Wintun interface with the given name. Should a Wintun
// interface with the same name exist, it is reused.
//
func CreateTUN(ifname string, mtu int) (Device, error) {
	return CreateTUNWithRequestedGUID(ifname, WintunStaticRequestedGUID, mtu)
}

//
// CreateTUNWithRequestedGUID creates a Wintun interface with the given name and
// a requested GUID. Should a Wintun interface with the same name exist, it is reused.
//
func CreateTUNWithRequestedGUID(ifname string, requestedGUID *windows.GUID, mtu int) (Device, error) {
	var err error
	var wt *wintun.Adapter

	// Does an interface with this name already exist?
	wt, err = WintunPool.OpenAdapter(ifname)
	if err == nil {
		// If so, we delete it, in case it has weird residual configuration.
		_, err = wt.Delete(true)
		if err != nil {
			return nil, fmt.Errorf("Error deleting already existing interface: %w", err)
		}
	}
	wt, rebootRequired, err := WintunPool.CreateAdapter(ifname, requestedGUID)
	if err != nil {
		return nil, fmt.Errorf("Error creating interface: %w", err)
	}
	if rebootRequired {
		log.Println("Windows indicated a reboot is required.")
	}

	forcedMTU := 1420
	if mtu > 0 {
		forcedMTU = mtu
	}

	tun := &NativeTun{
		wt:        wt,
		handle:    windows.InvalidHandle,
		events:    make(chan Event, 10),
		forcedMTU: forcedMTU,
	}

	tun.session, err = wt.StartSession(0x800000) // Ring capacity, 8 MiB
	if err != nil {
		tun.wt.Delete(false)
		close(tun.events)
		return nil, fmt.Errorf("Error starting session: %w", err)
	}
	tun.readWait = tun.session.ReadWaitEvent()
	return tun, nil
}

func (tun *NativeTun) Name() (string, error) {
	tun.running.Add(1)
	defer tun.running.Done()
	if atomic.LoadInt32(&tun.close) == 1 {
		return "", os.ErrClosed
	}
	return tun.wt.Name()
}

func (tun *NativeTun) File() *os.File {
	return nil
}

func (tun *NativeTun) Events() chan Event {
	return tun.events
}

func (tun *NativeTun) Close() error {
	var err error
	tun.closeOnce.Do(func() {
		atomic.StoreInt32(&tun.close, 1)
		windows.SetEvent(tun.readWait)
		tun.running.Wait()
		tun.session.End()
		if tun.wt != nil {
			_, err = tun.wt.Delete(false)
		}
		close(tun.events)
	})
	return err
}

func (tun *NativeTun) MTU() (int, error) {
	return tun.forcedMTU, nil
}

// TODO: This is a temporary hack. We really need to be monitoring the interface in real time and adapting to MTU changes.
func (tun *NativeTun) ForceMTU(mtu int) {
	update := tun.forcedMTU != mtu
	tun.forcedMTU = mtu
	if update {
		tun.events <- EventMTUUpdate
	}
}

// Note: Read() and Write() assume the caller comes only from a single thread; there's no locking.

func (tun *NativeTun) Read(buff []byte, offset int) (int, error) {
	tun.running.Add(1)
	defer tun.running.Done()
retry:
	if atomic.LoadInt32(&tun.close) == 1 {
		return 0, os.ErrClosed
	}
	start := nanotime()
	shouldSpin := atomic.LoadUint64(&tun.rate.current) >= spinloopRateThreshold && uint64(start-atomic.LoadInt64(&tun.rate.nextStartTime)) <= rateMeasurementGranularity*2
	for {
		if atomic.LoadInt32(&tun.close) == 1 {
			return 0, os.ErrClosed
		}
		packet, err := tun.session.ReceivePacket()
		switch err {
		case nil:
			packetSize := len(packet)
			copy(buff[offset:], packet)
			tun.session.ReleaseReceivePacket(packet)
			tun.rate.update(uint64(packetSize))
			return packetSize, nil
		case windows.ERROR_NO_MORE_ITEMS:
			if !shouldSpin || uint64(nanotime()-start) >= spinloopDuration {
				windows.WaitForSingleObject(tun.readWait, windows.INFINITE)
				goto retry
			}
			procyield(1)
			continue
		case windows.ERROR_HANDLE_EOF:
			return 0, os.ErrClosed
		case windows.ERROR_INVALID_DATA:
			return 0, errors.New("Send ring corrupt")
		}
		return 0, fmt.Errorf("Read failed: %w", err)
	}
}

func (tun *NativeTun) Flush() error {
	return nil
}

func (tun *NativeTun) Write(buff []byte, offset int) (int, error) {
	tun.running.Add(1)
	defer tun.running.Done()
	if atomic.LoadInt32(&tun.close) == 1 {
		return 0, os.ErrClosed
	}

	packetSize := len(buff) - offset
	tun.rate.update(uint64(packetSize))

	packet, err := tun.session.AllocateSendPacket(packetSize)
	if err == nil {
		copy(packet, buff[offset:])
		tun.session.SendPacket(packet)
		return packetSize, nil
	}
	switch err {
	case windows.ERROR_HANDLE_EOF:
		return 0, os.ErrClosed
	case windows.ERROR_BUFFER_OVERFLOW:
		return 0, nil // Dropping when ring is full.
	}
	return 0, fmt.Errorf("Write failed: %w", err)
}

// LUID returns Windows interface instance ID.
func (tun *NativeTun) LUID() uint64 {
	tun.running.Add(1)
	defer tun.running.Done()
	if atomic.LoadInt32(&tun.close) == 1 {
		return 0
	}
	return tun.wt.LUID()
}

// RunningVersion returns the running version of the Wintun driver.
func (tun *NativeTun) RunningVersion() (version uint32, err error) {
	return wintun.RunningVersion()
}

func (rate *rateJuggler) update(packetLen uint64) {
	now := nanotime()
	total := atomic.AddUint64(&rate.nextByteCount, packetLen)
	period := uint64(now - atomic.LoadInt64(&rate.nextStartTime))
	if period >= rateMeasurementGranularity {
		if !atomic.CompareAndSwapInt32(&rate.changing, 0, 1) {
			return
		}
		atomic.StoreInt64(&rate.nextStartTime, now)
		atomic.StoreUint64(&rate.current, total*uint64(time.Second/time.Nanosecond)/period)
		atomic.StoreUint64(&rate.nextByteCount, 0)
		atomic.StoreInt32(&rate.changing, 0)
	}
}