use the actual maximum packet size in the Reno congestion controller

Marten Seemann 2021-01-25 14:23:11 +08:00
parent 8895a79e30
commit dd8b21f264
7 changed files with 97 additions and 39 deletions
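
As orientation for the diff below: the commit turns the congestion-window bounds from package-level constants tied to the initial packet size into methods computed from a per-sender maximum datagram size. A minimal standalone sketch of that technique, under simplified assumptions: the byteCount type, the sender struct, and the literal 10000 (a stand-in for protocol.MaxCongestionWindowPackets) are illustrative, not code from the repository.

// Standalone sketch: window bounds derived from the sender's current
// maximum datagram size instead of package-level constants.
package main

import "fmt"

type byteCount int64

const (
    minCongestionWindowPackets = 2
    maxCongestionWindowPackets = 10000 // stand-in for protocol.MaxCongestionWindowPackets
)

type sender struct {
    maxDatagramSize byteCount // may grow at runtime, e.g. after path MTU discovery
}

func (s *sender) minCongestionWindow() byteCount {
    return s.maxDatagramSize * minCongestionWindowPackets
}

func (s *sender) maxCongestionWindow() byteCount {
    return s.maxDatagramSize * maxCongestionWindowPackets
}

func main() {
    s := &sender{maxDatagramSize: 1252} // assumed value of protocol.InitialPacketSizeIPv4
    fmt.Println(s.minCongestionWindow(), s.maxCongestionWindow())

    s.maxDatagramSize = 1452 // hypothetical larger size learned later
    fmt.Println(s.minCongestionWindow(), s.maxCongestionWindow())
}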

@@ -11,12 +11,13 @@ import (
 const (
     // maxDatagramSize is the default maximum packet size used in the Linux TCP implementation.
     // Used in QUIC for congestion window computations in bytes.
-    maxDatagramSize         = protocol.ByteCount(protocol.InitialPacketSizeIPv4)
-    maxBurstBytes           = 3 * maxDatagramSize
-    renoBeta                = 0.7 // Reno backoff factor.
-    maxCongestionWindow     = protocol.MaxCongestionWindowPackets * maxDatagramSize
-    minCongestionWindow     = 2 * maxDatagramSize
-    initialCongestionWindow = 32 * maxDatagramSize
+    maxDatagramSize            = protocol.ByteCount(protocol.InitialPacketSizeIPv4)
+    initialMaxDatagramSize     = protocol.ByteCount(protocol.InitialPacketSizeIPv4)
+    maxBurstPackets            = 3
+    renoBeta                   = 0.7 // Reno backoff factor.
+    initialMaxCongestionWindow = protocol.MaxCongestionWindowPackets * initialMaxDatagramSize
+    minCongestionWindowPackets = 2
+    initialCongestionWindow    = 32 * initialMaxDatagramSize
 )

 type cubicSender struct {
@@ -44,12 +45,6 @@ type cubicSender struct
     // Congestion window in packets.
     congestionWindow protocol.ByteCount

-    // Minimum congestion window in packets.
-    minCongestionWindow protocol.ByteCount
-
-    // Maximum congestion window.
-    maxCongestionWindow protocol.ByteCount
-
     // Slow start congestion window in bytes, aka ssthresh.
     slowStartThreshold protocol.ByteCount
@@ -59,6 +54,8 @@ type cubicSender struct
     initialCongestionWindow protocol.ByteCount
     initialMaxCongestionWindow protocol.ByteCount

+    maxDatagramSize protocol.ByteCount
+
     lastState logging.CongestionState
     tracer logging.ConnectionTracer
 }
@@ -70,7 +67,7 @@ var (
 // NewCubicSender makes a new cubic sender
 func NewCubicSender(clock Clock, rttStats *utils.RTTStats, reno bool, tracer logging.ConnectionTracer) *cubicSender {
-    return newCubicSender(clock, rttStats, reno, initialCongestionWindow, maxCongestionWindow, tracer)
+    return newCubicSender(clock, rttStats, reno, initialCongestionWindow, initialMaxCongestionWindow, tracer)
 }

 func newCubicSender(clock Clock, rttStats *utils.RTTStats, reno bool, initialCongestionWindow, initialMaxCongestionWindow protocol.ByteCount, tracer logging.ConnectionTracer) *cubicSender {
@@ -82,13 +79,12 @@ func newCubicSender(clock Clock, rttStats *utils.RTTStats, reno bool, initialCon
         initialCongestionWindow: initialCongestionWindow,
         initialMaxCongestionWindow: initialMaxCongestionWindow,
         congestionWindow: initialCongestionWindow,
-        minCongestionWindow: minCongestionWindow,
         slowStartThreshold: protocol.MaxByteCount,
-        maxCongestionWindow: initialMaxCongestionWindow,
         cubic: NewCubic(clock),
         clock: clock,
         reno: reno,
         tracer: tracer,
+        maxDatagramSize: initialMaxDatagramSize,
     }
     c.pacer = newPacer(c.BandwidthEstimate)
     if c.tracer != nil {
@@ -104,12 +100,20 @@ func (c *cubicSender) TimeUntilSend(_ protocol.ByteCount) time.Time {
 }

 func (c *cubicSender) HasPacingBudget() bool {
-    return c.pacer.Budget(c.clock.Now()) >= maxDatagramSize
+    return c.pacer.Budget(c.clock.Now()) >= c.maxDatagramSize
 }

+func (c *cubicSender) maxCongestionWindow() protocol.ByteCount {
+    return c.maxDatagramSize * protocol.MaxCongestionWindowPackets
+}
+
+func (c *cubicSender) minCongestionWindow() protocol.ByteCount {
+    return c.maxDatagramSize * minCongestionWindowPackets
+}
+
 func (c *cubicSender) OnPacketSent(
     sentTime time.Time,
-    bytesInFlight protocol.ByteCount,
+    _ protocol.ByteCount,
     packetNumber protocol.PacketNumber,
     bytes protocol.ByteCount,
     isRetransmittable bool,
@@ -139,7 +143,8 @@ func (c *cubicSender) GetCongestionWindow() protocol.ByteCount {
 }

 func (c *cubicSender) MaybeExitSlowStart() {
-    if c.InSlowStart() && c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/maxDatagramSize) {
+    if c.InSlowStart() &&
+        c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/c.maxDatagramSize) {
         // exit slow start
         c.slowStartThreshold = c.congestionWindow
         c.maybeTraceStateChange(logging.CongestionStateCongestionAvoidance)
@@ -162,11 +167,7 @@ func (c *cubicSender) OnPacketAcked(
     }
 }

-func (c *cubicSender) OnPacketLost(
-    packetNumber protocol.PacketNumber,
-    lostBytes protocol.ByteCount,
-    priorInFlight protocol.ByteCount,
-) {
+func (c *cubicSender) OnPacketLost(packetNumber protocol.PacketNumber, lostBytes, priorInFlight protocol.ByteCount) {
     // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
     // already sent should be treated as a single loss event, since it's expected.
     if packetNumber <= c.largestSentAtLastCutback {
@@ -180,8 +181,8 @@ func (c *cubicSender) OnPacketLost(
     } else {
         c.congestionWindow = c.cubic.CongestionWindowAfterPacketLoss(c.congestionWindow)
     }
-    if c.congestionWindow < c.minCongestionWindow {
-        c.congestionWindow = c.minCongestionWindow
+    if minCwnd := c.minCongestionWindow(); c.congestionWindow < minCwnd {
+        c.congestionWindow = minCwnd
     }
     c.slowStartThreshold = c.congestionWindow
     c.largestSentAtLastCutback = c.largestSentPacketNumber
@@ -205,12 +206,12 @@ func (c *cubicSender) maybeIncreaseCwnd(
         c.maybeTraceStateChange(logging.CongestionStateApplicationLimited)
         return
     }
-    if c.congestionWindow >= c.maxCongestionWindow {
+    if c.congestionWindow >= c.maxCongestionWindow() {
         return
     }
     if c.InSlowStart() {
         // TCP slow start, exponential growth, increase by one for each ACK.
-        c.congestionWindow += maxDatagramSize
+        c.congestionWindow += c.maxDatagramSize
         c.maybeTraceStateChange(logging.CongestionStateSlowStart)
         return
     }
@@ -219,12 +220,12 @@ func (c *cubicSender) maybeIncreaseCwnd(
     if c.reno {
         // Classic Reno congestion avoidance.
         c.numAckedPackets++
-        if c.numAckedPackets >= uint64(c.congestionWindow/maxDatagramSize) {
-            c.congestionWindow += maxDatagramSize
+        if c.numAckedPackets >= uint64(c.congestionWindow/c.maxDatagramSize) {
+            c.congestionWindow += c.maxDatagramSize
             c.numAckedPackets = 0
         }
     } else {
-        c.congestionWindow = utils.MinByteCount(c.maxCongestionWindow, c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime))
+        c.congestionWindow = utils.MinByteCount(c.maxCongestionWindow(), c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime))
     }
 }
@@ -235,7 +236,7 @@ func (c *cubicSender) isCwndLimited(bytesInFlight protocol.ByteCount) bool {
     }
     availableBytes := congestionWindow - bytesInFlight
     slowStartLimited := c.InSlowStart() && bytesInFlight > congestionWindow/2
-    return slowStartLimited || availableBytes <= maxBurstBytes
+    return slowStartLimited || availableBytes <= maxBurstPackets*c.maxDatagramSize
 }

 // BandwidthEstimate returns the current bandwidth estimate
@@ -257,7 +258,7 @@ func (c *cubicSender) OnRetransmissionTimeout(packetsRetransmitted bool) {
     c.hybridSlowStart.Restart()
     c.cubic.Reset()
     c.slowStartThreshold = c.congestionWindow / 2
-    c.congestionWindow = c.minCongestionWindow
+    c.congestionWindow = c.minCongestionWindow()
 }

 // OnConnectionMigration is called when the connection is migrated (?)
@@ -271,7 +272,6 @@ func (c *cubicSender) OnConnectionMigration() {
     c.numAckedPackets = 0
     c.congestionWindow = c.initialCongestionWindow
     c.slowStartThreshold = c.initialMaxCongestionWindow
-    c.maxCongestionWindow = c.initialMaxCongestionWindow
 }

 func (c *cubicSender) maybeTraceStateChange(new logging.CongestionState) {
@@ -281,3 +281,14 @@ func (c *cubicSender) maybeTraceStateChange(new logging.CongestionState) {
     c.tracer.UpdatedCongestionState(new)
     c.lastState = new
 }
+
+func (c *cubicSender) SetMaxDatagramSize(s protocol.ByteCount) {
+    if s < c.maxDatagramSize {
+        panic("congestion BUG: decreased max datagram size")
+    }
+    cwndIsMinCwnd := c.congestionWindow == c.minCongestionWindow()
+    c.maxDatagramSize = s
+    if cwndIsMinCwnd {
+        c.congestionWindow = c.minCongestionWindow()
+    }
+}
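
To make the behavior of the new SetMaxDatagramSize concrete: when the datagram size grows, the minimum congestion window grows with it, and a window that was pinned at the old minimum is re-derived at the new size, while a larger window is left untouched. A standalone sketch of that logic, using simplified stand-in types rather than the package's own:

// Sketch mirroring the SetMaxDatagramSize logic shown above, with stand-in types.
package main

import "fmt"

type byteCount int64

const minCongestionWindowPackets = 2

type sender struct {
    maxDatagramSize  byteCount
    congestionWindow byteCount
}

func (s *sender) minCongestionWindow() byteCount {
    return s.maxDatagramSize * minCongestionWindowPackets
}

func (s *sender) setMaxDatagramSize(size byteCount) {
    if size < s.maxDatagramSize {
        panic("congestion BUG: decreased max datagram size")
    }
    // If the window was sitting at the minimum, re-derive it at the new size.
    wasMin := s.congestionWindow == s.minCongestionWindow()
    s.maxDatagramSize = size
    if wasMin {
        s.congestionWindow = s.minCongestionWindow()
    }
}

func main() {
    a := &sender{maxDatagramSize: 1252, congestionWindow: 2 * 1252} // at the minimum window
    a.setMaxDatagramSize(1452)
    fmt.Println(a.congestionWindow) // 2904: rescaled to the new minimum

    b := &sender{maxDatagramSize: 1252, congestionWindow: 40000} // above the minimum
    b.setMaxDatagramSize(1452)
    fmt.Println(b.congestionWindow) // 40000: unchanged
}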