Mirror of https://github.com/refraction-networking/uquic.git, synced 2025-04-03 20:27:35 +03:00
rename defaultTCPMSS to maxDatagramSize

commit d4de582fad (parent 31e4691ffd)
6 changed files with 131 additions and 131 deletions
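The change is purely a rename: the 1460-byte value stays the same, so every constant derived from it keeps its value. For reference, a minimal standalone sketch (not part of the diff) of the quantities the hunks below touch, written with plain ints instead of the package's protocol.ByteCount type:

package main

import "fmt"

func main() {
	// Renamed constant: formerly defaultTCPMSS, now maxDatagramSize.
	// The value (the Linux TCP default MSS) is unchanged by this commit.
	const maxDatagramSize = 1460

	// Derived values used by the congestion code and its tests.
	const (
		maxBurstBytes           = 3 * maxDatagramSize  // 4380 bytes
		minCongestionWindow     = 2 * maxDatagramSize  // 2920 bytes
		initialCongestionWindow = 32 * maxDatagramSize // 46720 bytes
	)
	fmt.Println(maxBurstBytes, minCongestionWindow, initialCongestionWindow)
}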
@@ -19,7 +19,7 @@ import (
 // where 0.100 is 100 ms which is the scaling round trip time.
 const cubeScale = 40
 const cubeCongestionWindowScale = 410
-const cubeFactor protocol.ByteCount = 1 << cubeScale / cubeCongestionWindowScale / defaultTCPMSS
+const cubeFactor protocol.ByteCount = 1 << cubeScale / cubeCongestionWindowScale / maxDatagramSize

 const defaultNumConnections = 1

@@ -125,7 +125,7 @@ func (c *Cubic) OnApplicationLimited() {
 // a loss event. Returns the new congestion window in packets. The new
 // congestion window is a multiplicative decrease of our current window.
 func (c *Cubic) CongestionWindowAfterPacketLoss(currentCongestionWindow protocol.ByteCount) protocol.ByteCount {
-if currentCongestionWindow+defaultTCPMSS < c.lastMaxCongestionWindow {
+if currentCongestionWindow+maxDatagramSize < c.lastMaxCongestionWindow {
 // We never reached the old max, so assume we are competing with another
 // flow. Use our extra back off factor to allow the other flow to go up.
 c.lastMaxCongestionWindow = protocol.ByteCount(c.betaLastMax() * float32(currentCongestionWindow))
@@ -175,7 +175,7 @@ func (c *Cubic) CongestionWindowAfterAck(
 offset = -offset
 }

-deltaCongestionWindow := protocol.ByteCount(cubeCongestionWindowScale*offset*offset*offset) * defaultTCPMSS >> cubeScale
+deltaCongestionWindow := protocol.ByteCount(cubeCongestionWindowScale*offset*offset*offset) * maxDatagramSize >> cubeScale
 var targetCongestionWindow protocol.ByteCount
 if elapsedTime > int64(c.timeToOriginPoint) {
 targetCongestionWindow = c.originPointCongestionWindow + deltaCongestionWindow
@@ -190,7 +190,7 @@ func (c *Cubic) CongestionWindowAfterAck(
 // congestion windows (less than 25), the formula below will
 // increase slightly slower than linearly per estimated tcp window
 // of bytes.
-c.estimatedTCPcongestionWindow += protocol.ByteCount(float32(c.ackedBytesCount) * c.alpha() * float32(defaultTCPMSS) / float32(c.estimatedTCPcongestionWindow))
+c.estimatedTCPcongestionWindow += protocol.ByteCount(float32(c.ackedBytesCount) * c.alpha() * float32(maxDatagramSize) / float32(c.estimatedTCPcongestionWindow))
 c.ackedBytesCount = 0

 // We have a new cubic congestion window.
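A sanity check on the cubeFactor hunk above (not part of the diff): in Go, << shares precedence with / and the expression evaluates left to right, and since the 1460-byte value is unchanged the constant's value is unchanged by the rename. A standalone sketch of the same arithmetic:

package main

import "fmt"

func main() {
	// Same arithmetic as the cubeFactor constant in the hunk above,
	// written out with plain integers: (1<<40) / 410 / 1460.
	const cubeScale = 40
	const cubeCongestionWindowScale = 410
	const maxDatagramSize = 1460
	const cubeFactor = 1 << cubeScale / cubeCongestionWindowScale / maxDatagramSize
	fmt.Println(cubeFactor) // 1836805
}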
@@ -8,14 +8,14 @@ import (
 )

 const (
-// defaultTCPMSS is the default maximum packet size used in the Linux TCP implementation.
+// maxDatagramSize is the default maximum packet size used in the Linux TCP implementation.
 // Used in QUIC for congestion window computations in bytes.
-defaultTCPMSS protocol.ByteCount = 1460
-maxBurstBytes = 3 * defaultTCPMSS
+maxDatagramSize protocol.ByteCount = 1460
+maxBurstBytes = 3 * maxDatagramSize
 renoBeta float32 = 0.7 // Reno backoff factor.
-maxCongestionWindow = protocol.MaxCongestionWindowPackets * defaultTCPMSS
-minCongestionWindow = 2 * defaultTCPMSS
-initialCongestionWindow = 32 * defaultTCPMSS
+maxCongestionWindow = protocol.MaxCongestionWindowPackets * maxDatagramSize
+minCongestionWindow = 2 * maxDatagramSize
+initialCongestionWindow = 32 * maxDatagramSize
 )

 type cubicSender struct {
@@ -102,7 +102,7 @@ func (c *cubicSender) TimeUntilSend(bytesInFlight protocol.ByteCount) time.Durat
 return 0
 }
 }
-return c.rttStats.SmoothedRTT() * time.Duration(defaultTCPMSS) / time.Duration(2*c.GetCongestionWindow())
+return c.rttStats.SmoothedRTT() * time.Duration(maxDatagramSize) / time.Duration(2*c.GetCongestionWindow())
 }

 func (c *cubicSender) OnPacketSent(
@@ -155,7 +155,7 @@ func (c *cubicSender) SlowstartThreshold() protocol.ByteCount {
 }

 func (c *cubicSender) MaybeExitSlowStart() {
-if c.InSlowStart() && c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/defaultTCPMSS) {
+if c.InSlowStart() && c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/maxDatagramSize) {
 c.ExitSlowstart()
 }
 }
@@ -213,7 +213,7 @@ func (c *cubicSender) OnPacketLost(
 if c.congestionWindow >= 2*c.initialCongestionWindow {
 c.minSlowStartExitWindow = c.congestionWindow / 2
 }
-c.congestionWindow -= defaultTCPMSS
+c.congestionWindow -= maxDatagramSize
 } else if c.reno {
 c.congestionWindow = protocol.ByteCount(float32(c.congestionWindow) * c.RenoBeta())
 } else {
@@ -256,7 +256,7 @@ func (c *cubicSender) maybeIncreaseCwnd(
 }
 if c.InSlowStart() {
 // TCP slow start, exponential growth, increase by one for each ACK.
-c.congestionWindow += defaultTCPMSS
+c.congestionWindow += maxDatagramSize
 return
 }
 // Congestion avoidance
@@ -265,8 +265,8 @@ func (c *cubicSender) maybeIncreaseCwnd(
 c.numAckedPackets++
 // Divide by num_connections to smoothly increase the CWND at a faster
 // rate than conventional Reno.
-if c.numAckedPackets*uint64(c.numConnections) >= uint64(c.congestionWindow)/uint64(defaultTCPMSS) {
-c.congestionWindow += defaultTCPMSS
+if c.numAckedPackets*uint64(c.numConnections) >= uint64(c.congestionWindow)/uint64(maxDatagramSize) {
+c.congestionWindow += maxDatagramSize
 c.numAckedPackets = 0
 }
 } else {
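A worked reading of the pacing hunk above (the formula itself is untouched, only the identifier changes): with a smoothed RTT of 100 ms and a congestion window of 10 full-size packets (10 × 1460 = 14,600 bytes), the returned delay is 100 ms × 1460 / (2 × 14,600) = 5 ms per packet, so a full window is paced out over roughly half an RTT.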
@@ -10,7 +10,7 @@ import (
 )

 const initialCongestionWindowPackets = 10
-const defaultWindowTCP = protocol.ByteCount(initialCongestionWindowPackets) * defaultTCPMSS
+const defaultWindowTCP = protocol.ByteCount(initialCongestionWindowPackets) * maxDatagramSize

 type mockClock time.Time

@@ -22,7 +22,7 @@ func (c *mockClock) Advance(d time.Duration) {
 *c = mockClock(time.Time(*c).Add(d))
 }

-const MaxCongestionWindow protocol.ByteCount = 200 * defaultTCPMSS
+const MaxCongestionWindow protocol.ByteCount = 200 * maxDatagramSize

 var _ = Describe("Cubic Sender", func() {
 var (
@@ -40,7 +40,7 @@ var _ = Describe("Cubic Sender", func() {
 ackedPacketNumber = 0
 clock = mockClock{}
 rttStats = NewRTTStats()
-sender = newCubicSender(&clock, rttStats, true /*reno*/, initialCongestionWindowPackets*defaultTCPMSS, MaxCongestionWindow)
+sender = newCubicSender(&clock, rttStats, true /*reno*/, initialCongestionWindowPackets*maxDatagramSize, MaxCongestionWindow)
 })

 SendAvailableSendWindowLen := func(packetLength protocol.ByteCount) int {
@@ -60,9 +60,9 @@ var _ = Describe("Cubic Sender", func() {
 sender.MaybeExitSlowStart()
 for i := 0; i < n; i++ {
 ackedPacketNumber++
-sender.OnPacketAcked(ackedPacketNumber, defaultTCPMSS, bytesInFlight, clock.Now())
+sender.OnPacketAcked(ackedPacketNumber, maxDatagramSize, bytesInFlight, clock.Now())
 }
-bytesInFlight -= protocol.ByteCount(n) * defaultTCPMSS
+bytesInFlight -= protocol.ByteCount(n) * maxDatagramSize
 clock.Advance(time.Millisecond)
 }

@@ -76,12 +76,12 @@ var _ = Describe("Cubic Sender", func() {

 // Does not increment acked_packet_number_.
 LosePacket := func(number protocol.PacketNumber) {
-sender.OnPacketLost(number, defaultTCPMSS, bytesInFlight)
-bytesInFlight -= defaultTCPMSS
+sender.OnPacketLost(number, maxDatagramSize, bytesInFlight)
+bytesInFlight -= maxDatagramSize
 }

-SendAvailableSendWindow := func() int { return SendAvailableSendWindowLen(defaultTCPMSS) }
-LoseNPackets := func(n int) { LoseNPacketsLen(n, defaultTCPMSS) }
+SendAvailableSendWindow := func() int { return SendAvailableSendWindowLen(maxDatagramSize) }
+LoseNPackets := func(n int) { LoseNPacketsLen(n, maxDatagramSize) }

 It("has the right values at startup", func() {
 // At startup make sure we are at the default.
@@ -121,7 +121,7 @@ var _ = Describe("Cubic Sender", func() {
 bytesToSend := sender.GetCongestionWindow()
 // It's expected 2 acks will arrive when the bytes_in_flight are greater than
 // half the CWND.
-Expect(bytesToSend).To(Equal(defaultWindowTCP + defaultTCPMSS*2*2))
+Expect(bytesToSend).To(Equal(defaultWindowTCP + maxDatagramSize*2*2))
 })

 It("exponential slow start", func() {
@@ -139,7 +139,7 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 cwnd := sender.GetCongestionWindow()
-Expect(cwnd).To(Equal(defaultWindowTCP + defaultTCPMSS*2*numberOfAcks))
+Expect(cwnd).To(Equal(defaultWindowTCP + maxDatagramSize*2*numberOfAcks))
 Expect(sender.BandwidthEstimate()).To(Equal(BandwidthFromDelta(cwnd, rttStats.SmoothedRTT())))
 })

@@ -152,12 +152,12 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 SendAvailableSendWindow()
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Lose a packet to exit slow start.
 LoseNPackets(1)
-packetsInRecoveryWindow := expectedSendWindow / defaultTCPMSS
+packetsInRecoveryWindow := expectedSendWindow / maxDatagramSize

 // We should now have fallen out of slow start with a reduced window.
 expectedSendWindow = protocol.ByteCount(float32(expectedSendWindow) * renoBeta)
@@ -165,7 +165,7 @@ var _ = Describe("Cubic Sender", func() {

 // Recovery phase. We need to ack every packet in the recovery window before
 // we exit recovery.
-numberOfPacketsInWindow := expectedSendWindow / defaultTCPMSS
+numberOfPacketsInWindow := expectedSendWindow / maxDatagramSize
 AckNPackets(int(packetsInRecoveryWindow))
 SendAvailableSendWindow()
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
@@ -177,7 +177,7 @@ var _ = Describe("Cubic Sender", func() {

 // Next ack should increase cwnd by 1.
 AckNPackets(1)
-expectedSendWindow += defaultTCPMSS
+expectedSendWindow += maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Now RTO and ensure slow start gets reset.
@@ -197,26 +197,26 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 SendAvailableSendWindow()
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Lose a packet to exit slow start. We should now have fallen out of
 // slow start with a window reduced by 1.
 LoseNPackets(1)
-expectedSendWindow -= defaultTCPMSS
+expectedSendWindow -= maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Lose 5 packets in recovery and verify that congestion window is reduced
 // further.
 LoseNPackets(5)
-expectedSendWindow -= 5 * defaultTCPMSS
+expectedSendWindow -= 5 * maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

-packetsInRecoveryWindow := expectedSendWindow / defaultTCPMSS
+packetsInRecoveryWindow := expectedSendWindow / maxDatagramSize

 // Recovery phase. We need to ack every packet in the recovery window before
 // we exit recovery.
-numberOfPacketsInWindow := expectedSendWindow / defaultTCPMSS
+numberOfPacketsInWindow := expectedSendWindow / maxDatagramSize
 AckNPackets(int(packetsInRecoveryWindow))
 SendAvailableSendWindow()
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
@@ -228,7 +228,7 @@ var _ = Describe("Cubic Sender", func() {

 // Next ack should increase cwnd by 1.
 AckNPackets(1)
-expectedSendWindow += defaultTCPMSS
+expectedSendWindow += maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Now RTO and ensure slow start gets reset.
@@ -244,23 +244,23 @@ var _ = Describe("Cubic Sender", func() {
 const numberOfAcks = 10
 for i := 0; i < numberOfAcks; i++ {
 // Send our full send window in half sized packets.
-SendAvailableSendWindowLen(defaultTCPMSS / 2)
+SendAvailableSendWindowLen(maxDatagramSize / 2)
 AckNPackets(2)
 }
-SendAvailableSendWindowLen(defaultTCPMSS / 2)
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+SendAvailableSendWindowLen(maxDatagramSize / 2)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Lose a packet to exit slow start. We should now have fallen out of
 // slow start with a window reduced by 1.
 LoseNPackets(1)
-expectedSendWindow -= defaultTCPMSS
+expectedSendWindow -= maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Lose 10 packets in recovery and verify that congestion window is reduced
 // by 5 packets.
-LoseNPacketsLen(10, defaultTCPMSS/2)
-expectedSendWindow -= 5 * defaultTCPMSS
+LoseNPacketsLen(10, maxDatagramSize/2)
+expectedSendWindow -= 5 * maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 })

@@ -287,7 +287,7 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 SendAvailableSendWindow()
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 LoseNPackets(1)
@@ -302,7 +302,7 @@ var _ = Describe("Cubic Sender", func() {
 // outstanding packets. The number of packets before we exit recovery is the
 // original CWND minus the packet that has been lost and the one which
 // triggered the loss.
-remainingPacketsInRecovery := sendWindowBeforeLoss/defaultTCPMSS - 2
+remainingPacketsInRecovery := sendWindowBeforeLoss/maxDatagramSize - 2

 for i := protocol.ByteCount(0); i < remainingPacketsInRecovery; i++ {
 AckNPackets(1)
@@ -311,7 +311,7 @@ var _ = Describe("Cubic Sender", func() {
 }

 // We need to ack another window before we increase CWND by 1.
-numberOfPacketsInWindow := expectedSendWindow / defaultTCPMSS
+numberOfPacketsInWindow := expectedSendWindow / maxDatagramSize
 for i := protocol.ByteCount(0); i < numberOfPacketsInWindow; i++ {
 AckNPackets(1)
 Expect(SendAvailableSendWindow()).To(Equal(1))
@@ -319,7 +319,7 @@ var _ = Describe("Cubic Sender", func() {
 }

 AckNPackets(1)
-expectedSendWindow += defaultTCPMSS
+expectedSendWindow += maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 })

@@ -336,13 +336,13 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 SendAvailableSendWindow()
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Lose one more than the congestion window reduction, so that after loss,
 // bytes_in_flight is lesser than the congestion window.
 sendWindowAfterLoss := protocol.ByteCount(renoBeta * float32(expectedSendWindow))
-numPacketsToLose := (expectedSendWindow-sendWindowAfterLoss)/defaultTCPMSS + 1
+numPacketsToLose := (expectedSendWindow-sendWindowAfterLoss)/maxDatagramSize + 1
 LoseNPackets(int(numPacketsToLose))
 // Immediately after the loss, ensure at least one packet can be sent.
 // Losses without subsequent acks can occur with timer based loss detection.
@@ -384,8 +384,8 @@ var _ = Describe("Cubic Sender", func() {
 // Expect the window to decrease to the minimum once the RTO fires
 // and slow start threshold to be set to 1/2 of the CWND.
 sender.OnRetransmissionTimeout(true)
-Expect(sender.GetCongestionWindow()).To(Equal(2 * defaultTCPMSS))
-Expect(sender.SlowstartThreshold()).To(Equal(5 * defaultTCPMSS))
+Expect(sender.GetCongestionWindow()).To(Equal(2 * maxDatagramSize))
+Expect(sender.SlowstartThreshold()).To(Equal(5 * maxDatagramSize))
 })

 It("RTO congestion window no retransmission", func() {
@@ -399,8 +399,8 @@ var _ = Describe("Cubic Sender", func() {

 It("tcp cubic reset epoch on quiescence", func() {
 const maxCongestionWindow = 50
-const maxCongestionWindowBytes = maxCongestionWindow * defaultTCPMSS
-sender = newCubicSender(&clock, rttStats, false, initialCongestionWindowPackets*defaultTCPMSS, maxCongestionWindowBytes)
+const maxCongestionWindowBytes = maxCongestionWindow * maxDatagramSize
+sender = newCubicSender(&clock, rttStats, false, initialCongestionWindowPackets*maxDatagramSize, maxCongestionWindowBytes)

 numSent := SendAvailableSendWindow()

@@ -433,7 +433,7 @@ var _ = Describe("Cubic Sender", func() {
 savedCwnd = sender.GetCongestionWindow()
 SendAvailableSendWindow()
 AckNPackets(1)
-Expect(savedCwnd).To(BeNumerically("~", sender.GetCongestionWindow(), defaultTCPMSS))
+Expect(savedCwnd).To(BeNumerically("~", sender.GetCongestionWindow(), maxDatagramSize))
 Expect(maxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
 })

@@ -463,7 +463,7 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 SendAvailableSendWindow()
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 LoseNPackets(1)
@@ -484,7 +484,7 @@ var _ = Describe("Cubic Sender", func() {
 Expect(sender.InRecovery()).To(BeFalse())

 // Out of recovery now. Congestion window should not grow for half an RTT.
-packetsInSendWindow := expectedSendWindow / defaultTCPMSS
+packetsInSendWindow := expectedSendWindow / maxDatagramSize
 SendAvailableSendWindow()
 AckNPackets(int(packetsInSendWindow/2 - 2))
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
@@ -492,7 +492,7 @@ var _ = Describe("Cubic Sender", func() {
 // Next ack should increase congestion window by 1MSS.
 SendAvailableSendWindow()
 AckNPackets(2)
-expectedSendWindow += defaultTCPMSS
+expectedSendWindow += maxDatagramSize
 packetsInSendWindow++
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

@@ -504,7 +504,7 @@ var _ = Describe("Cubic Sender", func() {
 // Next ack should cause congestion window to grow by 1MSS.
 SendAvailableSendWindow()
 AckNPackets(2)
-expectedSendWindow += defaultTCPMSS
+expectedSendWindow += maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 })

@@ -518,7 +518,7 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 SendAvailableSendWindow()
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 LoseNPackets(1)
@@ -539,7 +539,7 @@ var _ = Describe("Cubic Sender", func() {
 Expect(sender.InRecovery()).To(BeFalse())

 // Out of recovery now. Congestion window should not grow during RTT.
-for i := protocol.ByteCount(0); i < expectedSendWindow/defaultTCPMSS-2; i += 2 {
+for i := protocol.ByteCount(0); i < expectedSendWindow/maxDatagramSize-2; i += 2 {
 // Send our full send window.
 SendAvailableSendWindow()
 AckNPackets(2)
@@ -549,7 +549,7 @@ var _ = Describe("Cubic Sender", func() {
 // Next ack should cause congestion window to grow by 1MSS.
 SendAvailableSendWindow()
 AckNPackets(2)
-expectedSendWindow += defaultTCPMSS
+expectedSendWindow += maxDatagramSize
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 })

@@ -562,7 +562,7 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(1)

 Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(renoBeta * float32(defaultWindowTCP))))
-windowInPackets := renoBeta * float32(defaultWindowTCP) / float32(defaultTCPMSS)
+windowInPackets := renoBeta * float32(defaultWindowTCP) / float32(maxDatagramSize)
 numSent := SendAvailableSendWindow()
 Expect(numSent).To(BeEquivalentTo(windowInPackets))
 })
@@ -580,7 +580,7 @@ var _ = Describe("Cubic Sender", func() {
 AckNPackets(2)
 }
 SendAvailableSendWindow()
-expectedSendWindow := defaultWindowTCP + (defaultTCPMSS * 2 * numberOfAcks)
+expectedSendWindow := defaultWindowTCP + (maxDatagramSize * 2 * numberOfAcks)
 Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 // Loses a packet to exit slow start.
@@ -600,9 +600,9 @@ var _ = Describe("Cubic Sender", func() {
 })

 It("default max cwnd", func() {
-sender = newCubicSender(&clock, rttStats, true /*reno*/, initialCongestionWindowPackets*defaultTCPMSS, maxCongestionWindow)
+sender = newCubicSender(&clock, rttStats, true /*reno*/, initialCongestionWindowPackets*maxDatagramSize, maxCongestionWindow)

-defaultMaxCongestionWindowPackets := maxCongestionWindow / defaultTCPMSS
+defaultMaxCongestionWindowPackets := maxCongestionWindow / maxDatagramSize
 for i := 1; i < int(defaultMaxCongestionWindowPackets); i++ {
 sender.MaybeExitSlowStart()
 sender.OnPacketAcked(protocol.PacketNumber(i), 1350, sender.GetCongestionWindow(), clock.Now())
@@ -612,7 +612,7 @@ var _ = Describe("Cubic Sender", func() {

 It("limit cwnd increase in congestion avoidance", func() {
 // Enable Cubic.
-sender = newCubicSender(&clock, rttStats, false, initialCongestionWindowPackets*defaultTCPMSS, MaxCongestionWindow)
+sender = newCubicSender(&clock, rttStats, false, initialCongestionWindowPackets*maxDatagramSize, MaxCongestionWindow)
 numSent := SendAvailableSendWindow()

 // Make sure we fall out of slow start.
@@ -644,6 +644,6 @@ var _ = Describe("Cubic Sender", func() {

 // Ack two packets. The CWND should increase by only one packet.
 AckNPackets(2)
-Expect(sender.GetCongestionWindow()).To(Equal(savedCwnd + defaultTCPMSS))
+Expect(sender.GetCongestionWindow()).To(Equal(savedCwnd + maxDatagramSize))
 })
 })
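A quick worked check of the arithmetic the renamed tests above repeat (not part of the diff; it only uses constants visible in the hunks):

package main

import "fmt"

func main() {
	// Constants from the hunks above: 1460-byte datagrams, a 10-packet
	// initial window, and numberOfAcks = 10 in the hunk at -244.
	const maxDatagramSize = 1460
	const initialCongestionWindowPackets = 10
	const defaultWindowTCP = initialCongestionWindowPackets * maxDatagramSize // 14600 bytes

	// Each slow-start iteration acks 2 packets, growing the window by
	// 2 datagrams, so after 10 iterations:
	const numberOfAcks = 10
	expectedSendWindow := defaultWindowTCP + maxDatagramSize*2*numberOfAcks
	fmt.Println(expectedSendWindow) // 43800 bytes
}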
@ -28,12 +28,12 @@ var _ = Describe("Cubic", func() {
|
|||
})
|
||||
|
||||
renoCwnd := func(currentCwnd protocol.ByteCount) protocol.ByteCount {
|
||||
return currentCwnd + protocol.ByteCount(float32(defaultTCPMSS)*nConnectionAlpha*float32(defaultTCPMSS)/float32(currentCwnd))
|
||||
return currentCwnd + protocol.ByteCount(float32(maxDatagramSize)*nConnectionAlpha*float32(maxDatagramSize)/float32(currentCwnd))
|
||||
}
|
||||
|
||||
cubicConvexCwnd := func(initialCwnd protocol.ByteCount, rtt, elapsedTime time.Duration) protocol.ByteCount {
|
||||
offset := protocol.ByteCount((elapsedTime+rtt)/time.Microsecond) << 10 / 1000000
|
||||
deltaCongestionWindow := 410 * offset * offset * offset * defaultTCPMSS >> 40
|
||||
deltaCongestionWindow := 410 * offset * offset * offset * maxDatagramSize >> 40
|
||||
return initialCwnd + deltaCongestionWindow
|
||||
}
|
||||
|
||||
|
@ -41,13 +41,13 @@ var _ = Describe("Cubic", func() {
|
|||
// Convex growth.
|
||||
const rttMin = 100 * time.Millisecond
|
||||
const rttMinS = float32(rttMin/time.Millisecond) / 1000.0
|
||||
currentCwnd := 10 * defaultTCPMSS
|
||||
currentCwnd := 10 * maxDatagramSize
|
||||
initialCwnd := currentCwnd
|
||||
|
||||
clock.Advance(time.Millisecond)
|
||||
initialTime := clock.Now()
|
||||
expectedFirstCwnd := renoCwnd(currentCwnd)
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, initialTime)
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, initialTime)
|
||||
Expect(expectedFirstCwnd).To(Equal(currentCwnd))
|
||||
|
||||
// Normal TCP phase.
|
||||
|
@ -59,13 +59,13 @@ var _ = Describe("Cubic", func() {
|
|||
// receive current_cwnd/Alpha acks back. (This is another way of
|
||||
// saying we expect cwnd to increase by approximately Alpha once
|
||||
// we receive current_cwnd number ofacks back).
|
||||
numAcksThisEpoch := int(float32(currentCwnd/defaultTCPMSS) / nConnectionAlpha)
|
||||
numAcksThisEpoch := int(float32(currentCwnd/maxDatagramSize) / nConnectionAlpha)
|
||||
|
||||
initialCwndThisEpoch := currentCwnd
|
||||
for n := 0; n < numAcksThisEpoch; n++ {
|
||||
// Call once per ACK.
|
||||
expectedNextCwnd := renoCwnd(currentCwnd)
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
Expect(currentCwnd).To(Equal(expectedNextCwnd))
|
||||
}
|
||||
// Our byte-wise Reno implementation is an estimate. We expect
|
||||
|
@ -73,51 +73,51 @@ var _ = Describe("Cubic", func() {
|
|||
// cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as
|
||||
// half a packet for smaller values of current_cwnd.
|
||||
cwndChangeThisEpoch := currentCwnd - initialCwndThisEpoch
|
||||
Expect(cwndChangeThisEpoch).To(BeNumerically("~", defaultTCPMSS, defaultTCPMSS/2))
|
||||
Expect(cwndChangeThisEpoch).To(BeNumerically("~", maxDatagramSize, maxDatagramSize/2))
|
||||
clock.Advance(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
for i := 0; i < 54; i++ {
|
||||
maxAcksThisEpoch := currentCwnd / defaultTCPMSS
|
||||
maxAcksThisEpoch := currentCwnd / maxDatagramSize
|
||||
interval := time.Duration(100*1000/maxAcksThisEpoch) * time.Microsecond
|
||||
for n := 0; n < int(maxAcksThisEpoch); n++ {
|
||||
clock.Advance(interval)
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
|
||||
// If we allow per-ack updates, every update is a small cubic update.
|
||||
Expect(currentCwnd).To(Equal(expectedCwnd))
|
||||
}
|
||||
}
|
||||
expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
Expect(currentCwnd).To(Equal(expectedCwnd))
|
||||
})
|
||||
|
||||
It("works above the origin with fine grained cubing", func() {
|
||||
// Start the test with an artificially large cwnd to prevent Reno
|
||||
// from over-taking cubic.
|
||||
currentCwnd := 1000 * defaultTCPMSS
|
||||
currentCwnd := 1000 * maxDatagramSize
|
||||
initialCwnd := currentCwnd
|
||||
rttMin := 100 * time.Millisecond
|
||||
clock.Advance(time.Millisecond)
|
||||
initialTime := clock.Now()
|
||||
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
clock.Advance(600 * time.Millisecond)
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
|
||||
// We expect the algorithm to perform only non-zero, fine-grained cubic
|
||||
// increases on every ack in this case.
|
||||
for i := 0; i < 100; i++ {
|
||||
clock.Advance(10 * time.Millisecond)
|
||||
expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
|
||||
nextCwnd := cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
nextCwnd := cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
// Make sure we are performing cubic increases.
|
||||
Expect(nextCwnd).To(Equal(expectedCwnd))
|
||||
// Make sure that these are non-zero, less-than-packet sized increases.
|
||||
Expect(nextCwnd).To(BeNumerically(">", currentCwnd))
|
||||
cwndDelta := nextCwnd - currentCwnd
|
||||
Expect(defaultTCPMSS / 10).To(BeNumerically(">", cwndDelta))
|
||||
Expect(maxDatagramSize / 10).To(BeNumerically(">", cwndDelta))
|
||||
currentCwnd = nextCwnd
|
||||
}
|
||||
})
|
||||
|
@ -126,14 +126,14 @@ var _ = Describe("Cubic", func() {
|
|||
// Start the test with a large cwnd and RTT, to force the first
|
||||
// increase to be a cubic increase.
|
||||
initialCwndPackets := 150
|
||||
currentCwnd := protocol.ByteCount(initialCwndPackets) * defaultTCPMSS
|
||||
currentCwnd := protocol.ByteCount(initialCwndPackets) * maxDatagramSize
|
||||
rttMin := 350 * time.Millisecond
|
||||
|
||||
// Initialize the epoch
|
||||
clock.Advance(time.Millisecond)
|
||||
// Keep track of the growth of the reno-equivalent cwnd.
|
||||
rCwnd := renoCwnd(currentCwnd)
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
initialCwnd := currentCwnd
|
||||
|
||||
// Simulate the return of cwnd packets in less than
|
||||
|
@ -149,10 +149,10 @@ var _ = Describe("Cubic", func() {
|
|||
// regardless of the temporary plateau.
|
||||
clock.Advance(interval)
|
||||
rCwnd = renoCwnd(rCwnd)
|
||||
Expect(cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())).To(Equal(currentCwnd))
|
||||
Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(currentCwnd))
|
||||
for i := 1; i < maxAcks; i++ {
|
||||
clock.Advance(interval)
|
||||
nextCwnd := cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
nextCwnd := cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
rCwnd = renoCwnd(rCwnd)
|
||||
// The window shoud increase on every ack.
|
||||
Expect(nextCwnd).To(BeNumerically(">", currentCwnd))
|
||||
|
@ -165,17 +165,17 @@ var _ = Describe("Cubic", func() {
|
|||
// packet, because our byte-wise Reno algorithm is always a slight
|
||||
// under-estimation). Without per-ack updates, the current_cwnd
|
||||
// would otherwise be unchanged.
|
||||
minimumExpectedIncrease := defaultTCPMSS * 9 / 10
|
||||
minimumExpectedIncrease := maxDatagramSize * 9 / 10
|
||||
Expect(currentCwnd).To(BeNumerically(">", initialCwnd+minimumExpectedIncrease))
|
||||
})
|
||||
|
||||
It("handles loss events", func() {
|
||||
rttMin := 100 * time.Millisecond
|
||||
currentCwnd := 422 * defaultTCPMSS
|
||||
currentCwnd := 422 * maxDatagramSize
|
||||
expectedCwnd := renoCwnd(currentCwnd)
|
||||
// Initialize the state.
|
||||
clock.Advance(time.Millisecond)
|
||||
Expect(cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))
|
||||
Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))
|
||||
|
||||
// On the first loss, the last max congestion window is set to the
|
||||
// congestion window before the loss.
|
||||
|
@ -199,7 +199,7 @@ var _ = Describe("Cubic", func() {
|
|||
Expect(cubic.lastMaxCongestionWindow).To(Equal(expectedLastMax))
|
||||
Expect(expectedCwnd).To(BeNumerically("<", cubic.lastMaxCongestionWindow))
|
||||
// Simulate an increase, and check that we are below the origin.
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
Expect(cubic.lastMaxCongestionWindow).To(BeNumerically(">", currentCwnd))
|
||||
|
||||
// On the final loss, simulate the condition where the congestion
|
||||
|
@ -215,21 +215,21 @@ var _ = Describe("Cubic", func() {
|
|||
It("works below origin", func() {
|
||||
// Concave growth.
|
||||
rttMin := 100 * time.Millisecond
|
||||
currentCwnd := 422 * defaultTCPMSS
|
||||
currentCwnd := 422 * maxDatagramSize
|
||||
expectedCwnd := renoCwnd(currentCwnd)
|
||||
// Initialize the state.
|
||||
clock.Advance(time.Millisecond)
|
||||
Expect(cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))
|
||||
Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))
|
||||
|
||||
expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
|
||||
Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
|
||||
currentCwnd = expectedCwnd
|
||||
// First update after loss to initialize the epoch.
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
// Cubic phase.
|
||||
for i := 0; i < 40; i++ {
|
||||
clock.Advance(100 * time.Millisecond)
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(defaultTCPMSS, currentCwnd, rttMin, clock.Now())
|
||||
currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
|
||||
}
|
||||
expectedCwnd = 553632
|
||||
Expect(currentCwnd).To(Equal(expectedCwnd))
|
||||
|
|
|
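A reading of the cubicConvexCwnd helper above (not part of the diff): 410/1024 ≈ 0.4 is the usual CUBIC C constant, time is measured in 1/1024-second units (shift left by 10, then divide by 1,000,000 microseconds), and the cubed term is scaled back down by the >>40. With one second elapsed, offset = 1,000,000 << 10 / 1,000,000 = 1024, so the delta is 410 × 1024³ × 1460 >> 40 = 584 bytes, about 0.4 of a 1460-byte datagram, which is consistent with C ≈ 0.4.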
@@ -36,7 +36,7 @@ func (p *PrrSender) OnPacketAcked(ackedBytes protocol.ByteCount) {
 // CanSend returns if packets can be sent
 func (p *PrrSender) CanSend(congestionWindow, bytesInFlight, slowstartThreshold protocol.ByteCount) bool {
 // Return QuicTime::Zero In order to ensure limited transmit always works.
-if p.bytesSentSinceLoss == 0 || bytesInFlight < defaultTCPMSS {
+if p.bytesSentSinceLoss == 0 || bytesInFlight < maxDatagramSize {
 return true
 }
 if congestionWindow > bytesInFlight {
@@ -44,7 +44,7 @@ func (p *PrrSender) CanSend(congestionWindow, bytesInFlight, slowstartThreshold
 // of sending the entire available window. This prevents burst retransmits
 // when more packets are lost than the CWND reduction.
 // limit = MAX(prr_delivered - prr_out, DeliveredData) + MSS
-return p.bytesDeliveredSinceLoss+p.ackCountSinceLoss*defaultTCPMSS > p.bytesSentSinceLoss
+return p.bytesDeliveredSinceLoss+p.ackCountSinceLoss*maxDatagramSize > p.bytesSentSinceLoss
 }
 // Implement Proportional Rate Reduction (RFC6937).
 // Checks a simplified version of the PRR formula that doesn't use division:
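A standalone sketch of the simplified PRR-SSRB check in the hunk above (not part of the diff; the helper name is made up, the field names mirror the hunk): after a loss, while bytes in flight are below the congestion window, each ack may trigger a send as long as delivered bytes plus one datagram per ack still exceed the bytes already sent since the loss, which is why the tests below expect at most two sends per ack.

package main

import "fmt"

// canSendSSRB mirrors the renamed return expression above, with plain ints.
func canSendSSRB(bytesDeliveredSinceLoss, ackCountSinceLoss, bytesSentSinceLoss, maxDatagramSize int) bool {
	return bytesDeliveredSinceLoss+ackCountSinceLoss*maxDatagramSize > bytesSentSinceLoss
}

func main() {
	const mds = 1460
	// One full-size packet acked after a loss, nothing sent yet: send allowed.
	fmt.Println(canSendSSRB(mds, 1, 0, mds)) // true
	// Still allowed after one packet has gone out on that ack...
	fmt.Println(canSendSSRB(mds, 1, mds, mds)) // true
	// ...but not after two, matching the burst-loss test below.
	fmt.Println(canSendSSRB(mds, 1, 2*mds, mds)) // false
}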
@ -18,35 +18,35 @@ var _ = Describe("PRR sender", func() {
|
|||
|
||||
It("single loss results in send on every other ack", func() {
|
||||
numPacketsInFlight := protocol.ByteCount(50)
|
||||
bytesInFlight := numPacketsInFlight * defaultTCPMSS
|
||||
bytesInFlight := numPacketsInFlight * maxDatagramSize
|
||||
sshthreshAfterLoss := numPacketsInFlight / 2
|
||||
congestionWindow := sshthreshAfterLoss * defaultTCPMSS
|
||||
congestionWindow := sshthreshAfterLoss * maxDatagramSize
|
||||
|
||||
prr.OnPacketLost(bytesInFlight)
|
||||
// Ack a packet. PRR allows one packet to leave immediately.
|
||||
prr.OnPacketAcked(defaultTCPMSS)
|
||||
bytesInFlight -= defaultTCPMSS
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*defaultTCPMSS)).To(BeTrue())
|
||||
prr.OnPacketAcked(maxDatagramSize)
|
||||
bytesInFlight -= maxDatagramSize
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*maxDatagramSize)).To(BeTrue())
|
||||
// Send retransmission.
|
||||
prr.OnPacketSent(defaultTCPMSS)
|
||||
prr.OnPacketSent(maxDatagramSize)
|
||||
// PRR shouldn't allow sending any more packets.
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*defaultTCPMSS)).To(BeFalse())
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*maxDatagramSize)).To(BeFalse())
|
||||
|
||||
// One packet is lost, and one ack was consumed above. PRR now paces
|
||||
// transmissions through the remaining 48 acks. PRR will alternatively
|
||||
// disallow and allow a packet to be sent in response to an ack.
|
||||
for i := protocol.ByteCount(0); i < sshthreshAfterLoss-1; i++ {
|
||||
// Ack a packet. PRR shouldn't allow sending a packet in response.
|
||||
prr.OnPacketAcked(defaultTCPMSS)
|
||||
bytesInFlight -= defaultTCPMSS
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*defaultTCPMSS)).To(BeFalse())
|
||||
prr.OnPacketAcked(maxDatagramSize)
|
||||
bytesInFlight -= maxDatagramSize
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*maxDatagramSize)).To(BeFalse())
|
||||
// Ack another packet. PRR should now allow sending a packet in response.
|
||||
prr.OnPacketAcked(defaultTCPMSS)
|
||||
bytesInFlight -= defaultTCPMSS
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*defaultTCPMSS)).To(BeTrue())
|
||||
prr.OnPacketAcked(maxDatagramSize)
|
||||
bytesInFlight -= maxDatagramSize
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*maxDatagramSize)).To(BeTrue())
|
||||
// Send a packet in response.
|
||||
prr.OnPacketSent(defaultTCPMSS)
|
||||
bytesInFlight += defaultTCPMSS
|
||||
prr.OnPacketSent(maxDatagramSize)
|
||||
bytesInFlight += maxDatagramSize
|
||||
}
|
||||
|
||||
// Since bytes_in_flight is now equal to congestion_window, PRR now maintains
|
||||
|
@ -54,54 +54,54 @@ var _ = Describe("PRR sender", func() {
|
|||
Expect(bytesInFlight).To(Equal(congestionWindow))
|
||||
for i := 0; i < 10; i++ {
|
||||
// Ack a packet.
|
||||
prr.OnPacketAcked(defaultTCPMSS)
|
||||
bytesInFlight -= defaultTCPMSS
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*defaultTCPMSS)).To(BeTrue())
|
||||
prr.OnPacketAcked(maxDatagramSize)
|
||||
bytesInFlight -= maxDatagramSize
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*maxDatagramSize)).To(BeTrue())
|
||||
// Send a packet in response, since PRR allows it.
|
||||
prr.OnPacketSent(defaultTCPMSS)
|
||||
bytesInFlight += defaultTCPMSS
|
||||
prr.OnPacketSent(maxDatagramSize)
|
||||
bytesInFlight += maxDatagramSize
|
||||
|
||||
// Since bytes_in_flight is equal to the congestion_window,
|
||||
// PRR disallows sending.
|
||||
Expect(bytesInFlight).To(Equal(congestionWindow))
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*defaultTCPMSS)).To(BeFalse())
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, sshthreshAfterLoss*maxDatagramSize)).To(BeFalse())
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
It("burst loss results in slow start", func() {
|
||||
bytesInFlight := 20 * defaultTCPMSS
|
||||
bytesInFlight := 20 * maxDatagramSize
|
||||
const numPacketsLost = 13
|
||||
const ssthreshAfterLoss = 10
|
||||
const congestionWindow = ssthreshAfterLoss * defaultTCPMSS
|
||||
const congestionWindow = ssthreshAfterLoss * maxDatagramSize
|
||||
|
||||
// Lose 13 packets.
|
||||
bytesInFlight -= numPacketsLost * defaultTCPMSS
|
||||
bytesInFlight -= numPacketsLost * maxDatagramSize
|
||||
prr.OnPacketLost(bytesInFlight)
|
||||
|
||||
// PRR-SSRB will allow the following 3 acks to send up to 2 packets.
|
||||
for i := 0; i < 3; i++ {
|
||||
prr.OnPacketAcked(defaultTCPMSS)
|
||||
bytesInFlight -= defaultTCPMSS
|
||||
prr.OnPacketAcked(maxDatagramSize)
|
||||
bytesInFlight -= maxDatagramSize
|
||||
// PRR-SSRB should allow two packets to be sent.
|
||||
for j := 0; j < 2; j++ {
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, ssthreshAfterLoss*defaultTCPMSS)).To(BeTrue())
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, ssthreshAfterLoss*maxDatagramSize)).To(BeTrue())
|
||||
// Send a packet in response.
|
||||
prr.OnPacketSent(defaultTCPMSS)
|
||||
bytesInFlight += defaultTCPMSS
|
||||
prr.OnPacketSent(maxDatagramSize)
|
||||
bytesInFlight += maxDatagramSize
|
||||
}
|
||||
// PRR should allow no more than 2 packets in response to an ack.
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, ssthreshAfterLoss*defaultTCPMSS)).To(BeFalse())
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, ssthreshAfterLoss*maxDatagramSize)).To(BeFalse())
|
||||
}
|
||||
|
||||
// Out of SSRB mode, PRR allows one send in response to each ack.
|
||||
for i := 0; i < 10; i++ {
|
||||
prr.OnPacketAcked(defaultTCPMSS)
|
||||
bytesInFlight -= defaultTCPMSS
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, ssthreshAfterLoss*defaultTCPMSS)).To(BeTrue())
|
||||
prr.OnPacketAcked(maxDatagramSize)
|
||||
bytesInFlight -= maxDatagramSize
|
||||
Expect(prr.CanSend(congestionWindow, bytesInFlight, ssthreshAfterLoss*maxDatagramSize)).To(BeTrue())
|
||||
// Send a packet in response.
|
||||
prr.OnPacketSent(defaultTCPMSS)
|
||||
bytesInFlight += defaultTCPMSS
|
||||
prr.OnPacketSent(maxDatagramSize)
|
||||
bytesInFlight += maxDatagramSize
|
||||
}
|
||||
})
|
||||
})
|
||||
|
|