Mirror of https://github.com/refraction-networking/uquic.git (synced 2025-04-03 20:27:35 +03:00)

make golint happy about the congestion tests

Commit 6fcdcdbea1 (parent c65b1aca4e)
5 changed files with 329 additions and 330 deletions
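
The renames in this diff follow the Go naming style that golint enforces: identifiers use mixedCaps (packetsSent, expectedSendWindow, numberOfAcks) rather than snake_case, and the C++-style k prefix is dropped from constants (kNumberOfAcks becomes numberOfAcks). golint reports the underscored names with warnings like "don't use underscores in Go names"; the k-prefixed constants are renamed alongside them for consistency. Below is a minimal standalone sketch of the convention, reusing identifiers from the diff but with placeholder logic; it is not part of the commit itself.

// Sketch only: illustrates the naming convention applied throughout this
// commit (mixedCaps instead of snake_case and k-prefixed constants).
// Identifiers mirror renames in the diff; the loop body is a placeholder.
package main

import "fmt"

const numberOfAcks = 5 // previously kNumberOfAcks

func sendAndAck() int {
	packetsSent := 0 // previously packets_sent
	for i := 0; i < numberOfAcks; i++ {
		// The tests acknowledge two packets per send-window iteration.
		packetsSent += 2
	}
	return packetsSent
}

func main() {
	fmt.Println(sendAndAck()) // prints 10
}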
@@ -45,14 +45,14 @@ var _ = Describe("Cubic Sender", func() {

 	SendAvailableSendWindowLen := func(packetLength protocol.ByteCount) int {
 		// Send as long as TimeUntilSend returns InfDuration.
-		packets_sent := 0
+		packetsSent := 0
 		for bytesInFlight < sender.GetCongestionWindow() {
 			sender.OnPacketSent(clock.Now(), bytesInFlight, packetNumber, packetLength, true)
 			packetNumber++
-			packets_sent++
+			packetsSent++
 			bytesInFlight += packetLength
 		}
-		return packets_sent
+		return packetsSent
 	}

 	// Normal is that TCP acks every other segment.
@@ -108,14 +108,14 @@ var _ = Describe("Cubic Sender", func() {

 	It("application limited slow start", func() {
 		// Send exactly 10 packets and ensure the CWND ends at 14 packets.
-		const kNumberOfAcks = 5
+		const numberOfAcks = 5
 		// At startup make sure we can send.
 		Expect(sender.TimeUntilSend(0)).To(BeZero())
 		// Make sure we can send.
 		Expect(sender.TimeUntilSend(0)).To(BeZero())

 		SendAvailableSendWindow()
-		for i := 0; i < kNumberOfAcks; i++ {
+		for i := 0; i < numberOfAcks; i++ {
 			AckNPackets(2)
 		}
 		bytesToSend := sender.GetCongestionWindow()
@@ -125,59 +125,59 @@ var _ = Describe("Cubic Sender", func() {
 	})

 	It("exponential slow start", func() {
-		const kNumberOfAcks = 20
+		const numberOfAcks = 20
 		// At startup make sure we can send.
 		Expect(sender.TimeUntilSend(0)).To(BeZero())
 		Expect(sender.BandwidthEstimate()).To(BeZero())
 		// Make sure we can send.
 		Expect(sender.TimeUntilSend(0)).To(BeZero())

-		for i := 0; i < kNumberOfAcks; i++ {
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		cwnd := sender.GetCongestionWindow()
-		Expect(cwnd).To(Equal(defaultWindowTCP + protocol.DefaultTCPMSS*2*kNumberOfAcks))
+		Expect(cwnd).To(Equal(defaultWindowTCP + protocol.DefaultTCPMSS*2*numberOfAcks))
 		Expect(sender.BandwidthEstimate()).To(Equal(BandwidthFromDelta(cwnd, rttStats.SmoothedRTT())))
 	})

 	It("slow start packet loss", func() {
 		sender.SetNumEmulatedConnections(1)
-		const kNumberOfAcks = 10
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 10
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		SendAvailableSendWindow()
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Lose a packet to exit slow start.
 		LoseNPackets(1)
-		packets_in_recovery_window := expected_send_window / protocol.DefaultTCPMSS
+		packetsInRecoveryWindow := expectedSendWindow / protocol.DefaultTCPMSS

 		// We should now have fallen out of slow start with a reduced window.
-		expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow = protocol.ByteCount(float32(expectedSendWindow) * renoBeta)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Recovery phase. We need to ack every packet in the recovery window before
 		// we exit recovery.
-		number_of_packets_in_window := expected_send_window / protocol.DefaultTCPMSS
-		AckNPackets(int(packets_in_recovery_window))
+		numberOfPacketsInWindow := expectedSendWindow / protocol.DefaultTCPMSS
+		AckNPackets(int(packetsInRecoveryWindow))
 		SendAvailableSendWindow()
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// We need to ack an entire window before we increase CWND by 1.
-		AckNPackets(int(number_of_packets_in_window) - 2)
+		AckNPackets(int(numberOfPacketsInWindow) - 2)
 		SendAvailableSendWindow()
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Next ack should increase cwnd by 1.
 		AckNPackets(1)
-		expected_send_window += protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow += protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Now RTO and ensure slow start gets reset.
 		Expect(sender.HybridSlowStart().Started()).To(BeTrue())
@@ -189,46 +189,46 @@ var _ = Describe("Cubic Sender", func() {
 		sender.SetSlowStartLargeReduction(true)

 		sender.SetNumEmulatedConnections(1)
-		const kNumberOfAcks = 10
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 10
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		SendAvailableSendWindow()
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Lose a packet to exit slow start. We should now have fallen out of
 		// slow start with a window reduced by 1.
 		LoseNPackets(1)
-		expected_send_window -= protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow -= protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Lose 5 packets in recovery and verify that congestion window is reduced
 		// further.
 		LoseNPackets(5)
-		expected_send_window -= 5 * protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow -= 5 * protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

-		packets_in_recovery_window := expected_send_window / protocol.DefaultTCPMSS
+		packetsInRecoveryWindow := expectedSendWindow / protocol.DefaultTCPMSS

 		// Recovery phase. We need to ack every packet in the recovery window before
 		// we exit recovery.
-		number_of_packets_in_window := expected_send_window / protocol.DefaultTCPMSS
-		AckNPackets(int(packets_in_recovery_window))
+		numberOfPacketsInWindow := expectedSendWindow / protocol.DefaultTCPMSS
+		AckNPackets(int(packetsInRecoveryWindow))
 		SendAvailableSendWindow()
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// We need to ack the rest of the window before cwnd increases by 1.
-		AckNPackets(int(number_of_packets_in_window - 1))
+		AckNPackets(int(numberOfPacketsInWindow - 1))
 		SendAvailableSendWindow()
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Next ack should increase cwnd by 1.
 		AckNPackets(1)
-		expected_send_window += protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow += protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Now RTO and ensure slow start gets reset.
 		Expect(sender.HybridSlowStart().Started()).To(BeTrue())
@@ -240,27 +240,27 @@ var _ = Describe("Cubic Sender", func() {
 		sender.SetSlowStartLargeReduction(true)

 		sender.SetNumEmulatedConnections(1)
-		const kNumberOfAcks = 10
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 10
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window in half sized packets.
 			SendAvailableSendWindowLen(protocol.DefaultTCPMSS / 2)
 			AckNPackets(2)
 		}
 		SendAvailableSendWindowLen(protocol.DefaultTCPMSS / 2)
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Lose a packet to exit slow start. We should now have fallen out of
 		// slow start with a window reduced by 1.
 		LoseNPackets(1)
-		expected_send_window -= protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow -= protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Lose 10 packets in recovery and verify that congestion window is reduced
 		// by 5 packets.
 		LoseNPacketsLen(10, protocol.DefaultTCPMSS/2)
-		expected_send_window -= 5 * protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow -= 5 * protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 	})

 	// this test doesn't work any more after introducing the pacing needed for QUIC
@@ -280,47 +280,47 @@ var _ = Describe("Cubic Sender", func() {
 		sender.SetNumEmulatedConnections(1)
 		// Test based on the first example in RFC6937.
 		// Ack 10 packets in 5 acks to raise the CWND to 20, as in the example.
-		const kNumberOfAcks = 5
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 5
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		SendAvailableSendWindow()
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		LoseNPackets(1)

 		// We should now have fallen out of slow start with a reduced window.
-		send_window_before_loss := expected_send_window
-		expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		sendWindowBeforeLoss := expectedSendWindow
+		expectedSendWindow = protocol.ByteCount(float32(expectedSendWindow) * renoBeta)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Testing TCP proportional rate reduction.
 		// We should send packets paced over the received acks for the remaining
 		// outstanding packets. The number of packets before we exit recovery is the
 		// original CWND minus the packet that has been lost and the one which
 		// triggered the loss.
-		remaining_packets_in_recovery := send_window_before_loss/protocol.DefaultTCPMSS - 2
+		remainingPacketsInRecovery := sendWindowBeforeLoss/protocol.DefaultTCPMSS - 2

-		for i := protocol.ByteCount(0); i < remaining_packets_in_recovery; i++ {
+		for i := protocol.ByteCount(0); i < remainingPacketsInRecovery; i++ {
 			AckNPackets(1)
 			SendAvailableSendWindow()
-			Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+			Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 		}

 		// We need to ack another window before we increase CWND by 1.
-		number_of_packets_in_window := expected_send_window / protocol.DefaultTCPMSS
-		for i := protocol.ByteCount(0); i < number_of_packets_in_window; i++ {
+		numberOfPacketsInWindow := expectedSendWindow / protocol.DefaultTCPMSS
+		for i := protocol.ByteCount(0); i < numberOfPacketsInWindow; i++ {
 			AckNPackets(1)
 			Expect(SendAvailableSendWindow()).To(Equal(1))
-			Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+			Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 		}

 		AckNPackets(1)
-		expected_send_window += protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow += protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 	})

 	It("slow start burst packet loss PRR", func() {
@@ -329,29 +329,29 @@ var _ = Describe("Cubic Sender", func() {
 		// forward acknowledgements, so the first two incoming acks will trigger
 		// PRR immediately.
 		// Ack 20 packets in 10 acks to raise the CWND to 30.
-		const kNumberOfAcks = 10
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 10
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		SendAvailableSendWindow()
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Lose one more than the congestion window reduction, so that after loss,
 		// bytes_in_flight is lesser than the congestion window.
-		send_window_after_loss := protocol.ByteCount(renoBeta * float32(expected_send_window))
-		num_packets_to_lose := (expected_send_window-send_window_after_loss)/protocol.DefaultTCPMSS + 1
-		LoseNPackets(int(num_packets_to_lose))
+		sendWindowAfterLoss := protocol.ByteCount(renoBeta * float32(expectedSendWindow))
+		numPacketsToLose := (expectedSendWindow-sendWindowAfterLoss)/protocol.DefaultTCPMSS + 1
+		LoseNPackets(int(numPacketsToLose))
 		// Immediately after the loss, ensure at least one packet can be sent.
 		// Losses without subsequent acks can occur with timer based loss detection.
 		Expect(sender.TimeUntilSend(bytesInFlight)).To(BeZero())
 		AckNPackets(1)

 		// We should now have fallen out of slow start with a reduced window.
-		expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow = protocol.ByteCount(float32(expectedSendWindow) * renoBeta)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Only 2 packets should be allowed to be sent, per PRR-SSRB
 		Expect(SendAvailableSendWindow()).To(Equal(2))
@@ -371,7 +371,7 @@ var _ = Describe("Cubic Sender", func() {
 		Expect(SendAvailableSendWindow()).To(Equal(2))

 		// Exit recovery and return to sending at the new rate.
-		for i := 0; i < kNumberOfAcks; i++ {
+		for i := 0; i < numberOfAcks; i++ {
 			AckNPackets(1)
 			Expect(SendAvailableSendWindow()).To(Equal(1))
 		}
@@ -398,114 +398,114 @@ var _ = Describe("Cubic Sender", func() {
 	})

 	It("retransmission delay", func() {
-		const kRttMs = 10 * time.Millisecond
-		const kDeviationMs = 3 * time.Millisecond
+		const rtt = 10 * time.Millisecond
+		const deviation = 3 * time.Millisecond
 		Expect(sender.RetransmissionDelay()).To(BeZero())

-		rttStats.UpdateRTT(kRttMs, 0, clock.Now())
+		rttStats.UpdateRTT(rtt, 0, clock.Now())

 		// Initial value is to set the median deviation to half of the initial
 		// rtt, the median in then multiplied by a factor of 4 and finally the
 		// smoothed rtt is added which is the initial rtt.
-		expected_delay := kRttMs + kRttMs/2*4
-		Expect(sender.RetransmissionDelay()).To(Equal(expected_delay))
+		expectedDelay := rtt + rtt/2*4
+		Expect(sender.RetransmissionDelay()).To(Equal(expectedDelay))

 		for i := 0; i < 100; i++ {
 			// run to make sure that we converge.
-			rttStats.UpdateRTT(kRttMs+kDeviationMs, 0, clock.Now())
-			rttStats.UpdateRTT(kRttMs-kDeviationMs, 0, clock.Now())
+			rttStats.UpdateRTT(rtt+deviation, 0, clock.Now())
+			rttStats.UpdateRTT(rtt-deviation, 0, clock.Now())
 		}
-		expected_delay = kRttMs + kDeviationMs*4
+		expectedDelay = rtt + deviation*4

-		Expect(rttStats.SmoothedRTT()).To(BeNumerically("~", kRttMs, time.Millisecond))
-		Expect(sender.RetransmissionDelay()).To(BeNumerically("~", expected_delay, time.Millisecond))
+		Expect(rttStats.SmoothedRTT()).To(BeNumerically("~", rtt, time.Millisecond))
+		Expect(sender.RetransmissionDelay()).To(BeNumerically("~", expectedDelay, time.Millisecond))
 		Expect(sender.BandwidthEstimate() / BytesPerSecond).To(Equal(Bandwidth(
 			sender.GetCongestionWindow() * protocol.ByteCount(time.Second) / protocol.ByteCount(rttStats.SmoothedRTT()),
 		)))
 	})

 	It("slow start max send window", func() {
-		const kMaxCongestionWindowTCP = 50
-		const kNumberOfAcks = 100
-		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindowTCP)
+		const maxCongestionWindowTCP = 50
+		const numberOfAcks = 100
+		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, maxCongestionWindowTCP)

-		for i := 0; i < kNumberOfAcks; i++ {
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
-		expected_send_window := kMaxCongestionWindowTCP * protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expected_send_window)))
+		expectedSendWindow := maxCongestionWindowTCP * protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expectedSendWindow)))
 	})

 	It("tcp reno max congestion window", func() {
-		const kMaxCongestionWindowTCP = 50
-		const kNumberOfAcks = 1000
-		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindowTCP)
+		const maxCongestionWindowTCP = 50
+		const numberOfAcks = 1000
+		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, maxCongestionWindowTCP)

 		SendAvailableSendWindow()
 		AckNPackets(2)
 		// Make sure we fall out of slow start.
 		LoseNPackets(1)

-		for i := 0; i < kNumberOfAcks; i++ {
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}

-		expected_send_window := kMaxCongestionWindowTCP * protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expected_send_window)))
+		expectedSendWindow := maxCongestionWindowTCP * protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expectedSendWindow)))
 	})

 	It("tcp cubic max congestion window", func() {
-		const kMaxCongestionWindowTCP = 50
+		const maxCongestionWindowTCP = 50
 		// Set to 10000 to compensate for small cubic alpha.
-		const kNumberOfAcks = 10000
+		const numberOfAcks = 10000

-		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindowTCP)
+		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, maxCongestionWindowTCP)

 		SendAvailableSendWindow()
 		AckNPackets(2)
 		// Make sure we fall out of slow start.
 		LoseNPackets(1)

-		for i := 0; i < kNumberOfAcks; i++ {
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}

-		expected_send_window := kMaxCongestionWindowTCP * protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expected_send_window)))
+		expectedSendWindow := maxCongestionWindowTCP * protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expectedSendWindow)))
 	})

 	It("tcp cubic reset epoch on quiescence", func() {
-		const kMaxCongestionWindow = 50
-		const kMaxCongestionWindowBytes = kMaxCongestionWindow * protocol.DefaultTCPMSS
-		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindow)
+		const maxCongestionWindow = 50
+		const maxCongestionWindowBytes = maxCongestionWindow * protocol.DefaultTCPMSS
+		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, maxCongestionWindow)

-		num_sent := SendAvailableSendWindow()
+		numSent := SendAvailableSendWindow()

 		// Make sure we fall out of slow start.
-		saved_cwnd := sender.GetCongestionWindow()
+		saveCwnd := sender.GetCongestionWindow()
 		LoseNPackets(1)
-		Expect(saved_cwnd).To(BeNumerically(">", sender.GetCongestionWindow()))
+		Expect(saveCwnd).To(BeNumerically(">", sender.GetCongestionWindow()))

 		// Ack the rest of the outstanding packets to get out of recovery.
-		for i := 1; i < num_sent; i++ {
+		for i := 1; i < numSent; i++ {
 			AckNPackets(1)
 		}
 		Expect(bytesInFlight).To(BeZero())

 		// Send a new window of data and ack all; cubic growth should occur.
-		saved_cwnd = sender.GetCongestionWindow()
-		num_sent = SendAvailableSendWindow()
-		for i := 0; i < num_sent; i++ {
+		saveCwnd = sender.GetCongestionWindow()
+		numSent = SendAvailableSendWindow()
+		for i := 0; i < numSent; i++ {
 			AckNPackets(1)
 		}
-		Expect(saved_cwnd).To(BeNumerically("<", sender.GetCongestionWindow()))
-		Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
+		Expect(saveCwnd).To(BeNumerically("<", sender.GetCongestionWindow()))
+		Expect(maxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
 		Expect(bytesInFlight).To(BeZero())

 		// Quiescent time of 100 seconds
@@ -513,39 +513,39 @@ var _ = Describe("Cubic Sender", func() {

 		// Send new window of data and ack one packet. Cubic epoch should have
 		// been reset; ensure cwnd increase is not dramatic.
-		saved_cwnd = sender.GetCongestionWindow()
+		saveCwnd = sender.GetCongestionWindow()
 		SendAvailableSendWindow()
 		AckNPackets(1)
-		Expect(saved_cwnd).To(BeNumerically("~", sender.GetCongestionWindow(), protocol.DefaultTCPMSS))
-		Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
+		Expect(saveCwnd).To(BeNumerically("~", sender.GetCongestionWindow(), protocol.DefaultTCPMSS))
+		Expect(maxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
 	})

 	It("tcp cubic shifted epoch on quiescence", func() {
-		const kMaxCongestionWindow = 50
-		const kMaxCongestionWindowBytes = kMaxCongestionWindow * protocol.DefaultTCPMSS
-		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindow)
+		const maxCongestionWindow = 50
+		const maxCongestionWindowBytes = maxCongestionWindow * protocol.DefaultTCPMSS
+		sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, maxCongestionWindow)

-		num_sent := SendAvailableSendWindow()
+		numSent := SendAvailableSendWindow()

 		// Make sure we fall out of slow start.
-		saved_cwnd := sender.GetCongestionWindow()
+		saveCwnd := sender.GetCongestionWindow()
 		LoseNPackets(1)
-		Expect(saved_cwnd).To(BeNumerically(">", sender.GetCongestionWindow()))
+		Expect(saveCwnd).To(BeNumerically(">", sender.GetCongestionWindow()))

 		// Ack the rest of the outstanding packets to get out of recovery.
-		for i := 1; i < num_sent; i++ {
+		for i := 1; i < numSent; i++ {
 			AckNPackets(1)
 		}
 		Expect(bytesInFlight).To(BeZero())

 		// Send a new window of data and ack all; cubic growth should occur.
-		saved_cwnd = sender.GetCongestionWindow()
-		num_sent = SendAvailableSendWindow()
-		for i := 0; i < num_sent; i++ {
+		saveCwnd = sender.GetCongestionWindow()
+		numSent = SendAvailableSendWindow()
+		for i := 0; i < numSent; i++ {
 			AckNPackets(1)
 		}
-		Expect(saved_cwnd).To(BeNumerically("<", sender.GetCongestionWindow()))
-		Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
+		Expect(saveCwnd).To(BeNumerically("<", sender.GetCongestionWindow()))
+		Expect(maxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
 		Expect(bytesInFlight).To(BeZero())

 		// Quiescent time of 100 seconds
@@ -553,27 +553,27 @@ var _ = Describe("Cubic Sender", func() {

 		// Send new window of data and ack one packet. Cubic epoch should have
 		// been reset; ensure cwnd increase is not dramatic.
-		saved_cwnd = sender.GetCongestionWindow()
+		saveCwnd = sender.GetCongestionWindow()
 		SendAvailableSendWindow()
 		AckNPackets(1)
-		Expect(saved_cwnd).To(BeNumerically("~", sender.GetCongestionWindow(), protocol.DefaultTCPMSS))
-		Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
+		Expect(saveCwnd).To(BeNumerically("~", sender.GetCongestionWindow(), protocol.DefaultTCPMSS))
+		Expect(maxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
 	})

 	It("multiple losses in one window", func() {
 		SendAvailableSendWindow()
-		initial_window := sender.GetCongestionWindow()
+		initialWindow := sender.GetCongestionWindow()
 		LosePacket(ackedPacketNumber + 1)
-		post_loss_window := sender.GetCongestionWindow()
-		Expect(initial_window).To(BeNumerically(">", post_loss_window))
+		postLossWindow := sender.GetCongestionWindow()
+		Expect(initialWindow).To(BeNumerically(">", postLossWindow))
 		LosePacket(ackedPacketNumber + 3)
-		Expect(sender.GetCongestionWindow()).To(Equal(post_loss_window))
+		Expect(sender.GetCongestionWindow()).To(Equal(postLossWindow))
 		LosePacket(packetNumber - 1)
-		Expect(sender.GetCongestionWindow()).To(Equal(post_loss_window))
+		Expect(sender.GetCongestionWindow()).To(Equal(postLossWindow))

 		// Lose a later packet and ensure the window decreases.
 		LosePacket(packetNumber)
-		Expect(post_loss_window).To(BeNumerically(">", sender.GetCongestionWindow()))
+		Expect(postLossWindow).To(BeNumerically(">", sender.GetCongestionWindow()))
 	})

 	It("don't track ack packets", func() {
@@ -628,21 +628,21 @@ var _ = Describe("Cubic Sender", func() {
 	It("2 connection congestion avoidance at end of recovery", func() {
 		sender.SetNumEmulatedConnections(2)
 		// Ack 10 packets in 5 acks to raise the CWND to 20.
-		const kNumberOfAcks = 5
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 5
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		SendAvailableSendWindow()
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		LoseNPackets(1)

 		// We should now have fallen out of slow start with a reduced window.
-		expected_send_window = protocol.ByteCount(float32(expected_send_window) * sender.RenoBeta())
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow = protocol.ByteCount(float32(expectedSendWindow) * sender.RenoBeta())
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// No congestion window growth should occur in recovery phase, i.e., until the
 		// currently outstanding 20 packets are acked.
@@ -651,53 +651,53 @@ var _ = Describe("Cubic Sender", func() {
 			SendAvailableSendWindow()
 			Expect(sender.InRecovery()).To(BeTrue())
 			AckNPackets(2)
-			Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+			Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 		}
 		Expect(sender.InRecovery()).To(BeFalse())

 		// Out of recovery now. Congestion window should not grow for half an RTT.
-		packets_in_send_window := expected_send_window / protocol.DefaultTCPMSS
+		packetsInSendWindow := expectedSendWindow / protocol.DefaultTCPMSS
 		SendAvailableSendWindow()
-		AckNPackets(int(packets_in_send_window/2 - 2))
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		AckNPackets(int(packetsInSendWindow/2 - 2))
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Next ack should increase congestion window by 1MSS.
 		SendAvailableSendWindow()
 		AckNPackets(2)
-		expected_send_window += protocol.DefaultTCPMSS
-		packets_in_send_window += 1
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow += protocol.DefaultTCPMSS
+		packetsInSendWindow++
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Congestion window should remain steady again for half an RTT.
 		SendAvailableSendWindow()
-		AckNPackets(int(packets_in_send_window/2 - 1))
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		AckNPackets(int(packetsInSendWindow/2 - 1))
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Next ack should cause congestion window to grow by 1MSS.
 		SendAvailableSendWindow()
 		AckNPackets(2)
-		expected_send_window += protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow += protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 	})

 	It("1 connection congestion avoidance at end of recovery", func() {
 		sender.SetNumEmulatedConnections(1)
 		// Ack 10 packets in 5 acks to raise the CWND to 20.
-		const kNumberOfAcks = 5
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 5
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		SendAvailableSendWindow()
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		LoseNPackets(1)

 		// We should now have fallen out of slow start with a reduced window.
-		expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow = protocol.ByteCount(float32(expectedSendWindow) * renoBeta)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// No congestion window growth should occur in recovery phase, i.e., until the
 		// currently outstanding 20 packets are acked.
@@ -706,23 +706,23 @@ var _ = Describe("Cubic Sender", func() {
 			SendAvailableSendWindow()
 			Expect(sender.InRecovery()).To(BeTrue())
 			AckNPackets(2)
-			Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+			Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 		}
 		Expect(sender.InRecovery()).To(BeFalse())

 		// Out of recovery now. Congestion window should not grow during RTT.
-		for i := protocol.ByteCount(0); i < expected_send_window/protocol.DefaultTCPMSS-2; i += 2 {
+		for i := protocol.ByteCount(0); i < expectedSendWindow/protocol.DefaultTCPMSS-2; i += 2 {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
-			Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+			Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 		}

 		// Next ack should cause congestion window to grow by 1MSS.
 		SendAvailableSendWindow()
 		AckNPackets(2)
-		expected_send_window += protocol.DefaultTCPMSS
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow += protocol.DefaultTCPMSS
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
 	})

 	// TEST_F(TcpCubicSenderPacketsTest, BandwidthResumption) {
@@ -747,9 +747,9 @@ var _ = Describe("Cubic Sender", func() {
 	//
 	// // Resumed CWND is limited to be in a sensible range.
 	// cached_network_params.set_bandwidth_estimate_bytes_per_second(
-	// (kMaxCongestionWindow + 1) * protocol.DefaultTCPMSS);
+	// (maxCongestionWindow + 1) * protocol.DefaultTCPMSS);
 	// sender.ResumeConnectionState(cached_network_params, false);
-	// Expect( sender.congestion_window()).To(Equal(kMaxCongestionWindow))
+	// Expect( sender.congestion_window()).To(Equal(maxCongestionWindow))
 	//
 	// cached_network_params.set_bandwidth_estimate_bytes_per_second(
 	// (kMinCongestionWindowForBandwidthResumption - 1) * protocol.DefaultTCPMSS);
@@ -792,24 +792,24 @@ var _ = Describe("Cubic Sender", func() {

 		// Starts with slow start.
 		sender.SetNumEmulatedConnections(1)
-		const kNumberOfAcks = 10
-		for i := 0; i < kNumberOfAcks; i++ {
+		const numberOfAcks = 10
+		for i := 0; i < numberOfAcks; i++ {
 			// Send our full send window.
 			SendAvailableSendWindow()
 			AckNPackets(2)
 		}
 		SendAvailableSendWindow()
-		expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
+		expectedSendWindow := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * numberOfAcks)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))

 		// Loses a packet to exit slow start.
 		LoseNPackets(1)

 		// We should now have fallen out of slow start with a reduced window. Slow
 		// start threshold is also updated.
-		expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
-		Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
-		Expect(sender.SlowstartThreshold()).To(Equal(protocol.PacketNumber(expected_send_window / protocol.DefaultTCPMSS)))
+		expectedSendWindow = protocol.ByteCount(float32(expectedSendWindow) * renoBeta)
+		Expect(sender.GetCongestionWindow()).To(Equal(expectedSendWindow))
+		Expect(sender.SlowstartThreshold()).To(Equal(protocol.PacketNumber(expectedSendWindow / protocol.DefaultTCPMSS)))

 		// Resets cwnd and slow start threshold on connection migrations.
 		sender.OnConnectionMigration()