26 changes: 22 additions & 4 deletions quiche/src/stream/mod.rs
@@ -135,10 +135,16 @@ pub struct StreamMap<F: BufFactory = DefaultBufFactory> {
local_max_streams_bidi: u64,
local_max_streams_bidi_next: u64,

/// Initial maximum bidirectional stream count.
initial_max_streams_bidi: u64,

/// Local maximum unidirectional stream count limit.
local_max_streams_uni: u64,
local_max_streams_uni_next: u64,

/// Initial maximum unidirectional stream count.
initial_max_streams_uni: u64,

/// The total number of bidirectional streams opened by the local endpoint.
local_opened_streams_bidi: u64,

@@ -193,9 +199,11 @@ impl<F: BufFactory> StreamMap<F> {
StreamMap {
local_max_streams_bidi: max_streams_bidi,
local_max_streams_bidi_next: max_streams_bidi,
initial_max_streams_bidi: max_streams_bidi,

local_max_streams_uni: max_streams_uni,
local_max_streams_uni_next: max_streams_uni,
initial_max_streams_uni: max_streams_uni,

max_stream_window,

@@ -654,18 +662,28 @@ impl<F: BufFactory> StreamMap<F> {

/// Returns true if the max bidirectional streams count needs to be updated
/// by sending a MAX_STREAMS frame to the peer.
///
/// This only sends MAX_STREAMS when available capacity is at or below 50%
/// of the initial maximum streams target.
pub fn should_update_max_streams_bidi(&self) -> bool {
let available = self
.local_max_streams_bidi
.saturating_sub(self.peer_opened_streams_bidi);
self.local_max_streams_bidi_next != self.local_max_streams_bidi &&
self.local_max_streams_bidi_next / 2 >
self.local_max_streams_bidi - self.peer_opened_streams_bidi

antoniovicente (Contributor) commented on Nov 13, 2025:

So the logic before was to send an increase following each stream completion?

Consider an application that requires 9 active requests at all times and the stream limit is 10. If 2 of the streams close and the client attempts to re-create them, it will only be able to create 1 of the two and remain in this state until 3 other streams close and trigger a max stream update.

So the application would need 2 QUIC connections in order to work reliably in this case even though the number of streams it wants to keep open is less than the per connection stream limit.

The PR author (Contributor Author) replied:

In that case, I would expect "available" to be 1, which is less than 10/2, so we should be sending MAX_STREAMS. I added another test case to try to capture this scenario.

available <= self.initial_max_streams_bidi / 2
}

/// Returns true if the max unidirectional streams count needs to be updated
/// by sending a MAX_STREAMS frame to the peer.
///
This only sends MAX_STREAMS when available capacity is at or below 50% of
/// the initial maximum streams target.
pub fn should_update_max_streams_uni(&self) -> bool {
let available = self
.local_max_streams_uni
.saturating_sub(self.peer_opened_streams_uni);
self.local_max_streams_uni_next != self.local_max_streams_uni &&
self.local_max_streams_uni_next / 2 >
self.local_max_streams_uni - self.peer_opened_streams_uni
available <= self.initial_max_streams_uni / 2
}

/// Returns the number of active streams in the map.
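To make the review discussion above concrete, here is a small standalone sketch of just the availability threshold introduced by this change. The Limits struct and its fields are illustrative stand-ins for the StreamMap counters named in the diff, not quiche's actual types, and the bookkeeping around local_max_streams_bidi_next is deliberately left out.

/// Illustrative stand-in for the StreamMap counters referenced in the diff.
struct Limits {
    /// Currently advertised maximum stream count (local_max_streams_bidi).
    local_max: u64,
    /// Total number of streams the peer has opened so far
    /// (peer_opened_streams_bidi).
    peer_opened: u64,
    /// Limit advertised in the transport parameters
    /// (initial_max_streams_bidi).
    initial_max: u64,
}

impl Limits {
    /// The new rule: refresh the limit once the remaining capacity drops
    /// to half of the initial allowance or below.
    fn should_update_max_streams(&self) -> bool {
        let available = self.local_max.saturating_sub(self.peer_opened);
        available <= self.initial_max / 2
    }
}

fn main() {
    // Reviewer's scenario: a limit of 10 with 9 streams already opened by
    // the peer leaves available = 1, and 1 <= 10 / 2, so a MAX_STREAMS
    // update is due right away rather than after several more streams close.
    let aged = Limits { local_max: 10, peer_opened: 9, initial_max: 10 };
    assert!(aged.should_update_max_streams());

    // Early in a connection only 2 of 10 slots are used, so available = 8 > 5
    // and no MAX_STREAMS frame is needed yet.
    let fresh = Limits { local_max: 10, peer_opened: 2, initial_max: 10 };
    assert!(!fresh.should_update_max_streams());
}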
99 changes: 99 additions & 0 deletions quiche/src/tests.rs
@@ -4176,6 +4176,105 @@ fn stream_limit_update_uni(
assert_eq!(pipe.server.readable().len(), 3);
}

#[rstest]
/// Tests that MAX_STREAMS is correctly sent only when available capacity
/// reaches the threshold (50% of initial).
fn max_streams_sent_only_when_at_threshold(
#[values("cubic", "bbr2", "bbr2_gcongestion")] cc_algorithm_name: &str,
) {
let mut config = Config::new(PROTOCOL_VERSION).unwrap();
assert_eq!(config.set_cc_algorithm_name(cc_algorithm_name), Ok(()));
config
.load_cert_chain_from_pem_file("examples/cert.crt")
.unwrap();
config
.load_priv_key_from_pem_file("examples/cert.key")
.unwrap();
config
.set_application_protos(&[b"proto1", b"proto2"])
.unwrap();
config.set_initial_max_data(1000);
config.set_initial_max_stream_data_bidi_local(100);
config.set_initial_max_stream_data_bidi_remote(100);
config.set_initial_max_streams_bidi(6);
config.set_initial_max_streams_uni(0);
config.verify_peer(false);

let mut pipe = test_utils::Pipe::with_config(&mut config).unwrap();
assert_eq!(pipe.handshake(), Ok(()));

let mut buf = [0; 100];

// Test aged connection behavior: initial max_streams_bidi = 6, threshold = 3
// Complete 10 batches of 6 streams (60 total) to simulate an aged connection
// This will increase next to 66
for batch in 0..=9 {
// Client side: send 6 streams with fin
for i in 0..6 {
let stream_id = (batch * 6 + i) * 4;
pipe.client.stream_send(stream_id, b"a", true).ok();
}
pipe.advance().ok();

// Server side: receive and send back with fin
for i in 0..6 {
let stream_id = (batch * 6 + i) * 4;
pipe.server.stream_recv(stream_id, &mut buf).ok();
pipe.server.stream_send(stream_id, b"a", true).ok();
}
pipe.advance().ok();

// Client side: receive to complete
for i in 0..6 {
let stream_id = (batch * 6 + i) * 4;
pipe.client.stream_recv(stream_id, &mut buf).ok();
}
pipe.advance().ok();
}

// At this point: next = 66, completed = 60, available = 6
// Complete 2 more streams → available = 4 (> 3)
// MAX_STREAMS should NOT be sent
assert_eq!(pipe.server.streams.max_streams_bidi_next(), 66);
assert_eq!(pipe.client.streams.peer_streams_left_bidi(), 6);
pipe.client.stream_send(240, b"a", true).ok();
pipe.client.stream_send(244, b"a", true).ok();
pipe.advance().ok();

pipe.server.stream_recv(240, &mut buf).ok();
pipe.server.stream_recv(244, &mut buf).ok();
pipe.server.stream_send(240, b"a", true).ok();
pipe.server.stream_send(244, b"a", true).ok();
pipe.advance().ok();

pipe.client.stream_recv(240, &mut buf).ok();
pipe.client.stream_recv(244, &mut buf).ok();
pipe.advance().ok();

// Verify MAX_STREAMS was NOT sent (4 > 3 threshold)
assert_eq!(pipe.client.streams.peer_streams_left_bidi(), 4);

// Complete 1 more stream → available = 3 (== 3)
// MAX_STREAMS should be sent (new limit: 72)
pipe.client.stream_send(248, b"a", true).ok();
pipe.advance().ok();

pipe.server.stream_recv(248, &mut buf).ok();
pipe.server.stream_send(248, b"a", true).ok();
pipe.advance().ok();

pipe.client.stream_recv(248, &mut buf).ok();
pipe.advance().ok();

// Verify MAX_STREAMS was sent (limit increased from 66)
let left_after = pipe.client.streams.peer_streams_left_bidi();
assert!(
left_after > 4,
"MAX_STREAMS should have been sent, expected > 4 streams left, got {}",
left_after
);
}
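
As a side note, the checkpoints exercised by the test above reduce to simple arithmetic on the new threshold. The helper below is only an illustration of that arithmetic, not part of the change: with initial_max_streams_bidi = 6 the threshold is 6 / 2 = 3, so available counts of 6 and 4 leave the limit untouched, while 3 triggers a MAX_STREAMS frame.

// Hypothetical helper mirroring the threshold arithmetic the test relies on.
fn max_streams_update_due(available: u64, initial_max: u64) -> bool {
    available <= initial_max / 2
}

fn main() {
    assert!(!max_streams_update_due(6, 6)); // after the aged batches: 6 > 3, no frame
    assert!(!max_streams_update_due(4, 6)); // after two more streams: 4 > 3, still no frame
    assert!(max_streams_update_due(3, 6)); // one more stream: 3 <= 3, MAX_STREAMS is sent
}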

#[rstest]
/// Tests that the stream's fin flag is properly flushed even if there's no
/// data in the buffer, and that the buffer becomes readable on the other