@@ -405,7 +405,7 @@ where
 // - pass large buffers to readers that do not initialize the spare capacity. this can amortize per-call overheads
 // - and finally pass not-too-small and not-too-large buffers to Windows read APIs because they manage to suffer from both problems
 //   at the same time, i.e. small reads suffer from syscall overhead, all reads incur costs proportional to buffer size (#110650)
-//
+// - also avoid <4 byte reads as this may split UTF-8 code points, which can be a problem for Windows console reads (#142847)
 pub(crate) fn default_read_to_end<R: Read + ?Sized>(
     r: &mut R,
     buf: &mut Vec<u8>,
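For context on the new comment line: UTF-8 encodes a code point in up to 4 bytes, so a read buffer shorter than that can hand the caller a prefix of a code point that is not valid UTF-8 on its own. A minimal, self-contained illustration of that effect (this is not the Windows console code path referenced by #142847, just the underlying byte-splitting problem):

```rust
use std::io::{Cursor, Read};

fn main() -> std::io::Result<()> {
    // U+1F980 (the crab emoji) encodes to 4 bytes: F0 9F A6 80.
    let mut reader = Cursor::new("🦀".as_bytes());

    // A 3-byte read returns only a prefix of the code point...
    let mut first = [0u8; 3];
    let n = reader.read(&mut first)?;
    assert_eq!(n, 3);
    // ...which is not valid UTF-8 on its own.
    assert!(std::str::from_utf8(&first[..n]).is_err());

    // Only after the next read can the bytes be reassembled and decoded.
    let mut rest = [0u8; 1];
    let m = reader.read(&mut rest)?;
    let mut all = first[..n].to_vec();
    all.extend_from_slice(&rest[..m]);
    assert_eq!(std::str::from_utf8(&all).unwrap(), "🦀");
    Ok(())
}
```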
@@ -452,7 +452,7 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
     let mut consecutive_short_reads = 0;

     loop {
-        if buf.len() == buf.capacity() && buf.capacity() == start_cap {
+        if buf.spare_capacity_mut().len() < PROBE_SIZE && buf.capacity() == start_cap {
             // The buffer might be an exact fit. Let's read into a probe buffer
             // and see if it returns `Ok(0)`. If so, we've avoided an
             // unnecessary doubling of the capacity. But if not, append the
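The changed condition probes not only when the buffer is exactly full, but whenever fewer than `PROBE_SIZE` bytes of spare capacity remain while the `Vec` is still at its caller-provided capacity. A simplified sketch of the probe pattern follows; the function name is a stand-in, `PROBE_SIZE` mirrors the constant the diff references with an assumed value, and unlike the real function (which reads into uninitialized spare capacity via `BorrowedBuf` and tracks `start_cap`) it uses only safe, stable APIs:

```rust
use std::io::Read;

// Assumed value for this sketch; the real constant lives in std's io module.
const PROBE_SIZE: usize = 32;

// Simplified sketch, not the std implementation.
fn read_to_end_probing<R: Read>(r: &mut R, buf: &mut Vec<u8>) -> std::io::Result<usize> {
    let start_len = buf.len();
    loop {
        if buf.spare_capacity_mut().len() < PROBE_SIZE {
            // Not enough room for a full-size read: probe with a stack
            // buffer first. `Ok(0)` means EOF, and we never grew the Vec.
            let mut probe = [0u8; PROBE_SIZE];
            let read = r.read(&mut probe)?;
            if read == 0 {
                return Ok(buf.len() - start_len);
            }
            buf.extend_from_slice(&probe[..read]);
            continue;
        }
        // Plenty of spare room: read into it directly. Zero-filling is a
        // safe stand-in for std's read into uninitialized capacity.
        let len = buf.len();
        let cap = buf.capacity();
        buf.resize(cap, 0);
        let read = r.read(&mut buf[len..])?;
        buf.truncate(len + read);
        if read == 0 {
            return Ok(buf.len() - start_len);
        }
    }
}
```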
@@ -462,12 +462,13 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
             if read == 0 {
                 return Ok(buf.len() - start_len);
             }
+            // In the case of very short reads, continue to use the stack buffer
+            // until either we reach the end or we need to reallocate.
+            continue;
         }

-        if buf.len() == buf.capacity() {
-            // buf is full, need more space
-            buf.try_reserve(PROBE_SIZE)?;
-        }
+        // Avoid unnecessarily short reads by ensuring there's at least PROBE_SIZE space available.
+        buf.try_reserve(PROBE_SIZE)?;

         let mut spare = buf.spare_capacity_mut();
         let buf_len = cmp::min(spare.len(), max_read_size);
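Dropping the `buf.len() == buf.capacity()` guard works because `Vec::try_reserve` is a no-op when the requested spare capacity already exists, so the unconditional call does not reallocate on every iteration; it only enforces a minimum read size while still surfacing allocation failure as an error. A hedged sketch of the resulting reserve-then-read step (`PROBE_SIZE` is assumed as above, and zero-filling again stands in for std's read into uninitialized capacity):

```rust
use std::io::{Error, ErrorKind, Read};

// Assumed value for this sketch.
const PROBE_SIZE: usize = 32;

// Sketch of the unconditional-reserve pattern; not the std implementation.
fn reserve_then_read<R: Read>(r: &mut R, buf: &mut Vec<u8>) -> std::io::Result<usize> {
    // No-op if at least PROBE_SIZE bytes of spare capacity already exist;
    // otherwise grows the Vec, reporting allocation failure as an error.
    buf.try_reserve(PROBE_SIZE).map_err(|_| Error::from(ErrorKind::OutOfMemory))?;

    // Read into the guaranteed spare room.
    let len = buf.len();
    let cap = buf.capacity();
    buf.resize(cap, 0);
    let read = r.read(&mut buf[len..])?;
    buf.truncate(len + read);
    Ok(read)
}
```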