
Commit 845bea3

Remove unnecessary zeroing of row conversion buffer
1 parent c56ec18 commit 845bea3

File tree

1 file changed: 0 additions, 8 deletions

sql/catalyst/src/main/java/org/apache/spark/sql/execution/UnsafeExternalRowSorter.java

Lines changed: 0 additions & 8 deletions
@@ -18,9 +18,7 @@
 package org.apache.spark.sql.execution;
 
 import java.io.IOException;
-import java.util.Arrays;
 
-import org.apache.spark.sql.Row;
 import scala.collection.Iterator;
 import scala.math.Ordering;
 
@@ -100,12 +98,6 @@ void insertRow(InternalRow row) throws IOException {
     final int sizeRequirement = rowConverter.getSizeRequirement(row);
     if (sizeRequirement > rowConversionBuffer.length) {
       rowConversionBuffer = new byte[sizeRequirement];
-    } else {
-      // Zero out the buffer that's used to hold the current row. This is necessary in order
-      // to ensure that rows hash properly, since garbage data from the previous row could
-      // otherwise end up as padding in this row. As a performance optimization, we only zero
-      // out the portion of the buffer that we'll actually write to.
-      Arrays.fill(rowConversionBuffer, 0, sizeRequirement, (byte) 0);
     }
     final int bytesWritten = rowConverter.writeRow(
         row, rowConversionBuffer, PlatformDependent.BYTE_ARRAY_OFFSET, objPool);
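
The deleted else-branch documents why the Arrays.fill call was originally there: stale bytes from a previous, larger row could otherwise survive as padding in the reused buffer and affect row hashing. Dropping the fill is only safe if the converter overwrites every byte it reports in getSizeRequirement. The sketch below is plain Java, not Spark code; the class and method names are hypothetical. It illustrates the general buffer-reuse pattern assumed here: a scratch array grows on demand and is never zeroed, because each write covers the full region that will later be read.

// Minimal sketch (not the Spark implementation) of reusing a growing scratch
// buffer without zeroing it between writes. Assumption: the writer fills every
// byte in [0, sizeRequirement) for the current row, so stale bytes from a
// previous, larger row can never be read back.
class ScratchBufferSketch {
  private byte[] buffer = new byte[64];

  // Copies `payload` into the reused buffer and returns the number of bytes written.
  int write(byte[] payload) {
    final int sizeRequirement = payload.length;
    if (sizeRequirement > buffer.length) {
      // Growing allocates a fresh (zero-filled) array; no explicit fill is needed.
      buffer = new byte[sizeRequirement];
    }
    // Every byte that will later be read is overwritten here, which is what
    // makes a separate zeroing pass redundant.
    System.arraycopy(payload, 0, buffer, 0, sizeRequirement);
    return sizeRequirement;
  }

  public static void main(String[] args) {
    ScratchBufferSketch s = new ScratchBufferSketch();
    s.write("a longer first row".getBytes());
    int n = s.write("short row".getBytes());
    System.out.println("bytes written: " + n);
  }
}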
