@@ -35,6 +35,7 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, ReturnAnswer}
35
35
import org.apache.spark.sql.catalyst.plans.physical.{Distribution, UnspecifiedDistribution}
36
36
import org.apache.spark.sql.catalyst.rules.{PlanChangeLogger, Rule}
37
37
import org.apache.spark.sql.catalyst.trees.TreeNodeTag
38
+ import org.apache.spark.sql.catalyst.util.sideBySide
38
39
import org.apache.spark.sql.errors.QueryExecutionErrors
39
40
import org.apache.spark.sql.execution._
40
41
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec._
@@ -306,7 +307,8 @@ case class AdaptiveSparkPlanExec(
306
307
val newCost = costEvaluator.evaluateCost(newPhysicalPlan)
307
308
if (newCost < origCost ||
308
309
(newCost == origCost && currentPhysicalPlan != newPhysicalPlan)) {
309
- logOnLevel(s"Plan changed from $currentPhysicalPlan to $newPhysicalPlan")
310
+ logOnLevel("Plan changed:\n" +
311
+   sideBySide(currentPhysicalPlan.treeString, newPhysicalPlan.treeString).mkString("\n"))
310
312
cleanUpTempTags(newPhysicalPlan)
311
313
currentPhysicalPlan = newPhysicalPlan
312
314
currentLogicalPlan = newLogicalPlan
@@ -335,7 +337,7 @@ case class AdaptiveSparkPlanExec(
335
337
if (!isSubquery && currentPhysicalPlan.exists(_.subqueries.nonEmpty)) {
336
338
getExecutionId.foreach(onUpdatePlan(_, Seq.empty))
337
339
}
338
- logOnLevel(s"Final plan: $currentPhysicalPlan")
340
+ logOnLevel(s"Final plan:\n$currentPhysicalPlan")
339
341
}
340
342
341
343
override def executeCollect(): Array[InternalRow] = {
0 commit comments