@@ -647,14 +647,27 @@ static void dpm_async_resume_children(struct device *dev, async_func_t func)
 	/*
 	 * Start processing "async" children of the device unless it's been
 	 * started already for them.
-	 *
-	 * This could have been done for the device's "async" consumers too, but
-	 * they either need to wait for their parents or the processing has
-	 * already started for them after their parents were processed.
 	 */
 	device_for_each_child(dev, func, dpm_async_with_cleanup);
 }
 
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+	struct device_link *link;
+	int idx;
+
+	dpm_async_resume_children(dev, func);
+
+	idx = device_links_read_lock();
+
+	/* Start processing the device's "async" consumers. */
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_async_with_cleanup(link->consumer, func);
+
+	device_links_read_unlock(idx);
+}
+
 static void dpm_clear_async_state(struct device *dev)
 {
 	reinit_completion(&dev->power.completion);
@@ -663,7 +676,14 @@ static void dpm_clear_async_state(struct device *dev)
 
 static bool dpm_root_device(struct device *dev)
 {
-	return !dev->parent;
+	lockdep_assert_held(&dpm_list_mtx);
+
+	/*
+	 * Since this function is required to run under dpm_list_mtx, the
+	 * list_empty() below will only return true if the device's list of
+	 * suppliers is actually empty before calling it.
+	 */
+	return !dev->parent && list_empty(&dev->links.suppliers);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie);
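
For context, a rough sketch of how dpm_root_device() is consumed to seed the async pass; this is modeled on the seeding loop in dpm_noirq_resume_devices() in the same file and is an approximation, not code taken from this change:

	mutex_lock(&dpm_list_mtx);

	/*
	 * Sketch: only root devices (no parent, no suppliers) are started
	 * upfront; every other device is started later by
	 * dpm_async_resume_subordinate() once the device it depends on has
	 * completed.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume_noirq);
	}

The lockdep_assert_held(&dpm_list_mtx) added above is satisfied in such a caller because dpm_list_mtx is held around the whole list walk.
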
@@ -752,7 +772,7 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_noirq);
+	dpm_async_resume_subordinate(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -895,7 +915,7 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool async)
 		pm_dev_err(dev, state, async ? " async early" : " early", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_early);
+	dpm_async_resume_subordinate(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -1071,7 +1091,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 		pm_dev_err(dev, state, async ? " async" : "", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume);
+	dpm_async_resume_subordinate(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
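
To make the resulting ordering concrete, here is a minimal stand-alone sketch in plain C of the fan-out the three call-site changes above produce: once a device finishes a resume phase, processing is started for its children and for the consumers of its non-dormant device links. The data structures and names (toy_device, toy_resume, and so on) are invented for illustration, the calls are synchronous, and each real device additionally waits for all of its suppliers and its parent via completions, which the toy omits.

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPS 4

struct toy_device {
	const char *name;
	bool resumed;
	struct toy_device *children[MAX_DEPS];
	struct toy_device *consumers[MAX_DEPS];
	bool link_dormant[MAX_DEPS];		/* per-consumer link state */
};

static void toy_resume(struct toy_device *dev);

/* Rough, synchronous analogue of dpm_async_resume_subordinate(). */
static void toy_resume_subordinate(struct toy_device *dev)
{
	for (int i = 0; i < MAX_DEPS && dev->children[i]; i++)
		toy_resume(dev->children[i]);

	for (int i = 0; i < MAX_DEPS && dev->consumers[i]; i++)
		if (!dev->link_dormant[i])	/* skip DL_STATE_DORMANT-like links */
			toy_resume(dev->consumers[i]);
}

static void toy_resume(struct toy_device *dev)
{
	if (dev->resumed)
		return;

	dev->resumed = true;
	printf("resumed %s\n", dev->name);

	/* Fan out to subordinates only after this device has resumed. */
	toy_resume_subordinate(dev);
}

int main(void)
{
	struct toy_device consumer = { .name = "consumer" };
	struct toy_device child = { .name = "child" };
	struct toy_device root = {
		.name = "root",
		.children = { &child },
		.consumers = { &consumer },
	};

	/* "root" has no parent and no suppliers, cf. dpm_root_device(). */
	toy_resume(&root);
	return 0;
}
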