@@ -206,7 +206,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry,
  *
  * Must be called with the i_pages lock held.
  */
-static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
+static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
 {
 	void *entry;
 	struct wait_exceptional_entry_queue ewait;
@@ -235,6 +235,37 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 	}
 }
 
+/*
+ * Wait for the given entry to become unlocked. Caller must hold the i_pages
+ * lock and call either put_unlocked_entry() if it did not lock the entry or
+ * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
+ */
+static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
+{
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
+
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	while (unlikely(dax_is_locked(entry))) {
+		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+		prepare_to_wait_exclusive(wq, &ewait.wait,
+					  TASK_UNINTERRUPTIBLE);
+		xas_pause(xas);
+		xas_unlock_irq(xas);
+		schedule();
+		finish_wait(wq, &ewait.wait);
+		xas_lock_irq(xas);
+		entry = xas_load(xas);
+	}
+
+	if (xa_is_internal(entry))
+		return NULL;
+
+	return entry;
+}
+
 /*
  * The only thing keeping the address space around is the i_pages lock
  * (it's cycled in clear_inode() after removing the entries from i_pages)
@@ -250,7 +281,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 
 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 	/*
-	 * Unlike get_unlocked_entry() there is no guarantee that this
+	 * Unlike get_next_unlocked_entry() there is no guarantee that this
 	 * path ever successfully retrieves an unlocked entry before an
 	 * inode dies. Perform a non-exclusive wait in case this path
 	 * never successfully performs its own wake up.
@@ -581,7 +612,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
 retry:
 	pmd_downgrade = false;
 	xas_lock_irq(xas);
-	entry = get_unlocked_entry(xas, order);
+	entry = get_next_unlocked_entry(xas, order);
 
 	if (entry) {
 		if (dax_is_conflict(entry))
@@ -717,8 +748,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
 	xas_for_each(&xas, entry, end_idx) {
 		if (WARN_ON_ONCE(!xa_is_value(entry)))
 			continue;
-		if (unlikely(dax_is_locked(entry)))
-			entry = get_unlocked_entry(&xas, 0);
+		entry = wait_entry_unlocked_exclusive(&xas, entry);
 		if (entry)
 			page = dax_busy_page(entry);
 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -751,7 +781,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
 	void *entry;
 
 	xas_lock_irq(&xas);
-	entry = get_unlocked_entry(&xas, 0);
+	entry = get_next_unlocked_entry(&xas, 0);
 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 		goto out;
 	if (!trunc &&
@@ -777,7 +807,9 @@ static int __dax_clear_dirty_range(struct address_space *mapping,
 
 	xas_lock_irq(&xas);
 	xas_for_each(&xas, entry, end) {
-		entry = get_unlocked_entry(&xas, 0);
+		entry = wait_entry_unlocked_exclusive(&xas, entry);
+		if (!entry)
+			continue;
 		xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -941,7 +973,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 	if (unlikely(dax_is_locked(entry))) {
 		void *old_entry = entry;
 
-		entry = get_unlocked_entry(xas, 0);
+		entry = get_next_unlocked_entry(xas, 0);
 
 		/* Entry got punched out / reallocated? */
 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@ -1950,7 +1982,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 	vm_fault_t ret;
 
 	xas_lock_irq(&xas);
-	entry = get_unlocked_entry(&xas, order);
+	entry = get_next_unlocked_entry(&xas, order);
 	/* Did we race with someone splitting entry or so? */
 	if (!entry || dax_is_conflict(entry) ||
 	    (order == 0 && !dax_is_pte_entry(entry))) {
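
For context, the sketch below shows the caller pattern the patch converts the entry walks to: wait_entry_unlocked_exclusive() pauses the xa_state, drops and retakes the xa_lock around schedule() until the entry is unlocked, reloads it, and returns NULL if the entry disappeared, so loops built on xas_for_each() no longer need to special-case locked entries. This is an illustrative sketch modeled on __dax_clear_dirty_range() in the diff above, not code from the patch itself; example_walk_dax_entries() and its loop body are hypothetical, and it would only build inside fs/dax.c where put_unlocked_entry() and WAKE_NEXT are defined.

/*
 * Illustrative sketch only (not part of the patch): a walk over a DAX
 * mapping's XArray using the new wait_entry_unlocked_exclusive() helper,
 * modeled on __dax_clear_dirty_range() above.
 */
static void example_walk_dax_entries(struct address_space *mapping,
				     pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		/*
		 * Sleeps (dropping and retaking the xa_lock around
		 * schedule()) until the entry is unlocked, then reloads
		 * it; returns NULL if the entry went away while we slept.
		 */
		entry = wait_entry_unlocked_exclusive(&xas, entry);
		if (!entry)
			continue;

		/* ... operate on the now-unlocked entry here ... */

		/* This walk never locked the entry, so wake the next waiter. */
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
	}
	xas_unlock_irq(&xas);
}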