@@ -1354,32 +1354,29 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 /*
  * lock for reading
  */
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
 	if (!rwsem_read_trylock(sem)) {
-		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
+		if (IS_ERR(rwsem_down_read_slowpath(sem, state)))
+			return -EINTR;
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
+	return 0;
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
 }
 
 static inline int __down_read_interruptible(struct rw_semaphore *sem)
 {
-	if (!rwsem_read_trylock(sem)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
-			return -EINTR;
-		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-	}
-	return 0;
+	return __down_read_common(sem, TASK_INTERRUPTIBLE);
 }
 
 static inline int __down_read_killable(struct rw_semaphore *sem)
 {
-	if (!rwsem_read_trylock(sem)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
-			return -EINTR;
-		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-	}
-	return 0;
+	return __down_read_common(sem, TASK_KILLABLE);
 }
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
@@ -1405,22 +1402,26 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	if (unlikely(!rwsem_write_trylock(sem)))
-		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
 	if (unlikely(!rwsem_write_trylock(sem))) {
-		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
+		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
 			return -EINTR;
 	}
 
 	return 0;
 }
 
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	return __down_write_common(sem, TASK_KILLABLE);
+}
+
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
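
The fold does not change what callers of the rwsem API see: the interruptible/killable variants still return 0 on success and -EINTR if the sleep is interrupted, while the plain variants keep a void return. A minimal caller sketch, assuming a hypothetical `struct my_dev` whose `lock` has already been set up with `init_rwsem()` (none of this is part of the patch):

```c
#include <linux/rwsem.h>

struct my_dev {
	struct rw_semaphore lock;	/* illustrative only */
	int value;
};

/* Writer path: down_write_killable() returns -EINTR on a fatal signal. */
static int my_dev_set_value(struct my_dev *dev, int v)
{
	int ret = down_write_killable(&dev->lock);

	if (ret)
		return ret;
	dev->value = v;
	up_write(&dev->lock);
	return 0;
}
```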