@@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);

 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);

 	/* Set ring_size to 0 to indicate uninitialized queue */
 	cq->ring_size = 0;

-	mutex_unlock(&cq->cq_lock);
-	mutex_destroy(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 }

 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,

 	idpf_ctlq_init_regs(hw, cq, is_rxq);

-	mutex_init(&cq->cq_lock);
+	spin_lock_init(&cq->cq_lock);

 	list_add(&cq->cq_list, &hw->cq_list_head);

@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	int err = 0;
 	int i;

-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);

 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	wr32(hw, cq->reg.tail, cq->next_to_use);

 err_unlock:
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);

 	return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	if (*clean_count > cq->ring_size)
 		return -EBADR;

-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);

 	ntc = cq->next_to_clean;

@@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,

 	cq->next_to_clean = ntc;

-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);

 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
@@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	if (*buff_count > 0)
 		buffs_avail = true;

-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);

 	if (tbp >= cq->ring_size)
 		tbp = 0;
@@ -524,7 +523,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		wr32(hw, cq->reg.tail, cq->next_to_post);
 	}

-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);

 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 	u16 i;

 	/* take the lock before we start messing with the ring */
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);

 	ntc = cq->next_to_clean;

@@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,

 	cq->next_to_clean = ntc;

-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);

 	*num_q_msg = i;
 	if (*num_q_msg == 0)
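Note (not part of the hunks above): for these call sites to build, the cq_lock member of struct idpf_ctlq_info has to change type from a mutex to a spinlock in the control queue header as well. That header is not shown in this diff, so the following is only a minimal sketch of what the declaration would look like; the member name comes from the hunks above, everything else is assumed.

	#include <linux/spinlock.h>

	struct idpf_ctlq_info {
		/* ... other members unchanged ... */

		/* was: struct mutex cq_lock; hypothetical sketch of the new type */
		spinlock_t cq_lock;

		/* ... */
	};

Initialization then uses spin_lock_init() as in the idpf_ctlq_add() hunk, and the mutex_destroy() call in idpf_ctlq_shutdown() is simply dropped, since a spinlock has no teardown counterpart.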