
Commit 66c27e3

alobakin authored and anguy11 committed
idpf: stop using macros for accessing queue descriptors
In C, we have structures and unions. Casting `void *` via macros is not only error-prone, but also looks confusing and awful in general. In preparation for splitting the queue structs, replace it with a union and direct array dereferences.

Reviewed-by: Przemek Kitszel <[email protected]>
Reviewed-by: Mina Almasry <[email protected]>
Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
1 parent 62c8842 commit 66c27e3
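
The pattern is easy to see in isolation. Below is a minimal standalone sketch (simplified stand-in types and names, not the driver's real descriptor definitions) contrasting the old macro-plus-cast access with the new anonymous-union member access; both yield the same address, but the second is type-checked and far easier to read:

/* Reduced illustration of the change; all types here are placeholders. */
#include <stdio.h>

struct base_tx_desc { unsigned long long qw0, qw1; };     /* placeholder descriptor */
struct splitq_compl_desc { unsigned int qid_comptag; };   /* placeholder descriptor */

/* Old style: one untyped ring, cast at every access site. */
#define BASE_TX_DESC(q, i) \
	(&(((struct base_tx_desc *)((q)->desc_ring))[i]))

struct queue {
	union {
		struct base_tx_desc *base_tx;
		struct splitq_compl_desc *comp;
		void *desc_ring;	/* untyped view kept for alloc/free */
	};
	unsigned int desc_count;
};

int main(void)
{
	static struct base_tx_desc ring[4];
	struct queue q = { .desc_count = 4 };

	q.desc_ring = ring;

	/* Old: macro hides a void * cast. */
	struct base_tx_desc *a = BASE_TX_DESC(&q, 2);

	/* New: plain, type-checked array dereference. */
	struct base_tx_desc *b = &q.base_tx[2];

	printf("%d\n", a == b);	/* prints 1: same address, clearer code */
	return 0;
}

Because the untyped desc_ring member stays in the union, the existing allocation and free paths keep working unchanged; only the fast-path accessors move to the typed members.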

5 files changed, 52 insertions(+), 50 deletions(-)

drivers/net/ethernet/intel/idpf/idpf.h

Lines changed: 0 additions & 1 deletion
@@ -20,7 +20,6 @@ struct idpf_vport_max_q;
 #include <linux/dim.h>
 
 #include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
 #include "idpf_txrx.h"
 #include "idpf_controlq.h"
 

drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h

Lines changed: 2 additions & 0 deletions
@@ -4,6 +4,8 @@
 #ifndef _IDPF_LAN_TXRX_H_
 #define _IDPF_LAN_TXRX_H_
 
+#include <linux/bits.h>
+
 enum idpf_rss_hash {
 	IDPF_HASH_INVALID	= 0,
 	/* Values 1 - 28 are reserved for future use */

drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 10 additions & 10 deletions
@@ -205,7 +205,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
 	data_len = skb->data_len;
 	size = skb_headlen(skb);
 
-	tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+	tx_desc = &tx_q->base_tx[i];
 
 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
 
@@ -239,7 +239,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
 		i++;
 
 		if (i == tx_q->desc_count) {
-			tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->base_tx[0];
 			i = 0;
 		}
 
@@ -259,7 +259,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
 		i++;
 
 		if (i == tx_q->desc_count) {
-			tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->base_tx[0];
 			i = 0;
 		}
 
@@ -307,7 +307,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
 	memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
 	txq->tx_buf[ntu].ctx_entry = true;
 
-	ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+	ctx_desc = &txq->base_ctx[ntu];
 
 	IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
 	txq->next_to_use = ntu;
@@ -455,7 +455,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
 	struct netdev_queue *nq;
 	bool dont_wake;
 
-	tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+	tx_desc = &tx_q->base_tx[ntc];
 	tx_buf = &tx_q->tx_buf[ntc];
 	ntc -= tx_q->desc_count;
 
@@ -517,7 +517,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
 		if (unlikely(!ntc)) {
 			ntc -= tx_q->desc_count;
 			tx_buf = tx_q->tx_buf;
-			tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->base_tx[0];
 		}
 
 		/* unmap any remaining paged data */
@@ -540,7 +540,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
 			if (unlikely(!ntc)) {
 				ntc -= tx_q->desc_count;
 				tx_buf = tx_q->tx_buf;
-				tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+				tx_desc = &tx_q->base_tx[0];
 			}
 	} while (likely(budget));
 
@@ -895,7 +895,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
 	if (!cleaned_count)
 		return false;
 
-	desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
+	desc = &rx_q->single_buf[nta];
 	buf = &rx_q->rx_buf.buf[nta];
 
 	do {
@@ -915,7 +915,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
 		buf++;
 		nta++;
 		if (unlikely(nta == rx_q->desc_count)) {
-			desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
+			desc = &rx_q->single_buf[0];
 			buf = rx_q->rx_buf.buf;
 			nta = 0;
 		}
@@ -1016,7 +1016,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
 		struct idpf_rx_buf *rx_buf;
 
 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
-		rx_desc = IDPF_RX_DESC(rx_q, ntc);
+		rx_desc = &rx_q->rx[ntc];
 
 		/* status_error_ptype_len will always be zero for unused
 		 * descriptors because it's cleared in cleanup, and overlaps

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 15 additions & 17 deletions
@@ -531,7 +531,7 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
 	struct idpf_rx_buf *buf;
 	dma_addr_t addr;
 
-	splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
+	splitq_rx_desc = &bufq->split_buf[nta];
 	buf = &bufq->rx_buf.buf[buf_id];
 
 	if (bufq->rx_hsplit_en) {
@@ -1584,7 +1584,7 @@ do { \
 	if (unlikely(!(ntc))) {				\
 		ntc -= (txq)->desc_count;		\
 		buf = (txq)->tx_buf;			\
-		desc = IDPF_FLEX_TX_DESC(txq, 0);	\
+		desc = &(txq)->flex_tx[0];		\
 	} else {					\
 		(buf)++;				\
 		(desc)++;				\
@@ -1617,8 +1617,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
 	s16 ntc = tx_q->next_to_clean;
 	struct idpf_tx_buf *tx_buf;
 
-	tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
-	next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+	tx_desc = &tx_q->flex_tx[ntc];
+	next_pending_desc = &tx_q->flex_tx[end];
 	tx_buf = &tx_q->tx_buf[ntc];
 	ntc -= tx_q->desc_count;
 
@@ -1814,7 +1814,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 	int i;
 
 	complq_budget = vport->compln_clean_budget;
-	tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+	tx_desc = &complq->comp[ntc];
 	ntc -= complq->desc_count;
 
 	do {
@@ -1879,7 +1879,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 		ntc++;
 		if (unlikely(!ntc)) {
 			ntc -= complq->desc_count;
-			tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
+			tx_desc = &complq->comp[0];
 			change_bit(__IDPF_Q_GEN_CHK, complq->flags);
 		}
 
@@ -2143,7 +2143,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
 		 * used one additional descriptor for a context
 		 * descriptor. Reset that here.
 		 */
-		tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+		tx_desc = &txq->flex_tx[idx];
 		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
 		if (idx == 0)
 			idx = txq->desc_count;
@@ -2202,7 +2202,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
 	data_len = skb->data_len;
 	size = skb_headlen(skb);
 
-	tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+	tx_desc = &tx_q->flex_tx[i];
 
 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
 
@@ -2275,7 +2275,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
 		i++;
 
 		if (i == tx_q->desc_count) {
-			tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->flex_tx[0];
 			i = 0;
 			tx_q->compl_tag_cur_gen =
 				IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
@@ -2320,7 +2320,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
 		i++;
 
 		if (i == tx_q->desc_count) {
-			tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+			tx_desc = &tx_q->flex_tx[0];
 			i = 0;
 			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
 		}
@@ -2553,7 +2553,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
 	txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
 
 	/* grab the next descriptor */
-	desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+	desc = &txq->flex_ctx[i];
 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
 
 	return desc;
@@ -3128,7 +3128,6 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
 		struct idpf_sw_queue *refillq = NULL;
 		struct idpf_rxq_set *rxq_set = NULL;
 		struct idpf_rx_buf *rx_buf = NULL;
-		union virtchnl2_rx_desc *desc;
 		unsigned int pkt_len = 0;
 		unsigned int hdr_len = 0;
 		u16 gen_id, buf_id = 0;
@@ -3138,8 +3137,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
 		u8 rxdid;
 
 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
-		desc = IDPF_RX_DESC(rxq, ntc);
-		rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
+		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
 
 		/* This memory barrier is needed to keep us from reading
 		 * any other fields out of the rx_desc
@@ -3320,11 +3318,11 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
 	int cleaned = 0;
 	u16 gen;
 
-	buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+	buf_desc = &bufq->split_buf[bufq_nta];
 
 	/* make sure we stop at ring wrap in the unlikely case ring is full */
 	while (likely(cleaned < refillq->desc_count)) {
-		u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+		u16 refill_desc = refillq->ring[ntc];
 		bool failure;
 
 		gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
@@ -3342,7 +3340,7 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
 		}
 
 		if (unlikely(++bufq_nta == bufq->desc_count)) {
-			buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+			buf_desc = &bufq->split_buf[0];
 			bufq_nta = 0;
 		} else {
 			buf_desc++;
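
The idpf_rx_splitq_clean() hunk above goes one step further than a pointer swap: because `union virtchnl2_rx_desc` already carries a member per write-back format, the old cast to `struct virtchnl2_rx_flex_desc_adv_nic_3 *` collapses into a member selection on the indexed element. A reduced sketch of that idea, with made-up format names standing in for the virtchnl2 definitions:

/* Reduced sketch: selecting a descriptor format via a union member
 * instead of casting. Types and names are illustrative, not virtchnl2's.
 */
#include <stdint.h>
#include <stdio.h>

struct rx_base_wb { uint64_t qw0, qw1; };
struct rx_flex_wb { uint16_t ptype; uint16_t pkt_len; uint32_t rss_hash; uint64_t qw1; };

union rx_desc {
	struct rx_base_wb base_wb;
	struct rx_flex_wb flex_wb;
};

int main(void)
{
	static union rx_desc ring[4];
	union rx_desc *rx = ring;	/* typed ring pointer, as in the union'd queue */
	unsigned int ntc = 1;

	ring[ntc].flex_wb.pkt_len = 128;

	/* Old style would cast: (struct rx_flex_wb *)&ring[ntc]
	 * New style just names the union member on the indexed element:
	 */
	const struct rx_flex_wb *rx_desc = &rx[ntc].flex_wb;

	printf("%u\n", (unsigned int)rx_desc->pkt_len);	/* 128 */
	return 0;
}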

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 25 additions & 22 deletions
@@ -8,6 +8,7 @@
 #include <net/tcp.h>
 #include <net/netdev_queues.h>
 
+#include "idpf_lan_txrx.h"
 #include "virtchnl2_lan_desc.h"
 
 #define IDPF_LARGE_MAX_Q			256
@@ -117,24 +118,6 @@ do { \
 #define IDPF_RXD_EOF_SPLITQ	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
 #define IDPF_RXD_EOF_SINGLEQ	VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
 
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i)	\
-	(&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i)	\
-	(&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i)	((((rxq)->ring))[i])
-
-#define IDPF_BASE_TX_DESC(txq, i)	\
-	(&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i)	\
-	(&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i)	\
-	(&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-
-#define IDPF_FLEX_TX_DESC(txq, i)	\
-	(&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i)	\
-	(&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
-
 #define IDPF_DESC_UNUSED(txq)	\
 	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
 	(txq)->next_to_clean - (txq)->next_to_use - 1)
@@ -317,8 +300,6 @@ struct idpf_rx_extracted {
 
 #define IDPF_RX_DMA_ATTR \
 	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-#define IDPF_RX_DESC(rxq, i)	\
-	(&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
 
 struct idpf_rx_buf {
 	struct page *page;
@@ -655,7 +636,15 @@ union idpf_queue_stats {
  * @q_vector: Backreference to associated vector
  * @size: Length of descriptor ring in bytes
  * @dma: Physical address of ring
- * @desc_ring: Descriptor ring memory
+ * @rx: universal receive descriptor array
+ * @single_buf: Rx buffer descriptor array in singleq
+ * @split_buf: Rx buffer descriptor array in splitq
+ * @base_tx: basic Tx descriptor array
+ * @base_ctx: basic Tx context descriptor array
+ * @flex_tx: flex Tx descriptor array
+ * @flex_ctx: flex Tx context descriptor array
+ * @comp: completion descriptor array
+ * @desc_ring: virtual descriptor ring address
  * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
  * @tx_min_pkt_len: Min supported packet length
  * @num_completions: Only relevant for TX completion queue. It tracks the
@@ -733,7 +722,21 @@ struct idpf_queue {
 	struct idpf_q_vector *q_vector;
 	unsigned int size;
 	dma_addr_t dma;
-	void *desc_ring;
+	union {
+		union virtchnl2_rx_desc *rx;
+
+		struct virtchnl2_singleq_rx_buf_desc *single_buf;
+		struct virtchnl2_splitq_rx_buf_desc *split_buf;
+
+		struct idpf_base_tx_desc *base_tx;
+		struct idpf_base_tx_ctx_desc *base_ctx;
+		union idpf_tx_flex_desc *flex_tx;
+		struct idpf_flex_tx_ctx_desc *flex_ctx;
+
+		struct idpf_splitq_tx_compl_desc *comp;
+
+		void *desc_ring;
+	};
 
 	u16 tx_max_bufs;
 	u8 tx_min_pkt_len;
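
Since every member of the new union is a pointer, the union occupies a single pointer's worth of storage, so struct idpf_queue does not grow and `desc_ring` remains available as the untyped view for allocation and teardown while hot paths index the typed members. A small standalone sketch of that layout property, using hypothetical descriptor types rather than the idpf headers:

/* Standalone check that a union of ring views costs no extra space.
 * Descriptor types are hypothetical placeholders, not idpf's.
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct rx_desc { unsigned long long qw[4]; };
struct tx_desc { unsigned long long qw[2]; };

struct queue {
	union {
		struct rx_desc *rx;
		struct tx_desc *base_tx;
		void *desc_ring;	/* untyped view for alloc/free */
	};
	unsigned int desc_count;
};

/* All views alias the same pointer slot in the containing struct. */
static_assert(offsetof(struct queue, rx) == offsetof(struct queue, desc_ring),
	      "all ring views alias the same storage");

int main(void)
{
	struct queue q = { .desc_count = 8 };

	/* Allocation still goes through the untyped member... */
	q.desc_ring = calloc(q.desc_count, sizeof(struct tx_desc));
	if (!q.desc_ring)
		return 1;

	/* ...while fast-path code indexes the typed member directly. */
	memset(&q.base_tx[3], 0xff, sizeof(q.base_tx[3]));

	free(q.desc_ring);
	return 0;
}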
