/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
 */

#ifndef _MANA_SHADOW_QUEUE_H_
#define _MANA_SHADOW_QUEUE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mm.h>

struct shadow_wqe_header {
	u16 opcode;
	u16 error_code;
	u32 posted_wqe_size;
	u64 wr_id;
};
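
/*
 * Each QP-type-specific shadow WQE embeds struct shadow_wqe_header as
 * its first member, so queue elements can be handled generically
 * through the header.
 */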

struct ud_rq_shadow_wqe {
	struct shadow_wqe_header header;
	u32 byte_len;
	u32 src_qpn;
};

struct ud_sq_shadow_wqe {
	struct shadow_wqe_header header;
};

struct shadow_queue {
	/* Unmasked producer index, incremented on WQE posting */
	u64 prod_idx;
	/* Unmasked consumer index, incremented on CQ polling */
	u64 cons_idx;
	/* Unmasked index of the next shadow WQE to be completed by HW */
	u64 next_to_complete_idx;
	/* Queue size in WQEs */
	u32 length;
	/* Distance between elements in bytes */
	u32 stride;
	/* Ring buffer holding the WQEs */
	void *buffer;
};
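
/*
 * The three indices are free-running and only ever increase. They
 * satisfy cons_idx <= next_to_complete_idx <= prod_idx: a WQE is
 * first posted (prod_idx), later completed by hardware
 * (next_to_complete_idx), and finally reported to the consumer
 * during CQ polling (cons_idx). shadow_queue_full() reports full
 * once prod_idx - cons_idx reaches length.
 */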

static inline int create_shadow_queue(struct shadow_queue *queue, u32 length, u32 stride)
{
	queue->buffer = kvmalloc_array(length, stride, GFP_KERNEL);
	if (!queue->buffer)
		return -ENOMEM;

	queue->length = length;
	queue->stride = stride;

	return 0;
}
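
/*
 * Note: kvmalloc_array() may fall back to vmalloc() for large queues,
 * so the buffer is not guaranteed to be physically contiguous. Pair
 * every create_shadow_queue() with destroy_shadow_queue() below.
 */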

static inline void destroy_shadow_queue(struct shadow_queue *queue)
{
	kvfree(queue->buffer);
}

static inline bool shadow_queue_full(const struct shadow_queue *queue)
{
	return (queue->prod_idx - queue->cons_idx) >= queue->length;
}

static inline bool shadow_queue_empty(const struct shadow_queue *queue)
{
	return queue->prod_idx == queue->cons_idx;
}

static inline void *
shadow_queue_get_element(const struct shadow_queue *queue, u64 unmasked_index)
{
	u32 index = unmasked_index % queue->length;

	return ((u8 *)queue->buffer + index * queue->stride);
}

/* Slot for the next WQE to be posted */
static inline void *
shadow_queue_producer_entry(struct shadow_queue *queue)
{
	return shadow_queue_get_element(queue, queue->prod_idx);
}

/* Oldest completed WQE not yet reported to the consumer, or NULL */
static inline void *
shadow_queue_get_next_to_consume(const struct shadow_queue *queue)
{
	if (queue->cons_idx == queue->next_to_complete_idx)
		return NULL;

	return shadow_queue_get_element(queue, queue->cons_idx);
}

/* Oldest posted WQE not yet completed by hardware, or NULL */
static inline void *
shadow_queue_get_next_to_complete(struct shadow_queue *queue)
{
	if (queue->next_to_complete_idx == queue->prod_idx)
		return NULL;

	return shadow_queue_get_element(queue, queue->next_to_complete_idx);
}

static inline void shadow_queue_advance_producer(struct shadow_queue *queue)
{
	queue->prod_idx++;
}

static inline void shadow_queue_advance_consumer(struct shadow_queue *queue)
{
	queue->cons_idx++;
}

static inline void shadow_queue_advance_next_to_complete(struct shadow_queue *queue)
{
	queue->next_to_complete_idx++;
}
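
/*
 * Illustrative sketch only (not part of the driver API): how the
 * helpers above are intended to compose. shadow_queue_example_post()
 * and shadow_queue_example_complete() are hypothetical callers, not
 * functions used elsewhere in the driver.
 */
static inline int shadow_queue_example_post(struct shadow_queue *queue,
					    u64 wr_id, u32 posted_wqe_size)
{
	struct shadow_wqe_header *wqe;

	if (shadow_queue_full(queue))
		return -ENOMEM;

	/* Record bookkeeping for the WQE being handed to hardware. */
	wqe = shadow_queue_producer_entry(queue);
	wqe->wr_id = wr_id;
	wqe->posted_wqe_size = posted_wqe_size;
	shadow_queue_advance_producer(queue);

	return 0;
}

static inline void shadow_queue_example_complete(struct shadow_queue *queue,
						 u16 error_code)
{
	struct shadow_wqe_header *wqe;

	/* Oldest outstanding WQE, if hardware has anything pending. */
	wqe = shadow_queue_get_next_to_complete(queue);
	if (!wqe)
		return;

	wqe->error_code = error_code;
	shadow_queue_advance_next_to_complete(queue);

	/*
	 * The completed WQE is later drained during CQ polling via
	 * shadow_queue_get_next_to_consume() followed by
	 * shadow_queue_advance_consumer().
	 */
}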

#endif /* _MANA_SHADOW_QUEUE_H_ */