/*
 * Copyright (c) 2024, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __BNXT_RE_MEMORY_H__
#define __BNXT_RE_MEMORY_H__

#include <pthread.h>

#include "main.h"

/* A single page-aligned allocation, carved up by bnxt_re_get_obj() and
 * bnxt_re_get_ring() below.
 */
struct bnxt_re_mem {
	void *va_head;
	void *va_tail;
	uint32_t head;
	uint32_t tail;
	uint32_t size;
	uint32_t pad;
};

#define BNXT_RE_QATTR_SQ_INDX	0
#define BNXT_RE_QATTR_RQ_INDX	1
struct bnxt_re_qattr {
	uint32_t esize;   /* element (slot) size in bytes */
	uint32_t slots;   /* ring depth in slots */
	uint32_t nwr;     /* work requests the ring can hold */
	uint32_t sz_ring; /* ring size in bytes */
	uint32_t sz_shad; /* shadow (software) queue size in bytes */
	uint32_t sw_nwr;  /* shadow queue depth in entries */
};

/* Spinlock wrapper struct */
struct bnxt_spinlock {
	pthread_spinlock_t lock;
	int in_use;
	int need_lock; /* clear when locking can be skipped (single-threaded) */
};

struct bnxt_re_queue {
	struct bnxt_spinlock qlock;
	uint32_t flags;
	uint32_t *dbtail;
	void *va;
	uint32_t head;
	uint32_t depth; /* number of entries */
	void *pad;
	uint32_t pad_stride_log2;
	uint32_t tail;
	uint32_t max_slots;
	/* Represents the difference between the real queue depth allocated
	 * in HW and the user-requested queue depth; it is used to correctly
	 * flag the queue-full condition based on the user-supplied depth.
	 * This value can vary depending on the type of queue and any HW
	 * requirements that mandate keeping a fixed gap between the
	 * producer and the consumer indices in the queue.
	 */
	uint32_t diff;
	uint32_t stride;
	uint32_t msn;
	uint32_t msn_tbl_sz;
};
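
/*
 * Illustrative example (the numbers are hypothetical): if a user requests
 * a depth of 60 and HW rounds the allocation up to 64 slots, diff is 4;
 * bnxt_re_is_que_full() then reports full once 60 user-visible entries
 * are outstanding, even though the ring itself still has room.
 */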

/* Round size up to a multiple of al_size; al_size must be a power of two */
static inline unsigned long get_aligned(uint32_t size, uint32_t al_size)
{
	return (unsigned long) (size + al_size - 1) & ~(al_size - 1);
}

/* Round val up to the next power of two; note that val == 1 returns 2 */
static inline unsigned long roundup_pow_of_two(unsigned long val)
{
	unsigned long roundup = 1;

	if (val == 1)
		return (roundup << 1);

	while (roundup < val)
		roundup <<= 1;

	return roundup;
}
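
/*
 * A few worked values for the helpers above: get_aligned(100, 64) == 128,
 * roundup_pow_of_two(24) == 32, and roundup_pow_of_two(1) == 2 (the
 * minimum this helper hands out).
 */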

#define iowrite64(dst, val) (*((volatile __u64 *) (dst)) = (val))
#define iowrite32(dst, val) (*((volatile __u32 *) (dst)) = (val))
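
/*
 * Note: these macros expand to single volatile stores and carry no memory
 * barrier of their own; any ordering against queue-memory updates must be
 * enforced by the caller.
 */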

/* Basic queue operations */

/* Return the slot idx entries past the current tail, wrapping at depth */
static inline void *bnxt_re_get_hwqe(struct bnxt_re_queue *que, uint32_t idx)
{
	idx += que->tail;
	if (idx >= que->depth)
		idx -= que->depth;
	return (void *)(que->va + (idx << 4)); /* 16-byte slots */
}

static inline void *bnxt_re_get_hwqe_hdr(struct bnxt_re_queue *que)
{
	return (void *)(que->va + ((que->tail) << 4));
}

static inline uint32_t bnxt_re_is_que_full(struct bnxt_re_queue *que,
					   uint32_t slots)
{
	int32_t avail, head, tail;

	head = que->head;
	tail = que->tail;
	avail = head - tail;
	if (head <= tail)
		avail += que->depth;
	/* Reserve 'diff' slots so the user-visible depth is honored */
	return avail <= (slots + que->diff);
}

static inline uint32_t bnxt_re_is_que_empty(struct bnxt_re_queue *que)
{
	return que->tail == que->head;
}

static inline void bnxt_re_incr_tail(struct bnxt_re_queue *que, uint8_t cnt)
{
	que->tail += cnt;
	if (que->tail >= que->depth) {
		que->tail %= que->depth;
		/* Rolled over; toggle the tail bit in epoch flags */
		que->flags ^= 1UL << BNXT_RE_FLAG_EPOCH_TAIL_SHIFT;
	}
}

static inline void bnxt_re_incr_head(struct bnxt_re_queue *que, uint8_t cnt)
{
	que->head += cnt;
	if (que->head >= que->depth) {
		que->head %= que->depth;
		/* Rolled over; toggle the head bit in epoch flags */
		que->flags ^= 1UL << BNXT_RE_FLAG_EPOCH_HEAD_SHIFT;
	}
}
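
/*
 * Minimal posting sketch (a hypothetical caller; 'sq', 'slots', 'hdr' are
 * assumptions for illustration, not the driver's actual post path):
 *
 *	if (bnxt_re_is_que_full(sq, slots))
 *		return ENOMEM;
 *	hdr = bnxt_re_get_hwqe_hdr(sq);    // slot at the current tail
 *	... build the WQE starting at hdr ...
 *	bnxt_re_incr_tail(sq, slots);      // publish; may toggle the epoch bit
 */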

void bnxt_re_free_mem(struct bnxt_re_mem *mem);
void *bnxt_re_alloc_mem(size_t size, uint32_t pg_size);
void *bnxt_re_get_obj(struct bnxt_re_mem *mem, size_t req);
void *bnxt_re_get_ring(struct bnxt_re_mem *mem, size_t req);

#endif /* __BNXT_RE_MEMORY_H__ */